repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
keras
keras-master/keras/optimizers_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras optimizers.""" import tensorflow.compat.v2 as tf import gc import weakref import numpy as np import keras from keras import keras_parameterized from keras import optimizer_v1 from keras import testing_utils from keras.utils import np_utils from tensorflow.python.training.adam import AdamOptimizer from tensorflow.python.training.experimental.loss_scale_optimizer import MixedPrecisionLossScaleOptimizer def _get_model(input_dim, num_hidden, output_dim): model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, activation='relu', input_shape=(input_dim,))) model.add(keras.layers.Dense(output_dim, activation='softmax')) return model @keras_parameterized.run_all_keras_modes class KerasOptimizersTest(keras_parameterized.TestCase): def _test_optimizer(self, optimizer, target=0.75): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2) y_train = np_utils.to_categorical(y_train) model = _get_model(x_train.shape[1], 20, y_train.shape[1]) model.compile( loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly()) np.testing.assert_equal( 
keras.backend.get_value(model.optimizer.iterations), 0) history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0) np.testing.assert_equal( keras.backend.get_value(model.optimizer.iterations), 126) # 63 steps per epoch self.assertGreaterEqual(history.history['acc'][-1], target) config = keras.optimizers.serialize(optimizer) optim = keras.optimizers.deserialize(config) new_config = keras.optimizers.serialize(optim) new_config['class_name'] = new_config['class_name'].lower() new_config['config'].pop('name', None) if 'amsgrad' not in config['config']: new_config['config'].pop('amsgrad', None) if 'decay' in new_config['config'] and 'schedule_decay' in config['config']: new_config['config']['schedule_decay'] = new_config['config'].pop('decay') if 'momentum' not in config['config']: new_config['config'].pop('momentum', None) if 'centered' not in config['config']: new_config['config'].pop('centered', None) self.assertDictEqual(config, new_config) # Test constraints. model = keras.models.Sequential() dense = keras.layers.Dense( 10, input_shape=(x_train.shape[1],), kernel_constraint=lambda x: 0. * x + 1., bias_constraint=lambda x: 0. 
* x + 2., activation='relu') model.add(dense) model.add(keras.layers.Dense(y_train.shape[1], activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly()) np.testing.assert_equal( keras.backend.get_value(model.optimizer.iterations), 126) # Using same optimizer from before model.train_on_batch(x_train[:10], y_train[:10]) np.testing.assert_equal( keras.backend.get_value(model.optimizer.iterations), 127) kernel, bias = dense.get_weights() np.testing.assert_allclose(kernel, 1., atol=1e-3) np.testing.assert_allclose(bias, 2., atol=1e-3) def test_sgd(self): with self.cached_session(): self._test_optimizer(optimizer_v1.SGD()) def test_momentum(self): with self.cached_session(): self._test_optimizer( optimizer_v1.SGD(lr=0.01, momentum=0.9, nesterov=True)) def test_rmsprop(self): with self.cached_session(): self._test_optimizer(optimizer_v1.RMSprop()) self._test_optimizer(optimizer_v1.RMSprop(decay=1e-3)) def test_adagrad(self): with self.cached_session(): self._test_optimizer(optimizer_v1.Adagrad()) self._test_optimizer(optimizer_v1.Adagrad(decay=1e-3)) def test_adadelta(self): with self.cached_session(): self._test_optimizer(optimizer_v1.Adadelta(), target=0.6) # Accuracy seems dependent on the initialization. Even adding # tf.compat.v1.Print nodes in the graph seemed to affect the # initialization seed, and hence the accuracy. self._test_optimizer(optimizer_v1.Adadelta(decay=1e-3), target=0.4) def test_adam(self): with self.cached_session(): self._test_optimizer(optimizer_v1.Adam()) # Accuracy seems dependent on the seed initialization. # TODO(b/121051441): fix test flakiness. 
self._test_optimizer(optimizer_v1.Adam(decay=1e-3), target=0.73) self._test_optimizer(optimizer_v1.Adam(amsgrad=True)) def test_adamax(self): with self.cached_session(): self._test_optimizer(optimizer_v1.Adamax()) self._test_optimizer(optimizer_v1.Adamax(decay=1e-3)) def test_nadam(self): with self.cached_session(): self._test_optimizer(optimizer_v1.Nadam()) def test_clipnorm(self): with self.cached_session(): self._test_optimizer( optimizer_v1.SGD(lr=0.01, momentum=0.9, clipnorm=0.5)) def test_clipvalue(self): with self.cached_session(): self._test_optimizer( optimizer_v1.SGD(lr=0.01, momentum=0.9, clipvalue=0.5)) def test_tf_optimizer(self): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') optimizer = optimizer_v1.TFOptimizer(AdamOptimizer(0.01)) model = keras.models.Sequential() model.add(keras.layers.Dense( 2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1))) # This is possible model.compile( loss='mean_squared_error', optimizer=optimizer, run_eagerly=testing_utils.should_run_eagerly()) keras.backend.track_tf_optimizer(optimizer) model.fit(np.random.random((5, 3)), np.random.random((5, 2)), epochs=1, batch_size=5, verbose=0) # not supported with self.assertRaises(NotImplementedError): _ = optimizer.weights with self.assertRaises(NotImplementedError): optimizer.get_config() with self.assertRaises(NotImplementedError): optimizer.from_config(None) def test_optimizer_garbage_collection(self): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') graph = tf.Graph() with graph.as_default(): optimizer = optimizer_v1.TFOptimizer(AdamOptimizer(0.01)) keras.backend.track_tf_optimizer(optimizer) optimizer_weak = weakref.ref(optimizer) graph_weak = weakref.ref(graph) del graph, optimizer gc.collect() # Check that the weak references are dead now. 
self.assertIs(graph_weak(), None) self.assertIs(optimizer_weak(), None) def test_tf_optimizer_iterations(self): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') with self.cached_session(): optimizer = optimizer_v1.TFOptimizer(AdamOptimizer(0.01)) model = keras.models.Sequential() model.add(keras.layers.Dense( 2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1))) model.compile( loss='mean_squared_error', optimizer=optimizer, run_eagerly=testing_utils.should_run_eagerly()) keras.backend.track_tf_optimizer(optimizer) self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 0) model.fit(np.random.random((55, 3)), np.random.random((55, 2)), epochs=1, batch_size=5, verbose=0) self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 11) def test_negative_clipvalue_or_clipnorm(self): with self.assertRaises(ValueError): _ = optimizer_v1.SGD(lr=0.01, clipvalue=-0.5) with self.assertRaises(ValueError): _ = optimizer_v1.Adam(clipnorm=-2.0) def test_mixed_precision_loss_scale_optimizer(self): if tf.executing_eagerly(): self.skipTest('v1 optimizer does not run in eager mode') optimizer = MixedPrecisionLossScaleOptimizer(AdamOptimizer(), 'dynamic') model = keras.models.Sequential() model.add( keras.layers.Dense( 2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1))) model.compile( loss='mean_squared_error', optimizer=optimizer, run_eagerly=testing_utils.should_run_eagerly()) model.fit( np.random.random((5, 3)), np.random.random((5, 2)), epochs=1, batch_size=5, verbose=0) def test_deserialization_error(self): with self.assertRaisesRegex(ValueError, 'Could not interpret optimizer'): keras.optimizers.get(0) if __name__ == '__main__': tf.test.main()
9,695
36.581395
105
py
keras
keras-master/keras/losses_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras loss functions.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from tensorflow.python.autograph.impl import api as autograph from keras import activations from keras import backend from keras import combinations from keras import losses from keras.utils import losses_utils ALL_LOSSES = [ losses.mean_squared_error, losses.mean_absolute_error, losses.mean_absolute_percentage_error, losses.mean_squared_logarithmic_error, losses.squared_hinge, losses.hinge, losses.categorical_crossentropy, losses.binary_crossentropy, losses.kl_divergence, losses.poisson, losses.cosine_similarity, losses.log_cosh, losses.categorical_hinge ] class KerasLossesTest(tf.test.TestCase, parameterized.TestCase): def test_objective_shapes_3d(self): with self.cached_session(): y_a = backend.variable(np.random.random((5, 6, 7))) y_b = backend.variable(np.random.random((5, 6, 7))) for obj in ALL_LOSSES: objective_output = obj(y_a, y_b) self.assertListEqual(objective_output.shape.as_list(), [5, 6]) def test_objective_shapes_2d(self): with self.cached_session(): y_a = backend.variable(np.random.random((6, 7))) y_b = backend.variable(np.random.random((6, 7))) for obj in ALL_LOSSES: objective_output = obj(y_a, y_b) 
self.assertListEqual(objective_output.shape.as_list(), [ 6, ]) def test_cce_one_hot(self): with self.cached_session(): y_a = backend.variable(np.random.randint(0, 7, (5, 6))) y_b = backend.variable(np.random.random((5, 6, 7))) objective_output = losses.sparse_categorical_crossentropy(y_a, y_b) assert backend.eval(objective_output).shape == (5, 6) y_a = backend.variable(np.random.randint(0, 7, (6,))) y_b = backend.variable(np.random.random((6, 7))) objective_output = losses.sparse_categorical_crossentropy(y_a, y_b) assert backend.eval(objective_output).shape == (6,) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_loss(self): target = backend.variable(np.random.randint(0, 1, (5, 1))) logits = backend.variable(np.random.random((5, 1))) softmax_output = backend.softmax(logits) output_from_logit = losses.categorical_crossentropy( target, logits, from_logits=True) output_from_softmax = losses.categorical_crossentropy( target, softmax_output) np.testing.assert_allclose( backend.eval(output_from_logit), backend.eval(output_from_softmax), atol=1e-5) axis = 0 output_from_logit_axis = losses.categorical_crossentropy( target, logits, from_logits=True, axis=axis) output_from_softmax_axis = losses.categorical_crossentropy( target, softmax_output, axis=axis) np.testing.assert_allclose( backend.eval(output_from_logit_axis), backend.eval(output_from_softmax_axis), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self): t = backend.placeholder() p = backend.placeholder() o = losses.categorical_crossentropy(t, p) t_val = tf.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) p_val = tf.convert_to_tensor([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .116, .062], 1e-3) # from logits p_val = tf.convert_to_tensor([[8., 1., 1.], 
[0., 9., 1.], [2., 3., 5.]]) o = losses.categorical_crossentropy(t, p, from_logits=True) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, 0, .17], 1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_sparse_categorical_crossentropy_loss(self): target = backend.variable(np.random.randint(0, 1, (5, 1))) logits = backend.variable(np.random.random((5, 1))) softmax_output = backend.softmax(logits) output_from_logit = losses.sparse_categorical_crossentropy( target, logits, from_logits=True) output_from_softmax = losses.sparse_categorical_crossentropy( target, softmax_output) np.testing.assert_allclose( backend.eval(output_from_logit), backend.eval(output_from_softmax), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph'])) def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self): # This test only runs in graph because the TF op layer is not supported yet # for sparse ops. t = backend.placeholder() p = backend.placeholder() o = losses.sparse_categorical_crossentropy(t, p) t_val = tf.convert_to_tensor([0, 1, 2]) p_val = tf.convert_to_tensor([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .116, .062], 1e-3) # from logits p_val = tf.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) o = losses.sparse_categorical_crossentropy(t, p, from_logits=True) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, 0, .17], 1e-3) @combinations.generate(combinations.combine(mode=['eager'])) def test_sparse_categorical_crossentropy_with_float16(self): # See https://github.com/keras-team/keras/issues/15012 for more details. # we don't cast y_true to have same dtype as y_pred, since y_pred could be # float16 which has a small upbound, and the casting could cause an # underflow. The y_true will be used as int64 anyway. 
# create 2 observations with 2049 labels, since 2048 is the largest number # for float16 y_true = [0, 2049] # should result in a loss close to 0 since predicting y_true perfectly y_pred = np.zeros((2, 2050)) y_pred[0][0] = 1 y_pred[1][2049] = 1 y_pred_16 = tf.convert_to_tensor(y_pred, dtype=tf.float16) # If we did a cast for y_true to float16 in SparseCategoricalCrossentropy, # then the loss will not be zero. scce = losses.SparseCategoricalCrossentropy() self.assertAllClose(scce(y_true, y_pred_16).numpy(), 0.0, atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_binary_crossentropy_loss(self): target = backend.variable(np.random.randint(0, 1, (5, 1))) logits = backend.variable(np.random.random((5, 1))) sigmoid_output = backend.sigmoid(logits) output_from_logit = losses.binary_crossentropy( target, logits, from_logits=True) output_from_sigmoid = losses.binary_crossentropy(target, sigmoid_output) np.testing.assert_allclose( backend.eval(output_from_logit), backend.eval(output_from_sigmoid), atol=1e-5) axis = 0 output_from_logit_axis = losses.binary_crossentropy( target, logits, from_logits=True, axis=axis) output_from_sigmoid_axis = losses.binary_crossentropy( target, sigmoid_output, axis=axis) np.testing.assert_allclose( backend.eval(output_from_logit_axis), backend.eval(output_from_sigmoid_axis), atol=1e-5) def test_get_bce(self): bce_fn = losses.get('bce') self.assertEqual(bce_fn, losses.binary_crossentropy) def test_serialization(self): fn = losses.get('mse') config = losses.serialize(fn) new_fn = losses.deserialize(config) self.assertEqual(fn, new_fn) def test_categorical_hinge(self): y_pred = backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])) y_true = backend.variable(np.array([[0, 1, 0], [1, 0, 0]])) expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0 loss = backend.eval(losses.categorical_hinge(y_true, y_pred)) self.assertAllClose(expected_loss, np.mean(loss)) def test_loss_wrapper(self): loss_fn = 
losses.get('mse') mse_obj = losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__) self.assertEqual(mse_obj.name, 'mean_squared_error') self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.AUTO) y_true = tf.constant([[1., 9.], [2., 5.]]) y_pred = tf.constant([[4., 8.], [12., 3.]]) sample_weight = tf.constant([1.2, 0.5]) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) # mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2] # mse = [5, 52] # weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26] # reduced_weighted_mse = (6 + 26) / 2 = self.assertAllClose(self.evaluate(loss), 16, 1e-2) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_loss_wrapper_autograph(self): # Test that functions with control flow wrapped in a LossFunctionWrapper # get autographed when in a tf.function def loss_fn(y_true, y_pred): mse_loss_fn = losses.get('mse') if tf.reduce_mean(y_true) > 0: return mse_loss_fn(y_true, y_pred) else: return mse_loss_fn(y_true, y_pred) mse_obj = losses.LossFunctionWrapper(loss_fn) y_true = tf.constant([[1., 9.], [2., 5.]]) y_pred = tf.constant([[4., 8.], [12., 3.]]) sample_weight = tf.constant([1.2, 0.5]) @tf.function def tf_functioned_loss_fn(y_true, y_pred, sample_weight=None): return mse_obj(y_true, y_pred, sample_weight=sample_weight) loss = tf_functioned_loss_fn(y_true, y_pred, sample_weight=sample_weight) # mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2] # mse = [5, 52] # weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26] # reduced_weighted_mse = (6 + 26) / 2 = self.assertAllClose(self.evaluate(loss), 16, 1e-2) def test_invalid_reduction(self): with self.assertRaisesRegex(ValueError, 'Invalid Reduction Key: Foo.'): losses.MeanSquaredError(reduction='Foo') mse_obj = losses.MeanSquaredError() y = tf.constant([1]) mse_obj.reduction = 'Bar' with self.assertRaisesRegex(ValueError, 'Invalid Reduction Key: Bar.'): mse_obj(y, y) def test_deserialization_error(self): with 
self.assertRaisesRegex(ValueError, 'Could not interpret loss'): losses.get(0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_binary_crossentropy_uses_cached_logits(self): logits = tf.constant([[-30., 30.]]) y_pred = activations.sigmoid(logits) self.assertTrue(hasattr(y_pred, '_keras_logits')) y_true = tf.constant([[0., 1.]]) loss = losses.binary_crossentropy(y_true, y_pred)[0] # Check that logits are used. If y_pred is used directly, loss will # collapse to 0 from underflow. self.assertNotEqual(self.evaluate(loss), 0.) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_uses_cached_logits(self): logits = tf.constant([[-5., 0., 5.]]) y_pred = activations.softmax(logits) self.assertTrue(hasattr(y_pred, '_keras_logits')) y_true = tf.constant([[0., 0., 1.]]) loss = losses.categorical_crossentropy(y_true, logits, from_logits=True)[0] # Check that logits are used. If y_pred is used directly, loss will # collapse to 0 from underflow. self.assertNotEqual(self.evaluate(loss), 0.) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_sparse_categorical_crossentropy_uses_cached_logits(self): logits = tf.constant([[-5., 0., 5.]]) y_pred = activations.softmax(logits) self.assertTrue(hasattr(y_pred, '_keras_logits')) y_true = tf.constant([2]) loss = losses.sparse_categorical_crossentropy( y_true, logits, from_logits=True)[0] # Check that logits are used. If y_pred is used directly, loss will # collapse to 0 from underflow. self.assertNotEqual(self.evaluate(loss), 0.) @combinations.generate(combinations.combine(mode=['eager'])) def test_loss_not_autographed_in_eager(self): class MyLoss(losses.Loss): def call(self, y_true, y_pred): return y_true - y_pred loss = MyLoss() y_true = tf.constant([[0., 0., 0.]]) y_pred = tf.constant([[1., 1., 1.]]) def tf_convert(fn, _): assert False, 'Function should not be autographed.' 
return fn with tf.compat.v1.test.mock.patch.object(autograph, 'tf_convert', tf_convert): loss(y_true, y_pred) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanSquaredErrorTest(tf.test.TestCase): def test_config(self): mse_obj = losses.MeanSquaredError( reduction=losses_utils.ReductionV2.SUM, name='mse_1') self.assertEqual(mse_obj.name, 'mse_1') self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3)) loss = mse_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mse_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 49.5, 3) def test_scalar_weighted(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mse_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 113.85, 3) def test_sample_weighted(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3) def test_ragged_tensors(self): mse_obj = losses.MeanSquaredError() y_true = tf.ragged.constant([[1., 1., 9.], [2., 5.]]) y_pred = tf.ragged.constant([[4., 1., 8.], [12., 3.]]) sample_weight = tf.constant([1.2, 0.5]) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) # mse = [((4 - 1)^2 + (8 - 9)^2) / 3, ((12 - 2)^2 + (3 - 5)^2) / 2] # mse = [3.(3), 52] # weighted_mse = [3.(3) 
* 1.2, 52 * 0.5] = [4, 26] # reduced_weighted_mse = (4 + 26) / 2 = self.assertAllClose(self.evaluate(loss), 15, 1e-2) def test_timestep_weighted(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32) sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3) def test_zero_weighted(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mse_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_invalid_sample_weight(self): mse_obj = losses.MeanSquaredError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1)) sample_weight = tf.constant([3, 6, 5, 0], shape=(2, 2)) with self.assertRaisesRegex((ValueError, tf.errors.InvalidArgumentError), (r'Incompatible shapes: \[2,3\] vs. 
\[2,2\]|' 'Dimensions must be equal')): mse_obj(y_true, y_pred, sample_weight=sample_weight) def test_no_reduction(self): mse_obj = losses.MeanSquaredError(reduction=losses_utils.ReductionV2.NONE) y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mse_obj(y_true, y_pred, sample_weight=2.3) loss = self.evaluate(loss) self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3) def test_sum_reduction(self): mse_obj = losses.MeanSquaredError(reduction=losses_utils.ReductionV2.SUM) y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mse_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanAbsoluteErrorTest(tf.test.TestCase): def test_config(self): mae_obj = losses.MeanAbsoluteError( reduction=losses_utils.ReductionV2.SUM, name='mae_1') self.assertEqual(mae_obj.name, 'mae_1') self.assertEqual(mae_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3)) loss = mae_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mae_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 5.5, 3) def test_scalar_weighted(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mae_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 12.65, 3) def test_sample_weighted(self): mae_obj = 
losses.MeanAbsoluteError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = mae_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3) def test_timestep_weighted(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32) sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = mae_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3) def test_zero_weighted(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mae_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_invalid_sample_weight(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1)) sample_weight = tf.constant([3, 6, 5, 0], shape=(2, 2)) with self.assertRaisesRegex((ValueError, tf.errors.InvalidArgumentError), (r'Incompatible shapes: \[2,3\] vs. 
\[2,2\]|' 'Dimensions must be equal')): mae_obj(y_true, y_pred, sample_weight=sample_weight) def test_no_reduction(self): mae_obj = losses.MeanAbsoluteError(reduction=losses_utils.ReductionV2.NONE) y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mae_obj(y_true, y_pred, sample_weight=2.3) loss = self.evaluate(loss) self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3) def test_sum_reduction(self): mae_obj = losses.MeanAbsoluteError(reduction=losses_utils.ReductionV2.SUM) y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mae_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3) def test_ragged_tensor(self): mae_obj = losses.MeanAbsoluteError() y_true = tf.ragged.constant([[1, 9, 2], [-5, -2]], dtype=tf.float32) y_pred = tf.ragged.constant([[4, 8, 12], [8, 1]], dtype=tf.float32) # loss = [14/3, 16/2] sample_weight = tf.constant([1.2, 1.0], shape=(2, 1)) loss = mae_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 6.8, 5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanAbsolutePercentageErrorTest(tf.test.TestCase): def test_config(self): mape_obj = losses.MeanAbsolutePercentageError( reduction=losses_utils.ReductionV2.SUM, name='mape_1') self.assertEqual(mape_obj.name, 'mape_1') self.assertEqual(mape_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mape_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), 
dtype=tf.float32) loss = mape_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3) def test_scalar_weighted(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mape_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 487.259, 3) def test_sample_weighted(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = mape_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3) def test_ragged_tensors(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.ragged.constant([[1, 9, 2], [-5, -2]]) y_pred = tf.ragged.constant([[4, 8, 12], [8, 1]], dtype=tf.float32) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = mape_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 510.7222, 3) def test_timestep_weighted(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32) sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = mape_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3) def test_zero_weighted(self): mape_obj = losses.MeanAbsolutePercentageError() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = mape_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_no_reduction(self): mape_obj = losses.MeanAbsolutePercentageError( 
        reduction=losses_utils.ReductionV2.NONE)
    y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = tf.constant([4, 8, 12, 8, 1, 3],
                         shape=(2, 3),
                         dtype=tf.float32)
    loss = mape_obj(y_true, y_pred, sample_weight=2.3)
    loss = self.evaluate(loss)
    # With Reduction.NONE the loss keeps one value per batch element.
    self.assertArrayNear(loss, [621.8518, 352.6666], 1e-3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredLogarithmicErrorTest(tf.test.TestCase):
  """Tests for `losses.MeanSquaredLogarithmicError`."""

  def test_config(self):
    msle_obj = losses.MeanSquaredLogarithmicError(
        reduction=losses_utils.ReductionV2.SUM, name='mape_1')
    self.assertEqual(msle_obj.name, 'mape_1')
    self.assertEqual(msle_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    msle_obj = losses.MeanSquaredLogarithmicError()
    y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = tf.constant([4, 8, 12, 8, 1, 3],
                         shape=(2, 3),
                         dtype=tf.float32)
    loss = msle_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3)

  def test_scalar_weighted(self):
    msle_obj = losses.MeanSquaredLogarithmicError()
    y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = tf.constant([4, 8, 12, 8, 1, 3],
                         shape=(2, 3),
                         dtype=tf.float32)
    loss = msle_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)

  def test_sample_weighted(self):
    msle_obj = losses.MeanSquaredLogarithmicError()
    y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = tf.constant([4, 8, 12, 8, 1, 3],
                         shape=(2, 3),
                         dtype=tf.float32)
    sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
    loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)

  def test_timestep_weighted(self):
    # Per-timestep weighting: targets have a trailing time axis of size 1 and
    # the sample weight supplies one value per (batch, timestep) entry.
    msle_obj = losses.MeanSquaredLogarithmicError()
    y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = tf.constant([4, 8, 12, 8, 1, 3],
                         shape=(2, 3, 1),
                         dtype=tf.float32)
    sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)

  def test_zero_weighted(self):
    msle_obj = losses.MeanSquaredLogarithmicError()
    y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = tf.constant([4, 8, 12, 8, 1, 3],
                         shape=(2, 3),
                         dtype=tf.float32)
    loss = msle_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_ragged_tensors(self):
    msle_obj = losses.MeanSquaredLogarithmicError()
    y_true = tf.ragged.constant([[1, 9, 2], [-5, -2]])
    # log(max(y_true, 0) + 1): [[0.69314, 2.3025, 1.0986], [0., 0.]]
    y_pred = tf.ragged.constant([[4, 8, 12], [8, 1]], dtype=tf.float32)
    # log(max(y_pred, 0) + 1): [[1.6094, 2.1972, 2.5649], [2.1972, 0.6932]]
    # per batch loss: [1.0002, 2.6541]
    sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
    loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 5.1121, 3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CosineSimilarityTest(tf.test.TestCase):
  """Tests for `losses.CosineSimilarity`."""

  def l2_norm(self, x, axis):
    # NumPy reference implementation of L2 normalization along `axis`.
    epsilon = 1e-12
    square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
    x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
    return np.multiply(x, x_inv_norm)

  def setup(self, axis=1):
    # Precompute the expected per-sample cosine similarity with NumPy so the
    # tests below only need to negate/average/weight it.
    self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
    self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)

    y_true = self.l2_norm(self.np_y_true, axis)
    y_pred = self.l2_norm(self.np_y_pred, axis)
    self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))

    self.y_true = tf.constant(self.np_y_true)
    self.y_pred = tf.constant(self.np_y_pred)

  def test_config(self):
    cosine_obj = losses.CosineSimilarity(
        axis=2, reduction=losses_utils.ReductionV2.SUM, name='cosine_loss')
    self.assertEqual(cosine_obj.name, 'cosine_loss')
    self.assertEqual(cosine_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    self.setup()
    cosine_obj = losses.CosineSimilarity()
    loss = cosine_obj(self.y_true, self.y_pred)
    # The loss is the negated mean similarity (a similarity of 1 gives
    # loss -1).
    expected_loss = -np.mean(self.expected_loss)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    cosine_obj = losses.CosineSimilarity()
    sample_weight = 2.3
    loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = -np.mean(self.expected_loss * sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_sample_weighted(self):
    self.setup()
    cosine_obj = losses.CosineSimilarity()
    sample_weight = np.asarray([1.2, 3.4])
    loss = cosine_obj(
        self.y_true, self.y_pred, sample_weight=tf.constant(sample_weight))
    expected_loss = -np.mean(self.expected_loss * sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    cosine_obj = losses.CosineSimilarity()
    np_y_true = self.np_y_true.reshape((2, 3, 1))
    np_y_pred = self.np_y_pred.reshape((2, 3, 1))
    sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))

    # Recompute the expectation along the last axis (size 1) to match the
    # timestep layout.
    y_true = self.l2_norm(np_y_true, 2)
    y_pred = self.l2_norm(np_y_pred, 2)
    expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))

    y_true = tf.constant(np_y_true)
    y_pred = tf.constant(np_y_pred)
    loss = cosine_obj(
        y_true, y_pred, sample_weight=tf.constant(sample_weight))

    expected_loss = -np.mean(expected_loss * sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    cosine_obj = losses.CosineSimilarity()
    loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

  def test_axis(self):
    self.setup(axis=1)
    cosine_obj = losses.CosineSimilarity(axis=1)
    loss = cosine_obj(self.y_true, self.y_pred)
    expected_loss = -np.mean(self.expected_loss)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BinaryCrossentropyTest(tf.test.TestCase):
  """Tests for `losses.BinaryCrossentropy`."""

  def test_config(self):
    bce_obj = losses.BinaryCrossentropy(
        reduction=losses_utils.ReductionV2.SUM, name='bce_1')
    self.assertEqual(bce_obj.name, 'bce_1')
    self.assertEqual(bce_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=tf.float32)
    bce_obj = losses.BinaryCrossentropy()
    loss = bce_obj(y_true, y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

    # Test with logits.
    logits = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0],
                          [-100.0, -100.0, 100.0]])
    bce_obj = losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    bce_obj = losses.BinaryCrossentropy()
    loss = bce_obj(y_true, y_pred)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #         -log(Y_MAX + EPSILON), -log(1)]
    #      = [0, 15.33, 0, 0]
    # Reduced loss = 15.33 / 4
    self.assertAlmostEqual(self.evaluate(loss), 3.833, 3)

    # Test with logits.
    y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
    logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
    bce_obj = losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #        (where x = logits and z = y_true)
    #      = [((100 - 100 * 1 + log(1 + exp(-100))) +
    #          (0 + 100 * 0 + log(1 + exp(-100))) +
    #          (100 - 100 * 1 + log(1 + exp(-100))),
    #         ((100 - 100 * 0 + log(1 + exp(-100))) +
    #          (100 - 100 * 1 + log(1 + exp(-100))) +
    #          (0 + 100 * 1 + log(1 + exp(-100))))]
    #      = [(0 + 0 + 0) / 3, 200 / 3]
    # Reduced loss = (0 + 66.666) / 2
    self.assertAlmostEqual(self.evaluate(loss), 33.333, 3)

  def test_scalar_weighted(self):
    bce_obj = losses.BinaryCrossentropy()
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    loss = bce_obj(y_true, y_pred, sample_weight=2.3)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #         -log(Y_MAX + EPSILON), -log(1)]
    #      = [0, 15.33, 0, 0]
    # Weighted loss = [0, 15.33 * 2.3, 0, 0]
    # Reduced loss = 15.33 * 2.3 / 4
    self.assertAlmostEqual(self.evaluate(loss), 8.817, 3)

    # Test with logits.
    y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
    logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
    bce_obj = losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=2.3)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #        (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0) / 3, 200 / 3]
    # Weighted loss = [0 * 2.3, 66.666 * 2.3]
    # Reduced loss = (0 + 66.666 * 2.3) / 2
    self.assertAlmostEqual(self.evaluate(loss), 76.667, 3)

  def test_sample_weighted(self):
    bce_obj = losses.BinaryCrossentropy()
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
    loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #         -log(Y_MAX + EPSILON), -log(1)]
    #      = [0, 15.33, 0, 0]
    # Reduced loss = 15.33 * 1.2 / 4
    self.assertAlmostEqual(self.evaluate(loss), 4.6, 3)

    # Test with logits.
    y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
    logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
    weights = tf.constant([4, 3])
    bce_obj = losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=weights)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #        (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0)/3, 200 / 3]
    # Weighted loss = [0 * 4, 66.666 * 3]
    # Reduced loss = (0 + 66.666 * 3) / 2
    self.assertAlmostEqual(self.evaluate(loss), 100, 3)

  def test_no_reduction(self):
    y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
    logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
    bce_obj = losses.BinaryCrossentropy(
        from_logits=True, reduction=losses_utils.ReductionV2.NONE)
    loss = bce_obj(y_true, logits)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #        (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0)/3, (200)/3]
    self.assertAllClose((0., 66.6666), self.evaluate(loss), 3)

  def test_label_smoothing(self):
    logits = tf.constant([[100.0, -100.0, -100.0]])
    y_true = tf.constant([[1, 0, 1]])
    label_smoothing = 0.1
    # Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #       (where x = logits and z = y_true)
    # Label smoothing: z' = z * (1 - L) + 0.5L
    #                  1  = 1 - 0.5L
    #                  0  = 0.5L
    # Applying the above two fns to the given input:
    # (100 - 100 * (1 - 0.5 L)  + 0 +
    #  0   + 100 * (0.5 L)      + 0 +
    #  0   + 100 * (1 - 0.5 L)  + 0) * (1/3)
    #  = (100 + 50L) * 1/3
    bce_obj = losses.BinaryCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    loss = bce_obj(y_true, logits)
    expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
    self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)

  def test_ragged_tensors(self):
    bce_obj = losses.BinaryCrossentropy()
    y_true = tf.ragged.constant([[1, 0, 1], [0]])
    y_pred = tf.ragged.constant([[1, 1, 1], [0]], dtype=tf.float32)
    sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
    loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)

    # per batch loss = [ sum([0, 15.33, 0]) / 3, 0.
    # ]  = [ 5.11, 0]
    # Reduced loss = 5.11 * 1.2 / 2
    self.assertAlmostEqual(self.evaluate(loss), 3.0666, 3)

    # Test with logits.
    y_true = tf.ragged.constant([[1, 0, 1], [0, 1]])
    logits = tf.ragged.constant([[100.0, -100.0, 100.0], [100.0, 100.0]])
    weights = tf.constant([4, 3])
    bce_obj = losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=weights)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #        (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0)/3, 100 / 2]
    # Weighted loss = [0 * 4, 50 * 3]
    # Reduced loss = (0 + 50 * 3) / 2
    self.assertAlmostEqual(self.evaluate(loss), 75., 3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalCrossentropyTest(tf.test.TestCase):
  """Tests for `losses.CategoricalCrossentropy` (one-hot labels)."""

  def test_config(self):
    cce_obj = losses.CategoricalCrossentropy(
        reduction=losses_utils.ReductionV2.SUM, name='bce_1')
    self.assertEqual(cce_obj.name, 'bce_1')
    self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=tf.int64)
    y_pred = tf.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
                         dtype=tf.float32)
    cce_obj = losses.CategoricalCrossentropy()
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

    # Test with logits.
    logits = tf.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
    cce_obj = losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    cce_obj = losses.CategoricalCrossentropy()
    y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = tf.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=tf.float32)
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), .3239, 3)

    # Test with logits.
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), .0573, 3)

  def test_scalar_weighted(self):
    cce_obj = losses.CategoricalCrossentropy()
    y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = tf.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=tf.float32)
    loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .7449, 3)

    # Test with logits.
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .1317, 3)

  def test_sample_weighted(self):
    cce_obj = losses.CategoricalCrossentropy()
    y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = tf.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=tf.float32)
    sample_weight = tf.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)

    # Test with logits.
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)

  def test_no_reduction(self):
    y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.CategoricalCrossentropy(
        from_logits=True, reduction=losses_utils.ReductionV2.NONE)
    loss = cce_obj(y_true, logits)
    self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)

  def test_label_smoothing(self):
    logits = tf.constant([[100.0, -100.0, -100.0]])
    y_true = tf.constant([[1, 0, 0]])
    label_smoothing = 0.1
    # Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
    # where for a softmax activation
    # \log q_i = x_i - \log \sum_j \exp x_j
    #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
    # For our activations, [100, -100, -100]
    # \log ( exp(0) + exp(-200) + exp(-200) ) = 0
    # so our log softmaxes become: [0, -200, -200]
    # Label smoothing: z' = z * (1 - L) + L/n
    #                  1  = 1 - L + L/n
    #                  0  = L/n
    # Applying the above two fns to the given input:
    # -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
    cce_obj = losses.CategoricalCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    loss = cce_obj(y_true, logits)
    expected_value = 400.0 * label_smoothing / 3.0
    self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)

  def test_shape_mismatch(self):
    # Sparse (index) labels are rejected by the one-hot loss.
    y_true = tf.constant([[0], [1], [2]])
    y_pred = tf.constant([[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])
    cce_obj = losses.CategoricalCrossentropy()
    with self.assertRaisesRegex(ValueError, 'Shapes .+ are incompatible'):
      cce_obj(y_true, y_pred)

  def test_ragged_tensors(self):
    cce_obj = losses.CategoricalCrossentropy()
    y_true = tf.ragged.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
    y_pred = tf.ragged.constant(
        [[[.9, .05, .05], [.5, .89, .6]], [[.05, .01, .94]]],
        dtype=tf.float32)
    # batch losses [[0.1054, 0.8047], [0.0619]]
    sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    # sum([0.1054, 0.8047, 0.0619]) / 3
    self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)

    # Test with logits.
    logits = tf.ragged.constant([[[8., 1., 1.], [0., 9., 1.]], [[2., 3., 5.]]])
    cce_obj = losses.CategoricalCrossentropy(from_logits=True)
    # batch losses [[0.0018, 0.0004], [0.1698]]
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)

  def test_ragged_tensors_ragged_sample_weights(self):
    cce_obj = losses.CategoricalCrossentropy()
    y_true = tf.ragged.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
    y_pred = tf.ragged.constant(
        [[[.9, .05, .05], [.05, .89, .06]], [[.05, .01, .94]]],
        dtype=tf.float32)
    # batch losses [[0.1054, 0.1165], [0.0619]]
    # Use independent weights for each batch element
    sample_weight = tf.ragged.constant([[1.2, 3.4], [5.6]], dtype=tf.float32)
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    # sum([0.1054*1.2, 0.1165*3.4, 0.0619*5.6])/3
    self.assertAlmostEqual(self.evaluate(loss), 0.2897, 3)

    # Test with logits.
    logits = tf.ragged.constant([[[8., 1., 1.], [0., 9., 1.]], [[2., 3., 5.]]])
    cce_obj = losses.CategoricalCrossentropy(from_logits=True)
    # batch losses [[0.0018, 0.0004], [0.1698]]
    # sum([0.0018*1.2, 0.0004*3.4, 0.1698*5.6]) / 3
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.3181, 3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SparseCategoricalCrossentropyTest(tf.test.TestCase):
  """Tests for `losses.SparseCategoricalCrossentropy` (integer labels)."""

  def test_config(self):
    cce_obj = losses.SparseCategoricalCrossentropy(
        reduction=losses_utils.ReductionV2.SUM, name='scc')
    self.assertEqual(cce_obj.name, 'scc')
    self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    y_true = tf.constant([[0], [1], [2]], dtype=tf.int64)
    y_pred = tf.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
                         dtype=tf.float32)
    cce_obj = losses.SparseCategoricalCrossentropy()
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

    # Test with logits.
    logits = tf.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
    cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    cce_obj = losses.SparseCategoricalCrossentropy()
    y_true = tf.constant([0, 1, 2])
    y_pred = tf.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=tf.float32)
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), .3239, 3)

    # Test with logits.
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), .0573, 3)

  def test_scalar_weighted(self):
    cce_obj = losses.SparseCategoricalCrossentropy()
    y_true = tf.constant([[0], [1], [2]])
    y_pred = tf.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=tf.float32)
    loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .7449, 3)

    # Test with logits.
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .1317, 3)

  def test_sample_weighted(self):
    cce_obj = losses.SparseCategoricalCrossentropy()
    y_true = tf.constant([[0], [1], [2]])
    y_pred = tf.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=tf.float32)
    sample_weight = tf.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)

    # Test with logits.
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)

  def test_no_reduction(self):
    y_true = tf.constant([[0], [1], [2]])
    logits = tf.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction=losses_utils.ReductionV2.NONE)
    loss = cce_obj(y_true, logits)
    self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)

  def test_non_tensor(self):
    # Test case for GitHub issue 33394.
    cce_obj = losses.SparseCategoricalCrossentropy()
    y_true = [[0], [1], [2]]
    y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]]
    loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .7449, 3)

  def test_ragged_tensors(self):
    cce_obj = losses.SparseCategoricalCrossentropy()
    y_true = tf.ragged.constant([[0, 1], [2]])
    y_pred = tf.ragged.constant(
        [[[.9, .05, .05], [.5, .89, .6]], [[.05, .01, .94]]],
        dtype=tf.float32)
    # batch losses [[0.1054, 0.8047], [0.0619]]
    sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    # sum([0.1054, 0.8047, 0.0619]) / 3
    self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)

    # Test with logits.
    logits = tf.ragged.constant([[[8., 1., 1.], [0., 9., 1.]], [[2., 3., 5.]]])
    cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
    # batch losses [[0.0018, 0.0004], [0.1698]]
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)

  def test_ragged_tensors_rank_1(self):
    cce_obj = losses.SparseCategoricalCrossentropy()
    y_true = tf.ragged.constant([[0, 1], [2]])
    y_pred = tf.ragged.constant(
        [[[.9, .05, .05], [.5, .89, .6]], [[.05, .01, .94]]],
        ragged_rank=1,
        dtype=tf.float32)
    # batch losses [[0.1054, 0.8047], [0.0619]]
    sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    # sum([0.1054, 0.8047, 0.0619]) / 3
    self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)

    # Test with logits.
    logits = tf.ragged.constant(
        [[[8., 1., 1.], [0., 9., 1.]], [[2., 3., 5.]]], ragged_rank=1)
    cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
    # batch losses [[0.0018, 0.0004], [0.1698]]
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)

  def test_ragged_tensors_3d(self):
    # shape [2, 1, None]
    y_true = tf.ragged.constant([[[1, 1]], [[0]]])
    # shape [2, 1, None, 2]
    y_pred = tf.ragged.constant([[[[0.1, 0.9], [0.1, 0.9]]], [[[0.9, 0.1]]]])
    cce_obj = losses.SparseCategoricalCrossentropy()
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 0.1054, 3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class HingeTest(tf.test.TestCase):
  """Tests for `losses.Hinge` (0/1 labels are converted to -1/1)."""

  def test_config(self):
    hinge_obj = losses.Hinge(
        reduction=losses_utils.ReductionV2.SUM, name='hinge_loss')
    self.assertEqual(hinge_obj.name, 'hinge_loss')
    self.assertEqual(hinge_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    hinge_obj = losses.Hinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    #      = [0.6, 0.4125]
    # reduced loss = (0.6 + 0.4125) / 2

    loss = hinge_obj(y_true, y_pred)
    self.assertAllClose(0.506, self.evaluate(loss), atol=1e-3)

  def test_scalar_weighted(self):
    hinge_obj = losses.Hinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    #      = [0.6, 0.4125]
    # weighted_loss = [0.6 * 2.3, 0.4125 * 2.3]
    # reduced loss = (0.6 + 0.4125) * 2.3 / 2

    loss = hinge_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 1.164, 3)

    # Verify we get the same output when the same input is given
    loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAllClose(self.evaluate(loss), self.evaluate(loss_2), 1e-3)

  def test_sample_weighted(self):
    hinge_obj = losses.Hinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    #      = [0.6, 0.4125]
    # weighted loss = [0.6 * 1.2, 0.4125 * 3.4]
    # reduced loss = (0.6 * 1.2 + 0.4125 * 3.4) / 2

    sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
    loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(self.evaluate(loss), 1.061, 1e-3)

  def test_timestep_weighted(self):
    hinge_obj = losses.Hinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
    y_pred = tf.constant(
        [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
    sample_weight = tf.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
    # y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
    #                    [[0.25], [1], [0.5], [0.6]]]
    # 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
    #                        [[0.75], [0], [0.5], [0.4]]]
    # loss = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # weighted loss = [[2.1, 4.8, 4.5, 0], [3, 0, 0.5, 1.2]]
    # reduced loss = (2.1 + 4.8 + 4.5 + 0 + 3 + 0 + 0.5 + 1.2) / 8

    loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(self.evaluate(loss), 2.012, 1e-3)

  def test_zero_weighted(self):
    hinge_obj = losses.Hinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])
    loss = hinge_obj(y_true, y_pred, sample_weight=0)
    self.assertAllClose(self.evaluate(loss), 0., 1e-3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SquaredHingeTest(tf.test.TestCase):
  """Tests for `losses.SquaredHinge`."""

  def test_config(self):
    sq_hinge_obj = losses.SquaredHinge(
        reduction=losses_utils.ReductionV2.SUM, name='sq_hinge_loss')
    self.assertEqual(sq_hinge_obj.name, 'sq_hinge_loss')
    self.assertEqual(sq_hinge_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    sq_hinge_obj = losses.SquaredHinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
    #      = [0.485, 0.2431]
    # reduced loss = (0.485 + 0.2431) / 2

    loss = sq_hinge_obj(y_true, y_pred)
    self.assertAllClose(self.evaluate(loss), 0.364, 1e-3)

  def test_scalar_weighted(self):
    sq_hinge_obj = losses.SquaredHinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
    #      = [0.485, 0.2431]
    # weighted loss = [0.485 * 2.3, 0.2431 * 2.3]
    # reduced loss = (0.485 + 0.2431) * 2.3 / 2

    loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAllClose(self.evaluate(loss), 0.837, 1e-3)

    # Verify we get the same output when the same input is given
    loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    sq_hinge_obj = losses.SquaredHinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
    #      = [0.485, 0.2431]
    # weighted loss = [0.485 * 1.2, 0.2431 * 3.4]
    # reduced loss = (0.485 * 1.2 + 0.2431 * 3.4) / 2

    sample_weight = tf.constant([1.2, 3.4])
    loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(self.evaluate(loss), 0.704, 1e-3)

  def test_timestep_weighted(self):
    sq_hinge_obj = losses.SquaredHinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
    y_pred = tf.constant(
        [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
    sample_weight = tf.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))

    # loss = max(0, 1-y_true * y_pred), where y_true is -1/1

    # y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
    # y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
    #                    [[0.25], [1], [0.5], [0.6]]]
    # 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
    #                        [[0.75], [0], [0.5], [0.4]]]
    # loss = [[0.49, 0.64, 0.81, 0], [0.5625, 0, 0.25, 0.16]]
    # weighted loss = [[1.47, 3.84, 4.05, 0], [2.25, 0, 0.25, 0.48]]
    # reduced loss = (1.47 + 3.84 + 4.05 + 0 + 2.25 + 0 + 0.25 + 0.48) / 8

    loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(self.evaluate(loss), 1.542, 1e-3)

  def test_zero_weighted(self):
    sq_hinge_obj = losses.SquaredHinge()
    y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])
    loss = sq_hinge_obj(y_true, y_pred, sample_weight=0)
    self.assertAllClose(self.evaluate(loss), 0., 1e-3)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalHingeTest(tf.test.TestCase):
  """Tests for `losses.CategoricalHinge`."""

  def test_config(self):
    cat_hinge_obj = losses.CategoricalHinge(
        reduction=losses_utils.ReductionV2.SUM, name='cat_hinge_loss')
    self.assertEqual(cat_hinge_obj.name, 'cat_hinge_loss')
    self.assertEqual(cat_hinge_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    cat_hinge_obj = losses.CategoricalHinge()
    y_true = tf.constant([1, 9, 2, -5], shape=(2, 2))
    y_pred = tf.constant([4, 8, 12, 8], shape=(2, 2), dtype=tf.float32)
    loss = cat_hinge_obj(y_true, y_pred)

    # pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16]
    # neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0, 48]
    # cat_hinge = max(0., neg - pos + 1.)
= [0, 65] # reduced_loss = (0 + 65)/2 = 32.5 self.assertAlmostEqual(self.evaluate(loss), 32.5, 3) def test_scalar_weighted(self): cat_hinge_obj = losses.CategoricalHinge() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 83.95, 3) # Verify we get the same output when the same input is given loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): cat_hinge_obj = losses.CategoricalHinge() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 124.1, 3) def test_timestep_weighted(self): cat_hinge_obj = losses.CategoricalHinge() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32) sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 4.0, 3) def test_zero_weighted(self): cat_hinge_obj = losses.CategoricalHinge() y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32) loss = cat_hinge_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class LogCoshTest(tf.test.TestCase): def setup(self): y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3)) y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3)) self.batch_size = 6 error = y_pred - y_true self.expected_losses = 
np.log((np.exp(error) + np.exp(-error)) / 2) self.y_pred = tf.constant(y_pred, dtype=tf.float32) self.y_true = tf.constant(y_true) def test_config(self): logcosh_obj = losses.LogCosh( reduction=losses_utils.ReductionV2.SUM, name='logcosh_loss') self.assertEqual(logcosh_obj.name, 'logcosh_loss') self.assertEqual(logcosh_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): self.setup() logcosh_obj = losses.LogCosh() loss = logcosh_obj(self.y_true, self.y_pred) expected_loss = np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_scalar_weighted(self): self.setup() logcosh_obj = losses.LogCosh() sample_weight = 2.3 loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = sample_weight * np.sum( self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) # Verify we get the same output when the same input is given loss_2 = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() logcosh_obj = losses.LogCosh() sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))) expected_loss = np.sum(expected_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_timestep_weighted(self): self.setup() logcosh_obj = losses.LogCosh() y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1) y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1) error = y_pred - y_true expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2) sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1)) y_pred = tf.constant(y_pred, dtype=tf.float32) y_true = tf.constant(y_true) loss = 
logcosh_obj( y_true, y_pred, sample_weight=tf.constant(sample_weight, shape=(2, 3))) expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_zero_weighted(self): self.setup() logcosh_obj = losses.LogCosh() sample_weight = 0 loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class PoissonTest(tf.test.TestCase): def setup(self): self.np_y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3)) self.np_y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3)) self.batch_size = 6 self.expected_losses = self.np_y_pred - np.multiply(self.np_y_true, np.log(self.np_y_pred)) self.y_pred = tf.constant(self.np_y_pred, dtype=tf.float32) self.y_true = tf.constant(self.np_y_true) def test_config(self): poisson_obj = losses.Poisson( reduction=losses_utils.ReductionV2.SUM, name='poisson') self.assertEqual(poisson_obj.name, 'poisson') self.assertEqual(poisson_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): self.setup() poisson_obj = losses.Poisson() loss = poisson_obj(self.y_true, self.y_pred) expected_loss = np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_scalar_weighted(self): self.setup() poisson_obj = losses.Poisson() sample_weight = 2.3 loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = sample_weight * np.sum( self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) # Verify we get the same output when the same input is given loss_2 = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() 
poisson_obj = losses.Poisson() sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))) expected_loss = np.sum(expected_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_timestep_weighted(self): self.setup() poisson_obj = losses.Poisson() y_true = self.np_y_true.reshape(2, 3, 1) y_pred = self.np_y_pred.reshape(2, 3, 1) sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1) expected_losses = y_pred - np.multiply(y_true, np.log(y_pred)) y_pred = tf.constant(y_pred, dtype=tf.float32) y_true = tf.constant(y_true) loss = poisson_obj( y_true, y_pred, sample_weight=tf.constant(sample_weight, shape=(2, 3))) expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_zero_weighted(self): self.setup() poisson_obj = losses.Poisson() loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class KLDivergenceTest(tf.test.TestCase): def setup(self): self.np_y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3)) self.np_y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3)) self.batch_size = 2 self.expected_losses = np.multiply(self.np_y_true, np.log(self.np_y_true / self.np_y_pred)) self.y_pred = tf.constant(self.np_y_pred, dtype=tf.float32) self.y_true = tf.constant(self.np_y_true) def test_config(self): k_obj = losses.KLDivergence( reduction=losses_utils.ReductionV2.SUM, name='kld') self.assertEqual(k_obj.name, 'kld') self.assertEqual(k_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): self.setup() k_obj = losses.KLDivergence() loss = k_obj(self.y_true, self.y_pred) expected_loss = 
np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_scalar_weighted(self): self.setup() k_obj = losses.KLDivergence() sample_weight = 2.3 loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = sample_weight * np.sum( self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) # Verify we get the same output when the same input is given loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() k_obj = losses.KLDivergence() sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(2, 3)) expected_loss = np.sum(expected_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_timestep_weighted(self): self.setup() k_obj = losses.KLDivergence() y_true = self.np_y_true.reshape(2, 3, 1) y_pred = self.np_y_pred.reshape(2, 3, 1) sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3) expected_losses = np.sum( np.multiply(y_true, np.log(y_true / y_pred)), axis=-1) y_pred = tf.constant(y_pred, dtype=tf.float32) y_true = tf.constant(y_true) loss = k_obj( y_true, y_pred, sample_weight=tf.constant(sample_weight)) num_timesteps = 3 expected_loss = np.sum(expected_losses * sample_weight) / ( self.batch_size * num_timesteps) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_zero_weighted(self): self.setup() k_obj = losses.KLDivergence() loss = k_obj(self.y_true, self.y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class HuberLossTest(tf.test.TestCase): def huber_loss(self, y_true, y_pred, delta=1.0): 
error = y_pred - y_true abs_error = np.abs(error) quadratic = np.minimum(abs_error, delta) linear = np.subtract(abs_error, quadratic) return np.add( np.multiply(0.5, np.multiply(quadratic, quadratic)), np.multiply(delta, linear)) def setup(self, delta=1.0): self.np_y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3)) self.np_y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3)) self.batch_size = 6 self.expected_losses = self.huber_loss(self.np_y_true, self.np_y_pred, delta) self.y_pred = tf.constant(self.np_y_pred) self.y_true = tf.constant(self.np_y_true) def test_config(self): h_obj = losses.Huber(reduction=losses_utils.ReductionV2.SUM, name='huber') self.assertEqual(h_obj.name, 'huber') self.assertEqual(h_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct(self): self.setup() h_obj = losses.Huber() loss = h_obj(self.y_true, self.y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): self.setup() h_obj = losses.Huber() loss = h_obj(self.y_true, self.y_pred) actual_loss = np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_scalar_weighted(self): self.setup() h_obj = losses.Huber() sample_weight = 2.3 loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) # Verify we get the same output when the same input is given loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() h_obj = losses.Huber() sample_weight = tf.constant((1.2, 3.4), shape=(2, 1)) loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) actual_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))) actual_loss = np.sum(actual_loss) / 
self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_timestep_weighted(self): self.setup() h_obj = losses.Huber() y_pred = self.np_y_pred.reshape((2, 3, 1)) y_true = self.np_y_true.reshape((2, 3, 1)) expected_losses = self.huber_loss(y_true, y_pred) y_pred = tf.constant(y_pred) y_true = tf.constant(y_true) sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1)) loss = h_obj( y_true, y_pred, sample_weight=tf.constant(sample_weight, shape=(2, 3))) actual_loss = np.multiply(expected_losses, sample_weight) actual_loss = np.sum(actual_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_zero_weighted(self): self.setup() h_obj = losses.Huber() sample_weight = 0 loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 0., 3) def test_non_default_delta(self): self.setup(delta=0.8) h_obj = losses.Huber(delta=0.8) sample_weight = 2.3 loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_loss_with_non_default_dtype(self): # Test case for GitHub issue: # https://github.com/tensorflow/tensorflow/issues/39004 self.setup() h_obj = losses.Huber() try: backend.set_floatx('float64') loss = h_obj(self.y_true, self.y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) finally: backend.set_floatx('float32') class BinaryTruePositivesViaControlFlow(losses.Loss): def __init__(self, reduction=losses_utils.ReductionV2.AUTO): super().__init__(reduction=reduction) def call(self, y_true, y_pred): y_true = tf.cast(y_true, tf.bool) y_pred = tf.cast(y_pred, tf.bool) result = tf.constant(0.0) for i in range(len(y_true)): for j in range(len(y_true[i])): if y_true[i][j] and y_pred[i][j]: result = result + 1 return result @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class 
CustomLossTest(tf.test.TestCase): def test_autograph(self): y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]]) y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]]) @tf.function def loss_fn(y_true, y_pred): loss_obj = BinaryTruePositivesViaControlFlow() return loss_obj(y_true, y_pred) loss = loss_fn(y_true, y_pred) self.assertAllEqual( self.evaluate(loss), 7.0, ) if __name__ == '__main__': tf.test.main()
79,819
40.3147
82
py
keras
keras-master/keras/losses.py
# NOTE(review): this chunk was recovered from a newline-stripped dump of
# keras/losses.py (module head); the original line structure has been
# reconstructed with tokens unchanged. The trailing
# SparseCategoricalCrossentropy docstring is cut mid-text because the class
# continues past this chunk.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# pylint: disable=g-classes-have-attributes
"""Built-in loss functions."""

import tensorflow.compat.v2 as tf

import abc
import functools

from keras import backend
from keras.utils import losses_utils
from keras.utils import tf_utils
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


@keras_export('keras.losses.Loss')
class Loss:
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`,
    `y_pred`.

  Example subclass implementation:

  ```python
  class MeanSquaredError(Loss):

    def call(self, y_true, y_pred):
      y_pred = tf.convert_to_tensor_v2(y_pred)
      y_true = tf.cast(y_true, y_pred.dtype)
      return tf.reduce_mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  When used with `tf.distribute.Strategy`, outside of built-in training loops
  such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
  types, and reduce losses explicitly in your training loop. Using 'AUTO' or
  'SUM_OVER_BATCH_SIZE' will raise an error.

  Please see this custom training [tutorial](
  https://www.tensorflow.org/tutorials/distribute/custom_training) for more
  details on this.

  You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:

  ```python
  with strategy.scope():
    loss_obj = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    ....
    loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
            (1. / global_batch_size))
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
    """Initializes `Loss` class.

    Args:
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance.
    """
    losses_utils.ReductionV2.validate(reduction)
    self.reduction = reduction
    self.name = name
    # SUM_OVER_BATCH is only allowed in losses managed by `fit` or
    # CannedEstimators.
    self._allow_sum_over_batch_size = False
    self._set_name_scope()

  def _set_name_scope(self):
    """Creates a valid `name_scope` name."""
    if self.name is None:
      self._name_scope = self.__class__.__name__
    elif self.name == '<lambda>':
      self._name_scope = 'lambda'
    else:
      # E.g. '_my_loss' => 'my_loss'
      self._name_scope = self.name.strip('_')

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a coefficient for the
        loss. If a scalar is provided, then the loss is simply scaled by the
        given value. If `sample_weight` is a tensor of size `[batch_size]`,
        then the total loss for each sample of the batch is rescaled by the
        corresponding element in the `sample_weight` vector. If the shape of
        `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
        to this shape), then each loss element of `y_pred` is scaled by the
        corresponding value of `sample_weight`. (Note on `dN-1`: all loss
        functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
        shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note
        `dN-1` because all loss functions reduce by 1 dimension, usually
        axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # If we are wrapping a lambda function strip '<>' from the name as it is not
    # accepted in scope name.
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with backend.name_scope(self._name_scope), graph_ctx:
      if tf.executing_eagerly():
        call_fn = self.call
      else:
        call_fn = tf.__internal__.autograph.tf_convert(
            self.call, tf.__internal__.autograph.control_status_ctx())
      losses = call_fn(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
        config: Output of `get_config()`.

    Returns:
        A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the config dictionary for a `Loss` instance."""
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  @doc_controls.for_subclass_implementers
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`

    Returns:
      Loss values with the shape `[batch_size, d0, .. dN-1]`.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  def _get_reduction(self):
    """Handles `AUTO` reduction cases and returns the reduction value."""
    if (not self._allow_sum_over_batch_size and
        tf.distribute.has_strategy() and
        (self.reduction == losses_utils.ReductionV2.AUTO or
         self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
      raise ValueError(
          'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
          'used with `tf.distribute.Strategy` outside of the built-in training '
          'loops. You can implement '
          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
          'size like:\n```\nwith strategy.scope():\n'
          '    loss_obj = tf.keras.losses.CategoricalCrossentropy('
          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
          '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
          '(1. / global_batch_size)\n```\nPlease see '
          'https://www.tensorflow.org/tutorials/distribute/custom_training'
          ' for more details.')

    if self.reduction == losses_utils.ReductionV2.AUTO:
      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
    return self.reduction


class LossFunctionWrapper(Loss):
  """Wraps a loss function in the `Loss` class."""

  def __init__(self,
               fn,
               reduction=losses_utils.ReductionV2.AUTO,
               name=None,
               **kwargs):
    """Initializes `LossFunctionWrapper` class.

    Args:
      fn: The loss function to wrap, with signature `fn(y_true, y_pred,
        **kwargs)`.
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance.
      **kwargs: The keyword arguments that are passed on to `fn`.
    """
    super().__init__(reduction=reduction, name=name)
    self.fn = fn
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    """Invokes the `LossFunctionWrapper` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Loss values per sample.
    """
    if tf.is_tensor(y_pred) and tf.is_tensor(y_true):
      y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
          y_pred, y_true)

    ag_fn = tf.__internal__.autograph.tf_convert(
        self.fn, tf.__internal__.autograph.control_status_ctx())
    return ag_fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    config = {}
    for k, v in self._fn_kwargs.items():
      config[k] = backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
  """Computes the mean of squares of errors between labels and predictions.

  `loss = square(y_true - y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError()
  >>> mse(y_true, y_pred).numpy()
  0.5

  >>> # Calling with 'sample_weight'.
  >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.25

  >>> # Using 'sum' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mse(y_true, y_pred).numpy()
  1.0

  >>> # Using 'none' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mse(y_true, y_pred).numpy()
  array([0.5, 0.5], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_error'):
    """Initializes `MeanSquaredError` instance.

    Args:
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance. Defaults to 'mean_squared_error'.
    """
    super().__init__(mean_squared_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
  """Computes the mean of absolute difference between labels and predictions.

  `loss = abs(y_true - y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError()
  >>> mae(y_true, y_pred).numpy()
  0.5

  >>> # Calling with 'sample_weight'.
  >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.25

  >>> # Using 'sum' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mae(y_true, y_pred).numpy()
  1.0

  >>> # Using 'none' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mae(y_true, y_pred).numpy()
  array([0.5, 0.5], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_error'):
    """Initializes `MeanAbsoluteError` instance.

    Args:
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance. Defaults to 'mean_absolute_error'.
    """
    super().__init__(mean_absolute_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs(y_true - y_pred) / y_true`

  Standalone usage:

  >>> y_true = [[2., 1.], [2., 3.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError()
  >>> mape(y_true, y_pred).numpy()
  50.

  >>> # Calling with 'sample_weight'.
  >>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  20.

  >>> # Using 'sum' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mape(y_true, y_pred).numpy()
  100.

  >>> # Using 'none' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mape(y_true, y_pred).numpy()
  array([25., 75.], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_percentage_error'):
    """Initializes `MeanAbsolutePercentageError` instance.

    Args:
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance. Defaults to
        'mean_absolute_percentage_error'.
    """
    super().__init__(
        mean_absolute_percentage_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true + 1.) - log(y_pred + 1.))`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
  >>> msle(y_true, y_pred).numpy()
  0.240

  >>> # Calling with 'sample_weight'.
  >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.120

  >>> # Using 'sum' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> msle(y_true, y_pred).numpy()
  0.480

  >>> # Using 'none' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> msle(y_true, y_pred).numpy()
  array([0.240, 0.240], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_logarithmic_error'):
    """Initializes `MeanSquaredLogarithmicError` instance.

    Args:
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance. Defaults to
        'mean_squared_logarithmic_error'.
    """
    super().__init__(
        mean_squared_logarithmic_error, name=name, reduction=reduction)


@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
  """Computes the cross-entropy loss between true labels and predicted labels.

  Use this cross-entropy loss for binary (0 or 1) classification applications.
  The loss function requires the following inputs:

  - `y_true` (true label): This is either 0 or 1.
  - `y_pred` (predicted value): This is the model's prediction, i.e, a single
    floating-point value which either represents a
    [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
    when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
    `from_logits=False`).

  **Recommended Usage:** (set `from_logits=True`)

  With `tf.keras` API:

  ```python
  model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    ....
  )
  ```

  As a standalone function:

  >>> # Example 1: (batch_size = 1, number of samples = 4)
  >>> y_true = [0, 1, 0, 0]
  >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
  >>> bce(y_true, y_pred).numpy()
  0.865

  >>> # Example 2: (batch_size = 2, number of samples = 4)
  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
  >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
  >>> bce(y_true, y_pred).numpy()
  0.865
  >>> # Using 'sample_weight' attribute
  >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  0.243
  >>> # Using 'sum' reduction` type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> bce(y_true, y_pred).numpy()
  1.730
  >>> # Using 'none' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> bce(y_true, y_pred).numpy()
  array([0.235, 1.496], dtype=float32)

  **Default Usage:** (set `from_logits=False`)

  >>> # Make the following updates to the above "Recommended Usage" section
  >>> # 1. Set `from_logits=False`
  >>> tf.keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
  >>> # 2. Update `y_pred` to use probabilities instead of logits
  >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0.,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='binary_crossentropy'):
    """Initializes `BinaryCrossentropy` instance.

    Args:
      from_logits: Whether to interpret `y_pred` as a tensor of
        [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
        assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
      label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
        we compute the loss between the predicted labels and a smoothed version
        of the true labels, where the smoothing squeezes the labels towards
        0.5. Larger values of `label_smoothing` correspond to heavier
        smoothing.
      axis: The axis along which to compute crossentropy (the features axis).
        Defaults to -1.
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Name for the op. Defaults to 'binary_crossentropy'.
    """
    super().__init__(
        binary_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing,
        axis=axis)
    self.from_logits = from_logits


@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label classes.
  We expect labels to be provided in a `one_hot` representation. If you want to
  provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
  There should be `# classes` floating point values per feature.

  In the snippet below, there is `# classes` floating pointing values per
  example. The shape of both `y_pred` and `y_true` are
  `[batch_size, num_classes]`.

  Standalone usage:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy()
  >>> cce(y_true, y_pred).numpy()
  1.177

  >>> # Calling with 'sample_weight'.
  >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
  0.814

  >>> # Using 'sum' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> cce(y_true, y_pred).numpy()
  2.354

  >>> # Using 'none' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> cce(y_true, y_pred).numpy()
  array([0.0513, 2.303], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.CategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0.,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_crossentropy'):
    """Initializes `CategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, we assume that `y_pred` encodes a probability distribution.
      label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
        meaning the confidence on label values are relaxed. For example, if
        `0.1`, use `0.1 / num_classes` for non-target labels and
        `0.9 + 0.1 / num_classes` for target labels.
      axis: The axis along which to compute crossentropy (the features axis).
        Defaults to -1.
      reduction: Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the instance.
        Defaults to 'categorical_crossentropy'.
    """
    super().__init__(
        categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing,
        axis=axis)


@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes.  We expect labels to be provided as integers. If you want to
  provide labels using `one-hot` representation, please use
  `CategoricalCrossentropy` loss.  There should be `# classes` floating point
  values per feature for `y_pred` and a single floating point value per
  feature for `y_true`.

  In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating pointing values per example for `y_pred`.
  The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
  `[batch_size, num_classes]`.

  Standalone usage:

  >>> y_true = [1, 2]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
  >>> scce(y_true, y_pred).numpy()
  1.177

  >>> # Calling with 'sample_weight'.
  >>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
  0.814

  >>> # Using 'sum' reduction type.
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> scce(y_true, y_pred).numpy()
  2.354

  >>> # Using 'none' reduction type.
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
  ...
reduction=tf.keras.losses.Reduction.NONE) >>> scce(y_true, y_pred).numpy() array([0.0513, 2.303], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy()) ``` """ def __init__(self, from_logits=False, reduction=losses_utils.ReductionV2.AUTO, name='sparse_categorical_crossentropy'): """Initializes `SparseCategoricalCrossentropy` instance. Args: from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'sparse_categorical_crossentropy'. """ super().__init__( sparse_categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits) @keras_export('keras.losses.Hinge') class Hinge(LossFunctionWrapper): """Computes the hinge loss between `y_true` and `y_pred`. `loss = maximum(1 - y_true * y_pred, 0)` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Hinge() >>> h(y_true, y_pred).numpy() 1.3 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.55 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Hinge( ... 
reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.6 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Hinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.1, 1.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'): """Initializes `Hinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'hinge'. """ super().__init__(hinge, name=name, reduction=reduction) @keras_export('keras.losses.SquaredHinge') class SquaredHinge(LossFunctionWrapper): """Computes the squared hinge loss between `y_true` and `y_pred`. `loss = square(maximum(1 - y_true * y_pred, 0))` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.SquaredHinge() >>> h(y_true, y_pred).numpy() 1.86 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.73 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 3.72 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.46, 2.26], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='squared_hinge'): """Initializes `SquaredHinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'squared_hinge'. """ super().__init__(squared_hinge, name=name, reduction=reduction) @keras_export('keras.losses.CategoricalHinge') class CategoricalHinge(LossFunctionWrapper): """Computes the categorical hinge loss between `y_true` and `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.CategoricalHinge() >>> h(y_true, y_pred).numpy() 1.4 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.6 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.8 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.2, 1.6], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='categorical_hinge'): """Initializes `CategoricalHinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'categorical_hinge'. """ super().__init__(categorical_hinge, name=name, reduction=reduction) @keras_export('keras.losses.Poisson') class Poisson(LossFunctionWrapper): """Computes the Poisson loss between `y_true` and `y_pred`. `loss = y_pred - y_true * log(y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> p = tf.keras.losses.Poisson() >>> p(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.4 >>> # Using 'sum' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.SUM) >>> p(y_true, y_pred).numpy() 0.999 >>> # Using 'none' reduction type. >>> p = tf.keras.losses.Poisson( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> p(y_true, y_pred).numpy() array([0.999, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'): """Initializes `Poisson` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'poisson'. """ super().__init__(poisson, name=name, reduction=reduction) @keras_export('keras.losses.LogCosh') class LogCosh(LossFunctionWrapper): """Computes the logarithm of the hyperbolic cosine of the prediction error. `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error `y_pred - y_true`. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> l = tf.keras.losses.LogCosh() >>> l(y_true, y_pred).numpy() 0.108 >>> # Calling with 'sample_weight'. >>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.087 >>> # Using 'sum' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.SUM) >>> l(y_true, y_pred).numpy() 0.217 >>> # Using 'none' reduction type. >>> l = tf.keras.losses.LogCosh( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> l(y_true, y_pred).numpy() array([0.217, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='log_cosh'): """Initializes `LogCosh` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'log_cosh'. """ super().__init__(log_cosh, name=name, reduction=reduction) @keras_export('keras.losses.KLDivergence') class KLDivergence(LossFunctionWrapper): """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> kl = tf.keras.losses.KLDivergence() >>> kl(y_true, y_pred).numpy() 0.458 >>> # Calling with 'sample_weight'. >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.366 >>> # Using 'sum' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.SUM) >>> kl(y_true, y_pred).numpy() 0.916 >>> # Using 'none' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> kl(y_true, y_pred).numpy() array([0.916, -3.08e-06], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='kl_divergence'): """Initializes `KLDivergence` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'kl_divergence'. """ super().__init__(kl_divergence, name=name, reduction=reduction) @keras_export('keras.losses.Huber') class Huber(LossFunctionWrapper): """Computes the Huber loss between `y_true` and `y_pred`. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Huber() >>> h(y_true, y_pred).numpy() 0.155 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.09 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 0.31 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Huber( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([0.18, 0.13], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Huber()) ``` """ def __init__(self, delta=1.0, reduction=losses_utils.ReductionV2.AUTO, name='huber_loss'): """Initializes `Huber` instance. Args: delta: A float, the point where the Huber loss function changes from a quadratic to linear. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'huber_loss'. """ super().__init__(huber, name=name, reduction=reduction, delta=delta) @keras_export('keras.metrics.mean_squared_error', 'keras.metrics.mse', 'keras.metrics.MSE', 'keras.losses.mean_squared_error', 'keras.losses.mse', 'keras.losses.MSE') @tf.__internal__.dispatch.add_dispatch_support def mean_squared_error(y_true, y_pred): """Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. `loss = mean(square(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. 
    shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = tf.convert_to_tensor(y_pred)
  y_true = tf.cast(y_true, y_pred.dtype)
  return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)


def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred, y_pred_extra_dim=False):
  """Apply a loss function on a per batch basis.

  Args:
    loss_fn: The loss function
    y_true: truth values (RaggedTensor)
    y_pred: predicted values (RaggedTensor)
    y_pred_extra_dim: whether y_pred has an additional dimension compared to
      y_true

  Returns:
    Loss-function result. A dense tensor if the output has a single dimension
    (per-batch loss value); a ragged tensor otherwise.
  """

  def rt_is_equiv_dense(rt):
    """Returns true if this RaggedTensor has the same row_lengths across

       all ragged dimensions and thus can be converted to a dense tensor
       without loss of information.

    Args:
      rt: RaggedTensor.
    """
    # A ragged dimension is uniform exactly when the variance of its row
    # lengths is zero; require that for every nested ragged dimension.
    return tf.reduce_all([
        tf.equal(
            tf.math.reduce_variance(tf.cast(row_lens, backend.floatx())),
            tf.constant([0.])) for row_lens in rt.nested_row_lengths()
    ])

  def _convert_to_dense(inputs):
    # Densify only the ragged members; pass other tensors through unchanged.
    return tuple(
        rt.to_tensor() if isinstance(rt, tf.RaggedTensor) else rt
        for rt in inputs)

  def _call_loss(inputs, ragged_output):
    """ Adapt the result to ragged or dense tensor according to the expected

        output type. This is done so that all the return values of the map
        operation have the same type.
""" r = loss_fn(*inputs) if ragged_output and not isinstance(r, tf.RaggedTensor): r = tf.RaggedTensor.from_tensor(r) elif not ragged_output and isinstance(r, tf.RaggedTensor): r = r.to_tensor() return r def _wrapper(inputs, ragged_output): _, y_pred = inputs if isinstance(y_pred, tf.RaggedTensor): return tf.cond( rt_is_equiv_dense(y_pred), lambda: _call_loss(_convert_to_dense(inputs), ragged_output), lambda: _call_loss(inputs, ragged_output)) return loss_fn(*inputs) if not isinstance(y_true, tf.RaggedTensor): return loss_fn(y_true, y_pred.to_tensor()) lshape = y_pred.shape.as_list()[1:-1] if len(lshape) > 0: spec = tf.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype) else: spec = tf.TensorSpec(shape=[], dtype=y_pred.dtype) nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)] if y_pred_extra_dim: # The last dimension of a categorical prediction may be ragged or not. rdims = [len(slist) for slist in nested_splits_list] if rdims[0] == rdims[1] - 1: nested_splits_list[1] = nested_splits_list[1][:-1] map_fn = functools.partial(_wrapper, ragged_output=len(lshape) > 1) assertion_list = ragged_util.assert_splits_match(nested_splits_list) with tf.control_dependencies(assertion_list): return ragged_map_ops.map_fn(map_fn, elems=(y_true, y_pred), dtype=spec) @dispatch.dispatch_for_types(mean_squared_error, tf.RaggedTensor) def _ragged_tensor_mse(y_true, y_pred): """Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`. y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise a Dense tensor with dimensions [batch_size] is returned. 
""" return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred) @keras_export('keras.metrics.mean_absolute_error', 'keras.metrics.mae', 'keras.metrics.MAE', 'keras.losses.mean_absolute_error', 'keras.losses.mae', 'keras.losses.MAE') @tf.__internal__.dispatch.add_dispatch_support def mean_absolute_error(y_true, y_pred): """Computes the mean absolute error between labels and predictions. `loss = mean(abs(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return backend.mean(tf.abs(y_pred - y_true), axis=-1) @dispatch.dispatch_for_types(mean_absolute_error, tf.RaggedTensor) def _ragged_tensor_mae(y_true, y_pred): """RaggedTensor adapter for mean_absolute_error.""" return _ragged_tensor_apply_loss(mean_absolute_error, y_true, y_pred) @keras_export('keras.metrics.mean_absolute_percentage_error', 'keras.metrics.mape', 'keras.metrics.MAPE', 'keras.losses.mean_absolute_percentage_error', 'keras.losses.mape', 'keras.losses.MAPE') @tf.__internal__.dispatch.add_dispatch_support def mean_absolute_percentage_error(y_true, y_pred): """Computes the mean absolute percentage error between `y_true` and `y_pred`. 
`loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)` Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) diff = tf.abs( (y_true - y_pred) / backend.maximum(tf.abs(y_true), backend.epsilon())) return 100. * backend.mean(diff, axis=-1) @dispatch.dispatch_for_types(mean_absolute_percentage_error, tf.RaggedTensor) def _ragged_tensor_mape(y_true, y_pred): """Support RaggedTensors.""" return _ragged_tensor_apply_loss(mean_absolute_percentage_error, y_true, y_pred) @keras_export('keras.metrics.mean_squared_logarithmic_error', 'keras.metrics.msle', 'keras.metrics.MSLE', 'keras.losses.mean_squared_logarithmic_error', 'keras.losses.msle', 'keras.losses.MSLE') @tf.__internal__.dispatch.add_dispatch_support def mean_squared_logarithmic_error(y_true, y_pred): """Computes the mean squared logarithmic error between `y_true` and `y_pred`. `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = np.maximum(y_true, 1e-7) >>> y_pred = np.maximum(y_pred, 1e-7) >>> assert np.allclose( ... loss.numpy(), ... np.mean( ... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1)) Args: y_true: Ground truth values. 
shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) first_log = tf.math.log(backend.maximum(y_pred, backend.epsilon()) + 1.) second_log = tf.math.log(backend.maximum(y_true, backend.epsilon()) + 1.) return backend.mean( tf.math.squared_difference(first_log, second_log), axis=-1) @dispatch.dispatch_for_types(mean_squared_logarithmic_error, tf.RaggedTensor) def _ragged_tensor_msle(y_true, y_pred): """Implements support for handling RaggedTensors.""" return _ragged_tensor_apply_loss(mean_squared_logarithmic_error, y_true, y_pred) def _maybe_convert_labels(y_true): """Converts binary labels into -1/1.""" are_zeros = tf.equal(y_true, 0) are_ones = tf.equal(y_true, 1) is_binary = tf.reduce_all(tf.logical_or(are_zeros, are_ones)) def _convert_binary_labels(): # Convert the binary labels to -1 or 1. return 2. * y_true - 1. updated_y_true = tf.__internal__.smart_cond.smart_cond(is_binary, _convert_binary_labels, lambda: y_true) return updated_y_true @keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge') @tf.__internal__.dispatch.add_dispatch_support def squared_hinge(y_true, y_pred): """Computes the squared hinge loss between `y_true` and `y_pred`. `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. 
shape = `[batch_size, d0, .. dN]`. Returns: Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return backend.mean( tf.square(tf.maximum(1. - y_true * y_pred, 0.)), axis=-1) @keras_export('keras.metrics.hinge', 'keras.losses.hinge') @tf.__internal__.dispatch.add_dispatch_support def hinge(y_true, y_pred): """Computes the hinge loss between `y_true` and `y_pred`. `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return backend.mean(tf.maximum(1. - y_true * y_pred, 0.), axis=-1) @keras_export('keras.losses.categorical_hinge') @tf.__internal__.dispatch.add_dispatch_support def categorical_hinge(y_true, y_pred): """Computes the categorical hinge loss between `y_true` and `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = np.random.randint(0, 3, size=(2,)) >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> pos = np.sum(y_true * y_pred, axis=-1) >>> neg = np.amax((1. 
- y_true) * y_pred, axis=-1) >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.)) Args: y_true: The ground truth values. `y_true` values are expected to be either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor). y_pred: The predicted values. Returns: Categorical hinge loss values. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) pos = tf.reduce_sum(y_true * y_pred, axis=-1) neg = tf.reduce_max((1. - y_true) * y_pred, axis=-1) zero = tf.cast(0., y_pred.dtype) return tf.maximum(neg - pos + 1., zero) @keras_export('keras.losses.huber', v1=[]) @tf.__internal__.dispatch.add_dispatch_support def huber(y_true, y_pred, delta=1.0): """Computes Huber loss value. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = d * |x| - 0.5 * d^2 if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. delta: A float, the point where the Huber loss function changes from a quadratic to linear. Returns: Tensor with one scalar loss entry per sample. """ y_pred = tf.cast(y_pred, dtype=backend.floatx()) y_true = tf.cast(y_true, dtype=backend.floatx()) delta = tf.cast(delta, dtype=backend.floatx()) error = tf.subtract(y_pred, y_true) abs_error = tf.abs(error) half = tf.convert_to_tensor(0.5, dtype=abs_error.dtype) return backend.mean( tf.where(abs_error <= delta, half * tf.square(error), delta * abs_error - half * tf.square(delta)), axis=-1) @keras_export('keras.losses.log_cosh', 'keras.losses.logcosh', 'keras.metrics.log_cosh', 'keras.metrics.logcosh') @tf.__internal__.dispatch.add_dispatch_support def log_cosh(y_true, y_pred): """Logarithm of the hyperbolic cosine of the prediction error. `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and to `abs(x) - log(2)` for large `x`. 
This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.logcosh(y_true, y_pred) >>> assert loss.shape == (2,) >>> x = y_pred - y_true >>> assert np.allclose( ... loss.numpy(), ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - math_ops.log(2.), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Logcosh error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) def _logcosh(x): return x + tf.math.softplus(-2. * x) - tf.cast( tf.math.log(2.), x.dtype) return backend.mean(_logcosh(y_pred - y_true), axis=-1) @keras_export('keras.metrics.categorical_crossentropy', 'keras.losses.categorical_crossentropy') @tf.__internal__.dispatch.add_dispatch_support def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0., axis=-1): """Computes the categorical crossentropy loss. Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Defaults to -1. The dimension along which the entropy is computed. Returns: Categorical crossentropy loss value. 
""" y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) label_smoothing = tf.convert_to_tensor( label_smoothing, dtype=backend.floatx()) def _smooth_labels(): num_classes = tf.cast(tf.shape(y_true)[-1], y_pred.dtype) return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes) y_true = tf.__internal__.smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true) return backend.categorical_crossentropy( y_true, y_pred, from_logits=from_logits, axis=axis) @dispatch.dispatch_for_types(categorical_crossentropy, tf.RaggedTensor) def _ragged_tensor_categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0., axis=-1): """Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. Returns: Categorical crossentropy loss value. Expected shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by CategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectivly the resulting loss is the sum of the individual loss values divided by 3. 
""" fn = functools.partial( categorical_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) return _ragged_tensor_apply_loss(fn, y_true, y_pred) @keras_export('keras.metrics.sparse_categorical_crossentropy', 'keras.losses.sparse_categorical_crossentropy') @tf.__internal__.dispatch.add_dispatch_support def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1): """Computes the sparse categorical crossentropy loss. Standalone usage: >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Ground truth values. y_pred: The predicted values. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. axis: Defaults to -1. The dimension along which the entropy is computed. Returns: Sparse categorical crossentropy loss value. """ y_pred = tf.convert_to_tensor(y_pred) return backend.sparse_categorical_crossentropy( y_true, y_pred, from_logits=from_logits, axis=axis) @dispatch.dispatch_for_types(sparse_categorical_crossentropy, tf.RaggedTensor) def _ragged_tensor_sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1): """ Implements support for handling RaggedTensors. Expected y_pred shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by SparseCategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectively, the resulting loss is the sum of the individual loss values divided by 3. 
""" fn = functools.partial( sparse_categorical_crossentropy, from_logits=from_logits, axis=axis) return _ragged_tensor_apply_loss(fn, y_true, y_pred, y_pred_extra_dim=True) @keras_export('keras.metrics.binary_crossentropy', 'keras.losses.binary_crossentropy') @tf.__internal__.dispatch.add_dispatch_support def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0., axis=-1): """Computes the binary crossentropy loss. Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.916 , 0.714], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by squeezing them towards 0.5 That is, using `1. - 0.5 * label_smoothing` for the target class and `0.5 * label_smoothing` for the non-target class. axis: The axis along which the mean is computed. Defaults to -1. Returns: Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) label_smoothing = tf.convert_to_tensor( label_smoothing, dtype=backend.floatx()) def _smooth_labels(): return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing y_true = tf.__internal__.smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true) return backend.mean( backend.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=axis) @dispatch.dispatch_for_types(binary_crossentropy, tf.RaggedTensor) def _ragged_tensor_binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0., axis=-1): """Implements support for handling RaggedTensors. 
Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Axis along which to compute crossentropy. Returns: Binary crossentropy loss value. Expected shape: (batch, sequence_len) with sequence_len being variable per batch. Return shape: (batch,); returns the per batch mean of the loss values. When used by BinaryCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over the number of batches. """ fn = functools.partial( binary_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) return _ragged_tensor_apply_loss(fn, y_true, y_pred) @keras_export('keras.metrics.kl_divergence', 'keras.metrics.kullback_leibler_divergence', 'keras.metrics.kld', 'keras.metrics.KLD', 'keras.losses.kl_divergence', 'keras.losses.kullback_leibler_divergence', 'keras.losses.kld', 'keras.losses.KLD') @tf.__internal__.dispatch.add_dispatch_support def kl_divergence(y_true, y_pred): """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1) >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1) >>> assert np.array_equal( ... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1)) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. 
Returns: A `Tensor` with loss. Raises: TypeError: If `y_true` cannot be cast to the `y_pred.dtype`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) y_true = backend.clip(y_true, backend.epsilon(), 1) y_pred = backend.clip(y_pred, backend.epsilon(), 1) return tf.reduce_sum(y_true * tf.math.log(y_true / y_pred), axis=-1) @keras_export('keras.metrics.poisson', 'keras.losses.poisson') @tf.__internal__.dispatch.add_dispatch_support def poisson(y_true, y_pred): """Computes the Poisson loss between y_true and y_pred. The Poisson loss is the mean of the elements of the `Tensor` `y_pred - y_true * log(y_pred)`. Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.poisson(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_pred = y_pred + 1e-7 >>> assert np.allclose( ... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Poisson loss value. shape = `[batch_size, d0, .. dN-1]`. Raises: InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return backend.mean( y_pred - y_true * tf.math.log(y_pred + backend.epsilon()), axis=-1) @keras_export( 'keras.losses.cosine_similarity', v1=[ 'keras.metrics.cosine_proximity', 'keras.metrics.cosine', 'keras.losses.cosine_proximity', 'keras.losses.cosine', 'keras.losses.cosine_similarity', ]) @tf.__internal__.dispatch.add_dispatch_support def cosine_similarity(y_true, y_pred, axis=-1): """Computes the cosine similarity between labels and predictions. Note that it is a number between -1 and 1. When it is a negative number between -1 and 0, 0 indicates orthogonality and values closer to -1 indicate greater similarity. 
The values closer to 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))` Standalone usage: >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1) >>> loss.numpy() array([-0., -0.999, 0.999], dtype=float32) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. axis: Axis along which to determine similarity. Returns: Cosine similarity tensor. """ y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return -tf.reduce_sum(y_true * y_pred, axis=axis) @keras_export('keras.losses.CosineSimilarity') class CosineSimilarity(LossFunctionWrapper): """Computes the cosine similarity between labels and predictions. Note that it is a number between -1 and 1. When it is a negative number between -1 and 0, 0 indicates orthogonality and values closer to -1 indicate greater similarity. The values closer to 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))` Standalone usage: >>> y_true = [[0., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]] >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]] >>> # l2_norm(y_true) . 
l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] >>> # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) >>> # = -((0. + 0.) + (0.5 + 0.5)) / 2 >>> cosine_loss(y_true, y_pred).numpy() -0.5 >>> # Calling with 'sample_weight'. >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() -0.0999 >>> # Using 'sum' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1, ... reduction=tf.keras.losses.Reduction.SUM) >>> cosine_loss(y_true, y_pred).numpy() -0.999 >>> # Using 'none' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1, ... reduction=tf.keras.losses.Reduction.NONE) >>> cosine_loss(y_true, y_pred).numpy() array([-0., -0.999], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1)) ``` Args: axis: The axis along which the cosine similarity is computed (the features axis). Defaults to -1. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial] (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. """ def __init__(self, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name='cosine_similarity'): super().__init__( cosine_similarity, reduction=reduction, name=name, axis=axis) # Aliases. 
bce = BCE = binary_crossentropy mse = MSE = mean_squared_error mae = MAE = mean_absolute_error mape = MAPE = mean_absolute_percentage_error msle = MSLE = mean_squared_logarithmic_error kld = KLD = kullback_leibler_divergence = kl_divergence logcosh = log_cosh huber_loss = huber def is_categorical_crossentropy(loss): result = ((isinstance(loss, CategoricalCrossentropy) or (isinstance(loss, LossFunctionWrapper) and loss.fn == categorical_crossentropy) or (hasattr(loss, '__name__') and loss.__name__ == 'categorical_crossentropy') or (loss == 'categorical_crossentropy'))) return result @keras_export('keras.losses.serialize') def serialize(loss): """Serializes loss function or `Loss` instance. Args: loss: A Keras `Loss` instance or a loss function. Returns: Loss configuration dictionary. """ return serialize_keras_object(loss) @keras_export('keras.losses.deserialize') def deserialize(name, custom_objects=None): """Deserializes a serialized loss class/function instance. Args: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras `Loss` instance or a loss function. """ return deserialize_keras_object( name, module_objects=globals(), custom_objects=custom_objects, printable_module_name='loss function') @keras_export('keras.losses.get') def get(identifier): """Retrieves a Keras loss as a `function`/`Loss` class instance. The `identifier` may be the string name of a loss function or `Loss` class. >>> loss = tf.keras.losses.get("categorical_crossentropy") >>> type(loss) <class 'function'> >>> loss = tf.keras.losses.get("CategoricalCrossentropy") >>> type(loss) <class '...keras.losses.CategoricalCrossentropy'> You can also specify `config` of the loss to this function by passing dict containing `class_name` and `config` as an identifier. 
Also note that the `class_name` must map to a `Loss` class >>> identifier = {"class_name": "CategoricalCrossentropy", ... "config": {"from_logits": True}} >>> loss = tf.keras.losses.get(identifier) >>> type(loss) <class '...keras.losses.CategoricalCrossentropy'> Args: identifier: A loss identifier. One of None or string name of a loss function/class or loss configuration dictionary or a loss function or a loss class instance. Returns: A Keras loss as a `function`/ `Loss` class instance. Raises: ValueError: If `identifier` cannot be interpreted. """ if identifier is None: return None if isinstance(identifier, str): identifier = str(identifier) return deserialize(identifier) if isinstance(identifier, dict): return deserialize(identifier) if callable(identifier): return identifier raise ValueError( f'Could not interpret loss function identifier: {identifier}') LABEL_DTYPES_FOR_LOSSES = { tf.compat.v1.losses.sparse_softmax_cross_entropy: 'int32', sparse_categorical_crossentropy: 'int32' }
78,681
35.613309
113
py
keras
keras-master/keras/callbacks.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-import-not-at-top # pylint: disable=g-classes-have-attributes """Callbacks: utilities called at certain points during model training.""" import tensorflow.compat.v2 as tf import collections import copy import csv import json import os import re import sys import time import numpy as np from keras import backend from keras.distribute import distributed_file_utils from keras.distribute import worker_training_state from keras.optimizer_v2 import learning_rate_schedule from keras.utils import generic_utils from keras.utils import tf_utils from keras.utils import version_utils from keras.utils.data_utils import Sequence from keras.utils.generic_utils import Progbar from keras.utils.io_utils import path_to_string from keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls try: import requests except ImportError: requests = None # Note: `configure_callbacks` is only used in TF1. def configure_callbacks(callbacks, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, count_mode='steps', mode=ModeKeys.TRAIN): """Configures callbacks for use in various training loops. 
Args: callbacks: List of Callbacks. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epoch to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count. mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. Which loop mode to configure callbacks for. Returns: Instance of CallbackList used to control all Callbacks. """ # Check if callbacks have already been configured. if isinstance(callbacks, CallbackList): return callbacks if not callbacks: callbacks = [] # Add additional callbacks during training. if mode == ModeKeys.TRAIN: model.history = History() callbacks = [BaseLogger()] + (callbacks or []) + [model.history] if verbose: callbacks.append(ProgbarLogger(count_mode)) callback_list = CallbackList(callbacks) # Set callback model callback_model = model._get_callback_model() # pylint: disable=protected-access callback_list.set_model(callback_model) set_callback_parameters( callback_list, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=samples, verbose=verbose, mode=mode) callback_list.model.stop_training = False return callback_list def set_callback_parameters(callback_list, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, mode=ModeKeys.TRAIN): """Sets callback parameters. Args: callback_list: CallbackList instance. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epoch to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. 
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. Which loop mode to configure callbacks for. """ metric_names = model.metrics_names for cbk in callback_list: if isinstance(cbk, (BaseLogger, ProgbarLogger)): cbk.stateful_metrics = metric_names[1:] # Exclude `loss` # Set callback parameters callback_metrics = [] # When we have deferred build scenario with iterator input, we will compile # when we standardize first batch of data. if mode != ModeKeys.PREDICT: callback_metrics = copy.copy(metric_names) if do_validation: callback_metrics += ['val_' + n for n in metric_names] callback_params = { 'batch_size': batch_size, 'epochs': epochs, 'steps': steps_per_epoch, 'samples': samples, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics, } callback_list.set_params(callback_params) def _is_generator_like(data): """Checks if data is a generator, Sequence, or Iterator.""" return (hasattr(data, '__next__') or hasattr(data, 'next') or isinstance( data, (Sequence, tf.compat.v1.data.Iterator, tf.data.Iterator))) def make_logs(model, logs, outputs, mode, prefix=''): """Computes logs for sending to `on_batch_end` methods.""" metric_names = model.metrics_names if mode in {ModeKeys.TRAIN, ModeKeys.TEST} and metric_names: for label, output in zip(metric_names, outputs): logs[prefix + label] = output else: logs['outputs'] = outputs return logs @keras_export('keras.callbacks.CallbackList') class CallbackList: """Container abstracting a list of callbacks.""" def __init__(self, callbacks=None, add_history=False, add_progbar=False, model=None, **params): """Container for `Callback` instances. This object wraps a list of `Callback` instances, making it possible to call them all at once via a single endpoint (e.g. `callback_list.on_epoch_end(...)`). Args: callbacks: List of `Callback` instances. add_history: Whether a `History` callback should be added, if one does not already exist in the `callbacks` list. 
add_progbar: Whether a `ProgbarLogger` callback should be added, if one does not already exist in the `callbacks` list. model: The `Model` these callbacks are used with. **params: If provided, parameters will be passed to each `Callback` via `Callback.set_params`. """ self.callbacks = tf.nest.flatten(callbacks) if callbacks else [] self._add_default_callbacks(add_history, add_progbar) if model: self.set_model(model) if params: self.set_params(params) # Performance optimization: determines if batch hooks need to be called. # pylint: disable=protected-access self._supports_tf_logs = all( getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks) self._batch_hooks_support_tf_logs = all( getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks if cb._implements_train_batch_hooks() or cb ._implements_test_batch_hooks() or cb._implements_predict_batch_hooks()) self._should_call_train_batch_hooks = any( cb._implements_train_batch_hooks() for cb in self.callbacks) self._should_call_test_batch_hooks = any( cb._implements_test_batch_hooks() for cb in self.callbacks) self._should_call_predict_batch_hooks = any( cb._implements_predict_batch_hooks() for cb in self.callbacks) # pylint: enable=protected-access self._disallow_batch_hooks_in_ps_strategy() # Performance check: Check batch hooks for slowness compared to batch time. # Only run check for custom callbacks (i.e. not present in this file). 
self._check_timing = any( cbk.__class__.__name__ not in globals() for cbk in self.callbacks) self._num_batches_for_timing_check = 5 self._hook_times = {} self._batch_start_time = None self._batch_times = [] def _add_default_callbacks(self, add_history, add_progbar): """Adds `Callback`s that are always present.""" self._progbar = None self._history = None for cb in self.callbacks: if isinstance(cb, ProgbarLogger): self._progbar = cb elif isinstance(cb, History): self._history = cb if self._progbar is None and add_progbar: self._progbar = ProgbarLogger(count_mode='steps') self.callbacks.insert(0, self._progbar) if self._history is None and add_history: self._history = History() self.callbacks.append(self._history) def _process_logs(self, logs, is_batch_hook=False): """Turns tensors into numpy arrays or Python scalars if necessary.""" if logs is None: return {} if self._supports_tf_logs: return logs if is_batch_hook and self._batch_hooks_support_tf_logs: return logs return tf_utils.sync_to_numpy_or_python_type(logs) def append(self, callback): self.callbacks.append(callback) def set_params(self, params): self.params = params for callback in self.callbacks: callback.set_params(params) def set_model(self, model): self.model = model if self._history: model.history = self._history for callback in self.callbacks: callback.set_model(model) def _call_batch_hook(self, mode, hook, batch, logs=None): """Helper function for all batch_{begin | end} methods.""" if not self.callbacks: return if hook == 'begin': self._call_batch_begin_hook(mode, batch, logs) elif hook == 'end': self._call_batch_end_hook(mode, batch, logs) else: raise ValueError( f'Unrecognized hook: {hook}. 
Expected values are ["begin", "end"]') def _call_batch_begin_hook(self, mode, batch, logs): """Helper function for `on_*_batch_begin` methods.""" hook_name = 'on_{mode}_batch_begin'.format(mode=mode) self._call_batch_hook_helper(hook_name, batch, logs) if self._check_timing: self._batch_start_time = time.time() def _call_batch_end_hook(self, mode, batch, logs): """Helper function for `on_*_batch_end` methods.""" hook_name = 'on_{mode}_batch_end'.format(mode=mode) if self._check_timing and batch >= 1: batch_time = time.time() - self._batch_start_time self._batch_times.append(batch_time) self._call_batch_hook_helper(hook_name, batch, logs) if len(self._batch_times) >= self._num_batches_for_timing_check: end_hook_name = hook_name begin_hook_name = 'on_{mode}_batch_begin'.format(mode=mode) avg_batch_time = sum(self._batch_times) / len(self._batch_times) avg_end_hook_time = sum(self._hook_times[end_hook_name]) / len( self._hook_times[end_hook_name]) avg_begin_hook_time = sum(self._hook_times[begin_hook_name]) / len( self._hook_times[begin_hook_name]) threshold_time = 1.0 * avg_batch_time warning_msg = ('Callback method `{hook}` is slow compared to ' 'the batch time (batch time: {batch_time:.4f}s vs ' '`{hook}` time: {hook_time:.4f}s). 
Check your callbacks.') if avg_begin_hook_time > threshold_time: logging.warning(warning_msg.format( hook=begin_hook_name, batch_time=avg_batch_time, hook_time=avg_begin_hook_time)) if avg_end_hook_time > threshold_time: logging.warning(warning_msg.format( hook=end_hook_name, batch_time=avg_batch_time, hook_time=avg_end_hook_time)) self._check_timing = False self._batch_start_time = None self._batch_times = [] self._hook_times = {} def _call_batch_hook_helper(self, hook_name, batch, logs): """Helper function for `on_*_batch_*` methods.""" if self._check_timing: start_time = time.time() logs = self._process_logs(logs, is_batch_hook=True) for callback in self.callbacks: hook = getattr(callback, hook_name) hook(batch, logs) if self._check_timing: if hook_name not in self._hook_times: self._hook_times[hook_name] = [] self._hook_times[hook_name].append(time.time() - start_time) def _call_begin_hook(self, mode): """Helper function for on_{train|test|predict}_begin methods.""" if mode == ModeKeys.TRAIN: self.on_train_begin() elif mode == ModeKeys.TEST: self.on_test_begin() else: self.on_predict_begin() def _call_end_hook(self, mode): """Helper function for on_{train|test|predict}_end methods.""" if mode == ModeKeys.TRAIN: self.on_train_end() elif mode == ModeKeys.TEST: self.on_test_end() else: self.on_predict_end() def on_batch_begin(self, batch, logs=None): if self._should_call_train_batch_hooks: self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs) def on_batch_end(self, batch, logs=None): if self._should_call_train_batch_hooks: self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs) def on_epoch_begin(self, epoch, logs=None): """Calls the `on_epoch_begin` methods of its callbacks. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. 
""" logs = self._process_logs(logs) for callback in self.callbacks: callback.on_epoch_begin(epoch, logs) def on_epoch_end(self, epoch, logs=None): """Calls the `on_epoch_end` methods of its callbacks. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with `val_`. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_epoch_end(epoch, logs) def on_train_batch_begin(self, batch, logs=None): """Calls the `on_train_batch_begin` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.train_step`. Typically, the values of the `Model`'s metrics are returned. Example: `{'loss': 0.2, 'accuracy': 0.7}`. """ if self._should_call_train_batch_hooks: self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs) def on_train_batch_end(self, batch, logs=None): """Calls the `on_train_batch_end` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ if self._should_call_train_batch_hooks: self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs) def on_test_batch_begin(self, batch, logs=None): """Calls the `on_test_batch_begin` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.test_step`. Typically, the values of the `Model`'s metrics are returned. Example: `{'loss': 0.2, 'accuracy': 0.7}`. """ if self._should_call_test_batch_hooks: self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs) def on_test_batch_end(self, batch, logs=None): """Calls the `on_test_batch_end` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. 
Aggregated metric results up until this batch. """ if self._should_call_test_batch_hooks: self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs) def on_predict_batch_begin(self, batch, logs=None): """Calls the `on_predict_batch_begin` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.predict_step`, it typically returns a dict with a key 'outputs' containing the model's outputs. """ if self._should_call_predict_batch_hooks: self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs) def on_predict_batch_end(self, batch, logs=None): """Calls the `on_predict_batch_end` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ if self._should_call_predict_batch_hooks: self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs) def on_train_begin(self, logs=None): """Calls the `on_train_begin` methods of its callbacks. Args: logs: Dict. Currently, no data is passed via this argument for this method, but that may change in the future. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_train_begin(logs) def on_train_end(self, logs=None): """Calls the `on_train_end` methods of its callbacks. Args: logs: Dict. Currently, no data is passed via this argument for this method, but that may change in the future. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_train_end(logs) def on_test_begin(self, logs=None): """Calls the `on_test_begin` methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_test_begin(logs) def on_test_end(self, logs=None): """Calls the `on_test_end` methods of its callbacks. Args: logs: Dict. 
Currently, no data is passed via this argument for this method, but that may change in the future. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_test_end(logs) def on_predict_begin(self, logs=None): """Calls the 'on_predict_begin` methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_predict_begin(logs) def on_predict_end(self, logs=None): """Calls the `on_predict_end` methods of its callbacks. Args: logs: Dict. Currently, no data is passed via this argument for this method, but that may change in the future. """ logs = self._process_logs(logs) for callback in self.callbacks: callback.on_predict_end(logs) def __iter__(self): return iter(self.callbacks) def _disallow_batch_hooks_in_ps_strategy(self): """Error out if batch-level callbacks are passed with PSStrategy.""" # pylint: disable=protected-access strategy = tf.distribute.get_strategy() if strategy._should_use_with_coordinator: unsupported_callbacks = [] for cb in self.callbacks: # These Callbacks can accept RemoteValues directly. if getattr(cb, '_supports_tf_logs', False): continue if (cb._implements_train_batch_hooks() or cb._implements_test_batch_hooks() or cb._implements_predict_batch_hooks()): unsupported_callbacks.append(cb) if unsupported_callbacks: raise ValueError( 'Batch-level `Callback`s are not supported with ' '`ParameterServerStrategy`. Found unsupported ' f'callbacks: {unsupported_callbacks}') # pylint: enable=protected-access @keras_export('keras.callbacks.Callback') class Callback: """Abstract base class used to build new callbacks. Callbacks can be passed to keras methods such as `fit`, `evaluate`, and `predict` in order to hook into the various stages of the model training and inference lifecycle. 
To create a custom callback, subclass `keras.callbacks.Callback` and override the method associated with the stage of interest. See https://www.tensorflow.org/guide/keras/custom_callback for more information. Example: >>> training_finished = False >>> class MyCallback(tf.keras.callbacks.Callback): ... def on_train_end(self, logs=None): ... global training_finished ... training_finished = True >>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))]) >>> model.compile(loss='mean_squared_error') >>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]), ... callbacks=[MyCallback()]) >>> assert training_finished == True If you want to use `Callback` objects in a custom training loop: 1. You should pack all your callbacks into a single `callbacks.CallbackList` so they can all be called together. 2. You will need to manually call all the `on_*` methods at the apropriate locations in your loop. Like this: ``` callbacks = tf.keras.callbacks.CallbackList([...]) callbacks.append(...) callbacks.on_train_begin(...) for epoch in range(EPOCHS): callbacks.on_epoch_begin(epoch) for i, data in dataset.enumerate(): callbacks.on_train_batch_begin(i) batch_logs = model.train_step(data) callbacks.on_train_batch_end(i, batch_logs) epoch_logs = ... callbacks.on_epoch_end(epoch, epoch_logs) final_logs=... callbacks.on_train_end(final_logs) ``` Attributes: params: Dict. Training parameters (eg. verbosity, batch size, number of epochs...). model: Instance of `keras.models.Model`. Reference of the model being trained. The `logs` dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch (see method-specific docstrings). """ def __init__(self): self.validation_data = None # pylint: disable=g-missing-from-attributes self.model = None # Whether this Callback should only run on the chief worker in a # Multi-Worker setting. # TODO(omalleyt): Make this attr public once solution is stable. 
self._chief_worker_only = None self._supports_tf_logs = False def set_params(self, params): self.params = params def set_model(self, model): self.model = model @doc_controls.for_subclass_implementers @generic_utils.default def on_batch_begin(self, batch, logs=None): """A backwards compatibility alias for `on_train_batch_begin`.""" @doc_controls.for_subclass_implementers @generic_utils.default def on_batch_end(self, batch, logs=None): """A backwards compatibility alias for `on_train_batch_end`.""" @doc_controls.for_subclass_implementers def on_epoch_begin(self, epoch, logs=None): """Called at the start of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers def on_epoch_end(self, epoch, logs=None): """Called at the end of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with `val_`. For training epoch, the values of the `Model`'s metrics are returned. Example : `{'loss': 0.2, 'accuracy': 0.7}`. """ @doc_controls.for_subclass_implementers @generic_utils.default def on_train_batch_begin(self, batch, logs=None): """Called at the beginning of a training batch in `fit` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ # For backwards compatibility. 
self.on_batch_begin(batch, logs=logs) @doc_controls.for_subclass_implementers @generic_utils.default def on_train_batch_end(self, batch, logs=None): """Called at the end of a training batch in `fit` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ # For backwards compatibility. self.on_batch_end(batch, logs=logs) @doc_controls.for_subclass_implementers @generic_utils.default def on_test_batch_begin(self, batch, logs=None): """Called at the beginning of a batch in `evaluate` methods. Also called at the beginning of a validation batch in the `fit` methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers @generic_utils.default def on_test_batch_end(self, batch, logs=None): """Called at the end of a batch in `evaluate` methods. Also called at the end of a validation batch in the `fit` methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. 
""" @doc_controls.for_subclass_implementers @generic_utils.default def on_predict_batch_begin(self, batch, logs=None): """Called at the beginning of a batch in `predict` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers @generic_utils.default def on_predict_batch_end(self, batch, logs=None): """Called at the end of a batch in `predict` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @doc_controls.for_subclass_implementers def on_train_begin(self, logs=None): """Called at the beginning of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers def on_train_end(self, logs=None): """Called at the end of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to `on_epoch_end()` is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers def on_test_begin(self, logs=None): """Called at the beginning of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. 
""" @doc_controls.for_subclass_implementers def on_test_end(self, logs=None): """Called at the end of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to `on_test_batch_end()` is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers def on_predict_begin(self, logs=None): """Called at the beginning of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @doc_controls.for_subclass_implementers def on_predict_end(self, logs=None): """Called at the end of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def _implements_train_batch_hooks(self): """Determines if this Callback should be called for each train batch.""" return (not generic_utils.is_default(self.on_batch_begin) or not generic_utils.is_default(self.on_batch_end) or not generic_utils.is_default(self.on_train_batch_begin) or not generic_utils.is_default(self.on_train_batch_end)) def _implements_test_batch_hooks(self): """Determines if this Callback should be called for each test batch.""" return (not generic_utils.is_default(self.on_test_batch_begin) or not generic_utils.is_default(self.on_test_batch_end)) def _implements_predict_batch_hooks(self): """Determines if this Callback should be called for each predict batch.""" return (not generic_utils.is_default(self.on_predict_batch_begin) or not generic_utils.is_default(self.on_predict_batch_end)) @keras_export('keras.callbacks.BaseLogger') class BaseLogger(Callback): """Callback that accumulates epoch averages of metrics. This callback is automatically applied to every Keras model. 
Args: stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is in `on_epoch_end`. All others will be averaged in `on_epoch_end`. """ def __init__(self, stateful_metrics=None): super(BaseLogger, self).__init__() self.stateful_metrics = set(stateful_metrics or []) def on_epoch_begin(self, epoch, logs=None): self.seen = 0 self.totals = {} def on_batch_end(self, batch, logs=None): logs = logs or {} batch_size = logs.get('size', 0) # In case of distribution strategy we can potentially run multiple steps # at the same time, we should account for that in the `seen` calculation. num_steps = logs.get('num_steps', 1) self.seen += batch_size * num_steps for k, v in logs.items(): if k in self.stateful_metrics: self.totals[k] = v else: if k in self.totals: self.totals[k] += v * batch_size else: self.totals[k] = v * batch_size def on_epoch_end(self, epoch, logs=None): if logs is not None: for k in self.params['metrics']: if k in self.totals: # Make value available to next callbacks. if k in self.stateful_metrics: logs[k] = self.totals[k] else: logs[k] = self.totals[k] / self.seen @keras_export('keras.callbacks.TerminateOnNaN') class TerminateOnNaN(Callback): """Callback that terminates training when a NaN loss is encountered. """ def __init__(self): super(TerminateOnNaN, self).__init__() self._supports_tf_logs = True def on_batch_end(self, batch, logs=None): logs = logs or {} loss = logs.get('loss') if loss is not None: loss = tf_utils.sync_to_numpy_or_python_type(loss) if np.isnan(loss) or np.isinf(loss): print('Batch %d: Invalid loss, terminating training' % (batch)) self.model.stop_training = True @keras_export('keras.callbacks.ProgbarLogger') class ProgbarLogger(Callback): """Callback that prints metrics to stdout. Args: count_mode: One of `"steps"` or `"samples"`. Whether the progress bar should count samples seen or steps (batches) seen. 
stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is. All others will be averaged over time (e.g. loss, etc). If not provided, defaults to the `Model`'s metrics. Raises: ValueError: In case of invalid `count_mode`. """ def __init__(self, count_mode='samples', stateful_metrics=None): super(ProgbarLogger, self).__init__() self._supports_tf_logs = True if count_mode == 'samples': self.use_steps = False elif count_mode == 'steps': self.use_steps = True else: raise ValueError( f'Unknown `count_mode`: {count_mode}. ' 'Expected values are ["samples", "steps"]') # Defaults to all Model's metrics except for loss. self.stateful_metrics = set(stateful_metrics) if stateful_metrics else set() self.seen = 0 self.progbar = None self.target = None self.verbose = 1 self.epochs = 1 self._train_step, self._test_step, self._predict_step = None, None, None self._call_batch_hooks = True self._called_in_fit = False def set_params(self, params): self.verbose = params['verbose'] self.epochs = params['epochs'] if self.use_steps and 'steps' in params: self.target = params['steps'] elif not self.use_steps and 'samples' in params: self.target = params['samples'] else: self.target = None # Will be inferred at the end of the first epoch. self._call_batch_hooks = self.verbose == 1 if self.target is None: try: self._train_step = self.model._train_counter # pylint: disable=protected-access self._test_step = self.model._test_counter # pylint: disable=protected-access self._predict_step = self.model._predict_counter # pylint: disable=protected-access except AttributeError: self._call_batch_hooks = True def on_train_begin(self, logs=None): # When this logger is called inside `fit`, validation is silent. 
self._called_in_fit = True def on_test_begin(self, logs=None): if not self._called_in_fit: self._reset_progbar() self._maybe_init_progbar() def on_predict_begin(self, logs=None): self._reset_progbar() self._maybe_init_progbar() def on_epoch_begin(self, epoch, logs=None): self._reset_progbar() self._maybe_init_progbar() if self.verbose and self.epochs > 1: print('Epoch %d/%d' % (epoch + 1, self.epochs)) def on_train_batch_end(self, batch, logs=None): self._batch_update_progbar(batch, logs) def on_test_batch_end(self, batch, logs=None): if not self._called_in_fit: self._batch_update_progbar(batch, logs) def on_predict_batch_end(self, batch, logs=None): # Don't pass prediction results. self._batch_update_progbar(batch, None) def on_epoch_end(self, epoch, logs=None): self._finalize_progbar(logs, self._train_step) def on_test_end(self, logs=None): if not self._called_in_fit: self._finalize_progbar(logs, self._test_step) def on_predict_end(self, logs=None): self._finalize_progbar(logs, self._predict_step) def _reset_progbar(self): self.seen = 0 self.progbar = None def _maybe_init_progbar(self): """Instantiate a `Progbar` if not yet, and update the stateful metrics.""" # TODO(rchao): Legacy TF1 code path may use list for # `self.stateful_metrics`. Remove "cast to set" when TF1 support is dropped. self.stateful_metrics = set(self.stateful_metrics) if self.model: # Update the existing stateful metrics as `self.model.metrics` may contain # updated metrics after `MetricsContainer` is built in the first train # step. 
self.stateful_metrics = self.stateful_metrics.union( set(m.name for m in self.model.metrics)) if self.progbar is None: self.progbar = Progbar( target=self.target, verbose=self.verbose, stateful_metrics=self.stateful_metrics, unit_name='step' if self.use_steps else 'sample') self.progbar._update_stateful_metrics(self.stateful_metrics) # pylint: disable=protected-access def _implements_train_batch_hooks(self): return self._call_batch_hooks def _implements_test_batch_hooks(self): return self._call_batch_hooks def _implements_predict_batch_hooks(self): return self._call_batch_hooks def _batch_update_progbar(self, batch, logs=None): """Updates the progbar.""" logs = logs or {} self._maybe_init_progbar() if self.use_steps: self.seen = batch + 1 # One-indexed. else: # v1 path only. logs = copy.copy(logs) batch_size = logs.pop('size', 0) num_steps = logs.pop('num_steps', 1) logs.pop('batch', None) add_seen = num_steps * batch_size self.seen += add_seen if self.verbose == 1: # Only block async when verbose = 1. logs = tf_utils.sync_to_numpy_or_python_type(logs) self.progbar.update(self.seen, list(logs.items()), finalize=False) def _finalize_progbar(self, logs, counter): logs = tf_utils.sync_to_numpy_or_python_type(logs or {}) if self.target is None: if counter is not None: counter = counter.numpy() if not self.use_steps: counter *= logs.get('size', 1) self.target = counter or self.seen self.progbar.target = self.target self.progbar.update(self.target, list(logs.items()), finalize=True) @keras_export('keras.callbacks.History') class History(Callback): """Callback that records events into a `History` object. This callback is automatically applied to every Keras model. The `History` object gets returned by the `fit` method of models. Example: >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> model.compile(tf.keras.optimizers.SGD(), loss='mse') >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... 
epochs=10) >>> print(history.params) {'verbose': 1, 'epochs': 10, 'steps': 1} >>> # check the keys of history object >>> print(history.history.keys()) dict_keys(['loss']) """ def __init__(self): super(History, self).__init__() self.history = {} def on_train_begin(self, logs=None): self.epoch = [] def on_epoch_end(self, epoch, logs=None): logs = logs or {} self.epoch.append(epoch) for k, v in logs.items(): self.history.setdefault(k, []).append(v) # Set the history attribute on the model after the epoch ends. This will # make sure that the state which is set is the latest one. self.model.history = self @keras_export('keras.callbacks.ModelCheckpoint') class ModelCheckpoint(Callback): """Callback to save the Keras model or model weights at some frequency. `ModelCheckpoint` callback is used in conjunction with training using `model.fit()` to save a model or weights (in a checkpoint file) at some interval, so the model or weights can be loaded later to continue the training from the state saved. A few options this callback provides include: - Whether to only keep the model that has achieved the "best performance" so far, or whether to save the model at the end of every epoch regardless of performance. - Definition of 'best'; which quantity to monitor and whether it should be maximized or minimized. - The frequency it should save at. Currently, the callback supports saving at the end of every epoch, or after a fixed number of training batches. - Whether only weights are saved, or the whole model is saved. Note: If you get `WARNING:tensorflow:Can save best model only with <name> available, skipping` see the description of the `monitor` argument for details on how to get this right. 
Example: ```python model.compile(loss=..., optimizer=..., metrics=['accuracy']) EPOCHS = 10 checkpoint_filepath = '/tmp/checkpoint' model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_filepath, save_weights_only=True, monitor='val_accuracy', mode='max', save_best_only=True) # Model weights are saved at the end of every epoch, if it's the best seen # so far. model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback]) # The model weights (that are considered the best) are loaded into the model. model.load_weights(checkpoint_filepath) ``` Args: filepath: string or `PathLike`, path to save the model file. e.g. filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath` can contain named formatting options, which will be filled the value of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model checkpoints will be saved with the epoch number and the validation loss in the filename. The directory of the filepath should not be reused by any other callbacks to avoid conflicts. monitor: The metric name to monitor. Typically the metrics are set by the `Model.compile` method. Note: * Prefix the name with `"val_`" to monitor validation metrics. * Use `"loss"` or "`val_loss`" to monitor the model's total loss. * If you specify metrics as strings, like `"accuracy"`, pass the same string (with or without the `"val_"` prefix). * If you pass `metrics.Metric` objects, `monitor` should be set to `metric.name` * If you're not sure about the metric names you can check the contents of the `history.history` dictionary returned by `history = model.fit()` * Multi-output models set additional prefixes on the metric names. verbose: verbosity mode, 0 or 1. save_best_only: if `save_best_only=True`, it only saves when the model is considered the "best" and the latest best model according to the quantity monitored will not be overwritten. 
If `filepath` doesn't contain formatting options like `{epoch}` then `filepath` will be overwritten by each new better model. mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For `val_acc`, this should be `max`, for `val_loss` this should be `min`, etc. In `auto` mode, the mode is set to `max` if the quantities monitored are 'acc' or start with 'fmeasure' and are set to `min` for the rest of the quantities. save_weights_only: if True, then only the model's weights will be saved (`model.save_weights(filepath)`), else the full model is saved (`model.save(filepath)`). save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves the model after each epoch. When using integer, the callback saves the model at end of this many batches. If the `Model` is compiled with `steps_per_execution=N`, then the saving criteria will be checked every Nth batch. Note that if the saving isn't aligned to epochs, the monitored metric may potentially be less reliable (it could reflect as little as 1 batch, since the metrics get reset every epoch). Defaults to `'epoch'`. options: Optional `tf.train.CheckpointOptions` object if `save_weights_only` is true or optional `tf.saved_model.SaveOptions` object if `save_weights_only` is false. **kwargs: Additional arguments for backwards compatibility. Possible key is `period`. 
""" def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', save_freq='epoch', options=None, **kwargs): super(ModelCheckpoint, self).__init__() self._supports_tf_logs = True self.monitor = monitor self.verbose = verbose self.filepath = path_to_string(filepath) self.save_best_only = save_best_only self.save_weights_only = save_weights_only self.save_freq = save_freq self.epochs_since_last_save = 0 self._batches_seen_since_last_saving = 0 self._last_batch_seen = 0 if save_weights_only: if options is None or isinstance( options, tf.train.CheckpointOptions): self._options = options or tf.train.CheckpointOptions() else: raise TypeError( 'If save_weights_only is True, then `options` must be ' f'either None or a tf.train.CheckpointOptions. Got {options}.') else: if options is None or isinstance(options, tf.saved_model.SaveOptions): self._options = options or tf.saved_model.SaveOptions() else: raise TypeError( 'If save_weights_only is False, then `options` must be ' f'either None or a tf.saved_model.SaveOptions. Got {options}.') # Deprecated field `load_weights_on_restart` is for loading the checkpoint # file from `filepath` at the start of `model.fit()` # TODO(rchao): Remove the arg during next breaking release. if 'load_weights_on_restart' in kwargs: self.load_weights_on_restart = kwargs['load_weights_on_restart'] logging.warning('`load_weights_on_restart` argument is deprecated. ' 'Please use `model.load_weights()` for loading weights ' 'before the start of `model.fit()`.') else: self.load_weights_on_restart = False # Deprecated field `period` is for the number of epochs between which # the model is saved. if 'period' in kwargs: self.period = kwargs['period'] logging.warning('`period` argument is deprecated. 
Please use `save_freq` ' 'to specify the frequency in number of batches seen.') else: self.period = 1 if mode not in ['auto', 'min', 'max']: logging.warning('ModelCheckpoint mode %s is unknown, ' 'fallback to auto mode.', mode) mode = 'auto' if mode == 'min': self.monitor_op = np.less self.best = np.Inf elif mode == 'max': self.monitor_op = np.greater self.best = -np.Inf else: if 'acc' in self.monitor or self.monitor.startswith('fmeasure'): self.monitor_op = np.greater self.best = -np.Inf else: self.monitor_op = np.less self.best = np.Inf if self.save_freq != 'epoch' and not isinstance(self.save_freq, int): raise ValueError( f'Unrecognized save_freq: {self.save_freq}. ' 'Expected save_freq are "epoch" or integer') # Only the chief worker writes model checkpoints, but all workers # restore checkpoint at on_train_begin(). self._chief_worker_only = False def on_train_begin(self, logs=None): if self.load_weights_on_restart: filepath_to_load = ( self._get_most_recently_modified_file_matching_pattern(self.filepath)) if (filepath_to_load is not None and self._checkpoint_exists(filepath_to_load)): try: # `filepath` may contain placeholders such as `{epoch:02d}`, and # thus it attempts to load the most recently modified file with file # name matching the pattern. self.model.load_weights(filepath_to_load) except (IOError, ValueError) as e: raise ValueError( f'Error loading file from {filepath_to_load}. 
Reason: {e}')

  def _implements_train_batch_hooks(self):
    # Only call batch hooks when saving on batch: with `save_freq='epoch'`
    # there is nothing to do per batch, so `fit()` can skip the hook entirely.
    return self.save_freq != 'epoch'

  def on_train_batch_end(self, batch, logs=None):
    # Batch-level saving: triggers once `save_freq` batches have elapsed
    # since the last save (see `_should_save_on_batch`).
    if self._should_save_on_batch(batch):
      self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)

  def on_epoch_begin(self, epoch, logs=None):
    # Remember the current epoch so batch-level saves can report/format it.
    self._current_epoch = epoch

  def on_epoch_end(self, epoch, logs=None):
    self.epochs_since_last_save += 1
    # pylint: disable=protected-access
    if self.save_freq == 'epoch':
      self._save_model(epoch=epoch, batch=None, logs=logs)

  def _should_save_on_batch(self, batch):
    """Handles batch-level saving logic, supports steps_per_execution.

    With `steps_per_execution=N` this hook is only called every N batches,
    so the number of batches elapsed is derived from the batch index delta
    rather than assumed to be 1.

    Args:
      batch: Integer, index of the batch within the current epoch.

    Returns:
      True if `save_freq` (an integer) batches have accumulated since the
      last save; always False when `save_freq == 'epoch'`.
    """
    if self.save_freq == 'epoch':
      return False

    if batch <= self._last_batch_seen:  # New epoch.
      add_batches = batch + 1  # batches are zero-indexed.
    else:
      add_batches = batch - self._last_batch_seen
    self._batches_seen_since_last_saving += add_batches
    self._last_batch_seen = batch

    if self._batches_seen_since_last_saving >= self.save_freq:
      # Reset the counter; any remainder beyond `save_freq` is discarded.
      self._batches_seen_since_last_saving = 0
      return True
    return False

  def _save_model(self, epoch, batch, logs):
    """Saves the model.

    Args:
        epoch: the epoch this iteration is in.
        batch: the batch this iteration is in. `None` if the `save_freq`
          is set to `epoch`.
        logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
    """
    logs = logs or {}

    # `period` only gates epoch-based saving; integer `save_freq` already
    # throttles via `_should_save_on_batch`.
    if isinstance(self.save_freq,
                  int) or self.epochs_since_last_save >= self.period:
      # Block only when saving interval is reached.
logs = tf_utils.sync_to_numpy_or_python_type(logs) self.epochs_since_last_save = 0 filepath = self._get_file_path(epoch, batch, logs) try: if self.save_best_only: current = logs.get(self.monitor) if current is None: logging.warning('Can save best model only with %s available, ' 'skipping.', self.monitor) else: if self.monitor_op(current, self.best): if self.verbose > 0: print('\nEpoch %05d: %s improved from %0.5f to %0.5f,' ' saving model to %s' % (epoch + 1, self.monitor, self.best, current, filepath)) self.best = current if self.save_weights_only: self.model.save_weights( filepath, overwrite=True, options=self._options) else: self.model.save(filepath, overwrite=True, options=self._options) else: if self.verbose > 0: print('\nEpoch %05d: %s did not improve from %0.5f' % (epoch + 1, self.monitor, self.best)) else: if self.verbose > 0: print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath)) if self.save_weights_only: self.model.save_weights( filepath, overwrite=True, options=self._options) else: self.model.save(filepath, overwrite=True, options=self._options) self._maybe_remove_file() except IsADirectoryError as e: # h5py 3.x raise IOError('Please specify a non-directory filepath for ' 'ModelCheckpoint. Filepath used is an existing ' f'directory: {filepath}') except IOError as e: # h5py 2.x # `e.errno` appears to be `None` so checking the content of `e.args[0]`. if 'is a directory' in str(e.args[0]).lower(): raise IOError('Please specify a non-directory filepath for ' 'ModelCheckpoint. Filepath used is an existing ' f'directory: f{filepath}') # Re-throw the error for any other causes. raise e def _get_file_path(self, epoch, batch, logs): """Returns the file path for checkpoint.""" # pylint: disable=protected-access try: # `filepath` may contain placeholders such as `{epoch:02d}`,`{batch:02d}` # and `{mape:.2f}`. A mismatch between logged metrics and the path's # placeholders can cause formatting to fail. 
      if batch is None or 'batch' in logs:
        # When `logs` already carries a 'batch' key, `**logs` supplies it to
        # `format`; passing `batch=` as well would raise a duplicate-keyword
        # error, so it is omitted here.
        file_path = self.filepath.format(epoch=epoch + 1, **logs)
      else:
        file_path = self.filepath.format(
            epoch=epoch + 1, batch=batch + 1, **logs)
    except KeyError as e:
      # A placeholder in `filepath` referenced a metric that is not in `logs`.
      raise KeyError(
          f'Failed to format this callback filepath: "{self.filepath}". '
          f'Reason: {e}')
    self._write_filepath = distributed_file_utils.write_filepath(
        file_path, self.model.distribute_strategy)
    return self._write_filepath

  def _maybe_remove_file(self):
    # Remove the checkpoint directory in multi-worker training where this worker
    # should not checkpoint. It is a dummy directory previously saved for sync
    # distributed training.
    distributed_file_utils.remove_temp_dir_with_filepath(
        self._write_filepath, self.model.distribute_strategy)

  def _checkpoint_exists(self, filepath):
    """Returns whether a checkpoint exists at `filepath`.

    Checks for an HDF5 file, a TF SavedModel directory, or a weights-only
    TF checkpoint (identified by its `.index` sidecar file).
    """
    if filepath.endswith('.h5'):
      return tf.io.gfile.exists(filepath)
    tf_saved_model_exists = tf.io.gfile.exists(filepath)
    tf_weights_only_checkpoint_exists = tf.io.gfile.exists(
        filepath + '.index')
    return tf_saved_model_exists or tf_weights_only_checkpoint_exists

  def _get_most_recently_modified_file_matching_pattern(self, pattern):
    """Returns the most recently modified filepath matching pattern.

    Pattern may contain python formatting placeholder. If
    `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
    check for most recently modified one that matches the pattern.

    In the rare case where there are more than one pattern-matching file having
    the same modified time that is most recent among all, return the filepath
    that is largest (by `>` operator, lexicographically using the numeric
    equivalents). This provides a tie-breaker when multiple files are most
    recent. Note that a larger `filepath` can sometimes indicate a later time of
    modification (for instance, when epoch/batch is used as formatting option),
    but not necessarily (when accuracy or loss is used).
The tie-breaker is put in the logic as best effort to return the most recent, and to avoid undeterministic result. Modified time of a file is obtained with `os.path.getmtime()`. This utility function is best demonstrated via an example: ```python file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5'] ] for file_path in file_paths: # Write something to each of the files self.assertEqual( _get_most_recently_modified_file_matching_pattern(path_pattern), file_paths[-1]) ``` Args: pattern: The file pattern that may optionally contain python placeholder such as `{epoch:02d}`. Returns: The most recently modified file's full filepath matching `pattern`. If `pattern` does not contain any placeholder, this returns the filepath that exactly matches `pattern`. Returns `None` if no match is found. """ dir_name = os.path.dirname(pattern) base_name = os.path.basename(pattern) base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$' # If tf.train.latest_checkpoint tells us there exists a latest checkpoint, # use that as it is more robust than `os.path.getmtime()`. latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name) if latest_tf_checkpoint is not None and re.match( base_name_regex, os.path.basename(latest_tf_checkpoint)): return latest_tf_checkpoint latest_mod_time = 0 file_path_with_latest_mod_time = None n_file_with_latest_mod_time = 0 file_path_with_largest_file_name = None if tf.io.gfile.exists(dir_name): for file_name in os.listdir(dir_name): # Only consider if `file_name` matches the pattern. 
if re.match(base_name_regex, file_name): file_path = os.path.join(dir_name, file_name) mod_time = os.path.getmtime(file_path) if (file_path_with_largest_file_name is None or file_path > file_path_with_largest_file_name): file_path_with_largest_file_name = file_path if mod_time > latest_mod_time: latest_mod_time = mod_time file_path_with_latest_mod_time = file_path # In the case a file with later modified time is found, reset # the counter for the number of files with latest modified time. n_file_with_latest_mod_time = 1 elif mod_time == latest_mod_time: # In the case a file has modified time tied with the most recent, # increment the counter for the number of files with latest modified # time by 1. n_file_with_latest_mod_time += 1 if n_file_with_latest_mod_time == 1: # Return the sole file that has most recent modified time. return file_path_with_latest_mod_time else: # If there are more than one file having latest modified time, return # the file path with the largest file name. return file_path_with_largest_file_name @keras_export('keras.callbacks.experimental.BackupAndRestore', v1=[]) class BackupAndRestore(Callback): """Callback to back up and restore the training state. `BackupAndRestore` callback is intended to recover from interruptions that happened in the middle of a model.fit execution by backing up the training states in a temporary checkpoint file (based on TF CheckpointManager) at the end of each epoch. If training restarted before completion, the training state and model are restored to the most recently saved state at the beginning of a new model.fit() run. Note that user is responsible to bring jobs back up. This callback is important for the backup and restore mechanism for fault tolerance purpose. And the model to be restored from an previous checkpoint is expected to be the same as the one used to back up. If user changes arguments passed to compile or fit, the checkpoint saved for fault tolerance can become invalid. Note: 1. 
  This callback is not compatible with disabling eager execution.

  2. A checkpoint is saved at the end of each epoch, when restoring we'll redo
  any partial work from an unfinished epoch in which the training got restarted
  (so the work done before an interruption doesn't affect the final model
  state).

  3. This works for both single worker and multi-worker mode, only
  MirroredStrategy and MultiWorkerMirroredStrategy are supported for now.

  Example:

  >>> class InterruptingCallback(tf.keras.callbacks.Callback):
  ...   def on_epoch_begin(self, epoch, logs=None):
  ...     if epoch == 4:
  ...       raise RuntimeError('Interrupting!')
  >>> callback = tf.keras.callbacks.experimental.BackupAndRestore(
  ... backup_dir="/tmp/backup")
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> try:
  ...   model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
  ...             batch_size=1, callbacks=[callback, InterruptingCallback()],
  ...             verbose=0)
  ... except:
  ...   pass
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=10, batch_size=1, callbacks=[callback],
  ...                     verbose=0)
  >>> # Only 6 more epochs are run, since first training got interrupted at
  >>> # zero-indexed epoch 4, second training will continue from 4 to 9.
  >>> len(history.history['loss'])
  6

  Args:
    backup_dir: String, path to store the checkpoint.
      e.g. backup_dir = os.path.join(working_dir, 'backup')
      This is the directory in which the system stores temporary files to
      recover the model from jobs terminated unexpectedly. The directory
      cannot be reused elsewhere to store other files, e.g. by
      BackupAndRestore callback of another training, or by another callback
      (ModelCheckpoint) of the same training.
  """

  def __init__(self, backup_dir):
    super(BackupAndRestore, self).__init__()
    self.backup_dir = backup_dir
    self._supports_tf_logs = True
    self._supported_strategies = (
        tf.distribute.MirroredStrategy,
        tf.distribute.MultiWorkerMirroredStrategy,
        tf.distribute.experimental.TPUStrategy, tf.distribute.TPUStrategy,
        tf.distribute.experimental.ParameterServerStrategy)

    if not tf.executing_eagerly():
      if tf.inside_function():
        raise ValueError('This Callback\'s method contains Python state and '
                         'should be called outside of `tf.function`s.')
      else:  # Legacy graph mode:
        raise ValueError(
            'BackupAndRestore only supports eager mode. In graph '
            'mode, consider using ModelCheckpoint to manually save '
            'and restore weights with `model.load_weights()` and by '
            'providing `initial_epoch` in `model.fit()` for fault tolerance.')

    # Only the chief worker writes model checkpoints, but all workers
    # restore checkpoint at on_train_begin().
    self._chief_worker_only = False

  def on_train_begin(self, logs=None):
    # TrainingState is used to manage the training state needed for
    # failure-recovery of a worker in training.
    # pylint: disable=protected-access

    if self.model._distribution_strategy and not isinstance(
        self.model.distribute_strategy, self._supported_strategies):
      raise NotImplementedError(
          f'{type(self.model.distribute_strategy)} is not supported yet. '
          'Currently BackupAndRestore callback only supports empty strategy, '
          'MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy.')
    self.model._training_state = (
        worker_training_state.WorkerTrainingState(self.model, self.backup_dir))
    self._training_state = self.model._training_state
    # Restore from the backup checkpoint if one exists (i.e. a prior run was
    # interrupted); otherwise this is a no-op.
    self._training_state.restore()

  def on_train_end(self, logs=None):
    # pylint: disable=protected-access
    # On exit of training, delete the training state backup file that was saved
    # for the purpose of worker recovery.
    self._training_state.delete_backup()

    # Clean up the training state.
    del self._training_state
    del self.model._training_state

  def on_epoch_end(self, epoch, logs=None):
    # Back up the model and current epoch for possible future recovery.
    self._training_state.back_up(epoch)


@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored metric has stopped improving.

  Assuming the goal of a training is to minimize the loss. With this, the
  metric to be monitored would be `'loss'`, and mode would be `'min'`. A
  `model.fit()` training loop will check at end of every epoch whether
  the loss is no longer decreasing, considering the `min_delta` and
  `patience` if applicable. Once it's found no longer decreasing,
  `model.stop_training` is marked True and the training terminates.

  The quantity to be monitored needs to be available in `logs` dict.
  To make it so, pass the loss or metrics at `model.compile()`.

  Args:
    monitor: Quantity to be monitored.
    min_delta: Minimum change in the monitored quantity
        to qualify as an improvement, i.e. an absolute
        change of less than min_delta, will count as no
        improvement.
    patience: Number of epochs with no improvement
        after which training will be stopped.
    verbose: verbosity mode.
    mode: One of `{"auto", "min", "max"}`. In `min` mode,
        training will stop when the quantity
        monitored has stopped decreasing; in `"max"`
        mode it will stop when the quantity
        monitored has stopped increasing; in `"auto"`
        mode, the direction is automatically inferred
        from the name of the monitored quantity.
    baseline: Baseline value for the monitored quantity.
        Training will stop if the model doesn't show improvement over the
        baseline.
    restore_best_weights: Whether to restore model weights from
        the epoch with the best value of the monitored quantity.
        If False, the model weights obtained at the last step of
        training are used. An epoch will be restored regardless
        of the performance relative to the `baseline`.
        If no epoch improves on `baseline`, training will run for `patience`
        epochs and restore weights from the best epoch in that set.

  Example:

  >>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
  >>> # This callback will stop the training when there is no improvement in
  >>> # the loss for three consecutive epochs.
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=10, batch_size=1, callbacks=[callback],
  ...                     verbose=0)
  >>> len(history.history['loss'])  # Only 4 epochs are run.
  4
  """

  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()

    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None

    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto': metrics with 'acc' in their name are maximized, everything
      # else (losses, errors) is minimized.
      if 'acc' in self.monitor:
        self.monitor_op = np.greater
      else:
        self.monitor_op = np.less

    # Fold the comparison direction into the sign of min_delta so that
    # `_is_improvement` can always subtract it.
    if self.monitor_op == np.greater:
      self.min_delta *= 1
    else:
      self.min_delta *= -1

  def on_train_begin(self, logs=None):
    # Allow instances to be re-used
    self.wait = 0
    self.stopped_epoch = 0
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf
    self.best_weights = None

  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.restore_best_weights and self.best_weights is None:
      # Restore the weights after first epoch if no progress is ever made.
      self.best_weights = self.model.get_weights()

    self.wait += 1
    if self._is_improvement(current, self.best):
      self.best = current
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
      # Only restart wait if we beat both the baseline and our previous best.
      if self.baseline is None or self._is_improvement(current, self.baseline):
        self.wait = 0

    # Only check after the first epoch.
    if self.wait >= self.patience and epoch > 0:
      self.stopped_epoch = epoch
      self.model.stop_training = True
      if self.restore_best_weights and self.best_weights is not None:
        if self.verbose > 0:
          print('Restoring model weights from the end of the best epoch.')
        self.model.set_weights(self.best_weights)

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

  def get_monitor_value(self, logs):
    # Returns logs[self.monitor], or None (with a warning) if absent.
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return monitor_value

  def _is_improvement(self, monitor_value, reference_value):
    # min_delta carries the sign of the comparison direction (see __init__),
    # so subtracting it works for both min and max modes.
    return self.monitor_op(monitor_value - self.min_delta, reference_value)


@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
  """Callback used to stream events to a server.

  Requires the `requests` library.
  Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
  HTTP POST, with a `data` argument which is a
  JSON-encoded dictionary of event data.
  If `send_as_json=True`, the content type of the request will be
  `"application/json"`.
  Otherwise the serialized JSON will be sent within a form.

  Args:
    root: String; root url of the target server.
    path: String; path relative to `root` to which the events will be sent.
    field: String; JSON field under which the data will be stored.
        The field is used only if the payload is sent within a form
        (i.e.
        send_as_json is set to False).
    headers: Dictionary; optional custom HTTP headers.
    send_as_json: Boolean; whether the request should be
        sent as `"application/json"`.
  """

  def __init__(self,
               root='http://localhost:9000',
               path='/publish/epoch/end/',
               field='data',
               headers=None,
               send_as_json=False):
    super(RemoteMonitor, self).__init__()

    self.root = root
    self.path = path
    self.field = field
    self.headers = headers
    self.send_as_json = send_as_json

  def on_epoch_end(self, epoch, logs=None):
    if requests is None:
      raise ImportError('RemoteMonitor requires the `requests` library.')
    logs = logs or {}
    send = {}
    send['epoch'] = epoch
    for k, v in logs.items():
      # np.ndarray and np.generic are not scalar types
      # therefore we must unwrap their scalar values and
      # pass to the json-serializable dict 'send'
      if isinstance(v, (np.ndarray, np.generic)):
        send[k] = v.item()
      else:
        send[k] = v
    try:
      if self.send_as_json:
        requests.post(self.root + self.path, json=send, headers=self.headers)
      else:
        requests.post(
            self.root + self.path, {self.field: json.dumps(send)},
            headers=self.headers)
    except requests.exceptions.RequestException:
      # Best-effort monitoring: a down server must not abort training.
      logging.warning('Warning: could not reach RemoteMonitor '
                      'root server at ' + str(self.root))


@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
  """Learning rate scheduler.

  At the beginning of every epoch, this callback gets the updated learning
  rate value from `schedule` function provided at `__init__`, with the current
  epoch and current learning rate, and applies the updated learning rate on
  the optimizer.

  Args:
    schedule: a function that takes an epoch index (integer, indexed from 0)
        and current learning rate (float) as inputs and returns a new
        learning rate as output (float).
    verbose: int. 0: quiet, 1: update messages.

  Example:

  >>> # This function keeps the initial learning rate for the first ten epochs
  >>> # and decreases it exponentially after that.
  >>> def scheduler(epoch, lr):
  ...   if epoch < 10:
  ...     return lr
  ...   else:
  ...     return lr * tf.math.exp(-0.1)
  >>>
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> round(model.optimizer.lr.numpy(), 5)
  0.01

  >>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=15, callbacks=[callback], verbose=0)
  >>> round(model.optimizer.lr.numpy(), 5)
  0.00607

  """

  def __init__(self, schedule, verbose=0):
    super(LearningRateScheduler, self).__init__()
    self.schedule = schedule
    self.verbose = verbose

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:  # new API: schedule(epoch, lr)
      lr = float(backend.get_value(self.model.optimizer.lr))
      lr = self.schedule(epoch, lr)
    except TypeError:  # Support for old API for backward compatibility
      lr = self.schedule(epoch)
    if not isinstance(lr, (tf.Tensor, float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       f'should be float. Got: {lr}')
    if isinstance(lr, tf.Tensor) and not lr.dtype.is_floating:
      raise ValueError(
          f'The dtype of `lr` Tensor should be float. Got: {lr.dtype}')
    backend.set_value(self.model.optimizer.lr, backend.get_value(lr))
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler setting learning '
            'rate to %s.' % (epoch + 1, lr))

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # Surface the effective learning rate in the epoch logs (e.g. History).
    logs['lr'] = backend.get_value(self.model.optimizer.lr)


def keras_model_summary(name, data, step=None):
  """Writes a Keras model as JSON to a Summary.

  Writing the Keras model configuration allows the TensorBoard graph plugin to
  render a conceptual graph, as opposed to graph of ops. In case the model
  fails to serialize as JSON, it ignores and returns False.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will
      be this name prefixed by any active name scopes.
    data: A Keras Model to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which
      must not be None.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  summary_metadata = tf.compat.v1.SummaryMetadata()
  # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
  # the rationale.
  summary_metadata.plugin_data.plugin_name = 'graph_keras_model'
  # version number = 1
  summary_metadata.plugin_data.content = b'1'

  try:
    json_string = data.to_json()
  except Exception as exc:  # pylint: disable=broad-except
    # An exception should not break a model code.
    logging.warning('Model failed to serialize as JSON. Ignoring... %s', exc)
    return False

  with tf.summary.experimental.summary_scope(name, 'graph_keras_model',
                                             [data, step]) as (tag, _):
    # Serialize on CPU to avoid placing a string constant on an accelerator.
    with tf.device('cpu:0'):
      tensor = tf.constant(json_string, dtype=tf.string)
    return tf.summary.write(
        tag=tag, tensor=tensor, step=step, metadata=summary_metadata)


@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
  # pylint: disable=line-too-long
  """Enable visualizations for TensorBoard.

  TensorBoard is a visualization tool provided with TensorFlow.

  This callback logs events for TensorBoard, including:

  * Metrics summary plots
  * Training graph visualization
  * Activation histograms
  * Sampled profiling

  When used in `Model.evaluate`, in addition to epoch summaries, there will be
  a summary that records evaluation metrics vs `Model.optimizer.iterations`
  written. The metric names will be prepended with `evaluation`, with
  `Model.optimizer.iterations` being the step in the visualized TensorBoard.

  If you have installed TensorFlow with pip, you should be able
  to launch TensorBoard from the command line:

  ```
  tensorboard --logdir=path_to_your_logs
  ```

  You can find more information about TensorBoard
  [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).

  Args:
    log_dir: the path of the directory where to save the log files to be
      parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs')
      This directory should not be reused by any other callbacks.
    histogram_freq: frequency (in epochs) at which to compute activation and
      weight histograms for the layers of the model. If set to 0, histograms
      won't be computed. Validation data (or split) must be specified for
      histogram visualizations.
    write_graph: whether to visualize the graph in TensorBoard. The log file
      can become quite large when write_graph is set to True.
    write_images: whether to write model weights to visualize as image in
      TensorBoard.
    write_steps_per_second: whether to log the training steps per second into
      Tensorboard. This supports both epoch and batch frequency logging.
    update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
      writes the losses and metrics to TensorBoard after each batch. The same
      applies for `'epoch'`. If using an integer, let's say `1000`, the
      callback will write the metrics and losses to TensorBoard every 1000
      batches. Note that writing too frequently to TensorBoard can slow down
      your training.
    profile_batch: Profile the batch(es) to sample compute characteristics.
      profile_batch must be a non-negative integer or a tuple of integers.
      A pair of positive integers signify a range of batches to profile.
      By default, it will profile the second batch. Set profile_batch=0
      to disable profiling.
    embeddings_freq: frequency (in epochs) at which embedding layers will be
      visualized. If set to 0, embeddings won't be visualized.
    embeddings_metadata: Dictionary which maps embedding layer names to the
      filename of a file in which to save metadata for the embedding layer.
      In case the same metadata file is to be
      used for all embedding layers, a single filename can be passed.

  Examples:

  Basic usage:

  ```python
  tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  # Then run the tensorboard command to view the visualizations.
  ```

  Custom batch-level summaries in a subclassed Model:

  ```python
  class MyModel(tf.keras.Model):

    def build(self, _):
      self.dense = tf.keras.layers.Dense(10)

    def call(self, x):
      outputs = self.dense(x)
      tf.summary.histogram('outputs', outputs)
      return outputs

  model = MyModel()
  model.compile('sgd', 'mse')

  # Make sure to set `update_freq=N` to log a batch-level summary every N
  # batches. In addition to any `tf.summary` contained in `Model.call`,
  # metrics added in `Model.compile` will be logged every N batches.
  tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
  model.fit(x_train, y_train, callbacks=[tb_callback])
  ```

  Custom batch-level summaries in a Functional API Model:

  ```python
  def my_summary(x):
    tf.summary.histogram('x', x)
    return x

  inputs = tf.keras.Input(10)
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Lambda(my_summary)(x)
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', 'mse')

  # Make sure to set `update_freq=N` to log a batch-level summary every N
  # batches. In addition to any `tf.summary` contained in `Model.call`,
  # metrics added in `Model.compile` will be logged every N batches.
  tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
  model.fit(x_train, y_train, callbacks=[tb_callback])
  ```

  Profiling:

  ```python
  # Profile a single batch, e.g. the 5th batch.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir='./logs', profile_batch=5)
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])

  # Profile a range of batches, e.g. from 10 to 20.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir='./logs', profile_batch=(10,20))
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  ```
  """

  # pylint: enable=line-too-long

  def __init__(self,
               log_dir='logs',
               histogram_freq=0,
               write_graph=True,
               write_images=False,
               write_steps_per_second=False,
               update_freq='epoch',
               profile_batch=2,
               embeddings_freq=0,
               embeddings_metadata=None,
               **kwargs):
    super(TensorBoard, self).__init__()
    self._supports_tf_logs = True
    self._validate_kwargs(kwargs)

    self.log_dir = path_to_string(log_dir)
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    self.write_steps_per_second = write_steps_per_second
    # Normalize 'batch' to the integer 1 (write every batch).
    self.update_freq = 1 if update_freq == 'batch' else update_freq
    self.embeddings_freq = embeddings_freq
    self.embeddings_metadata = embeddings_metadata
    self._init_profile_batch(profile_batch)
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._train_accumulated_time = 0
    self._batch_start_time = 0

    # Lazily initialized in order to avoid creating event files when
    # not needed.
    self._writers = {}

    # Used to restore any existing `SummaryWriter` after training ends.
    self._prev_summary_state = []

  def _validate_kwargs(self, kwargs):
    """Handle arguments were supported in V1."""
    if kwargs.get('write_grads', False):
      logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
                      'for the `TensorBoard` Callback.')
    if kwargs.get('batch_size', False):
      logging.warning('`batch_size` is no longer needed in the '
                      '`TensorBoard` Callback and will be ignored '
                      'in TensorFlow 2.0.')
    if kwargs.get('embeddings_layer_names', False):
      logging.warning('`embeddings_layer_names` is not supported in '
                      'TensorFlow 2.0. Instead, all `Embedding` layers '
                      'will be visualized.')
    if kwargs.get('embeddings_data', False):
      logging.warning('`embeddings_data` is not supported in TensorFlow '
                      '2.0. Instead, all `Embedding` variables will be '
                      'visualized.')

    supported_kwargs = {
        'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
    }
    unrecognized_kwargs = set(kwargs.keys()) - supported_kwargs

    # Only allow kwargs that were supported in V1.
    if unrecognized_kwargs:
      raise ValueError(
          'Unrecognized arguments in `TensorBoard` Callback: '
          f'{unrecognized_kwargs}. Supported kwargs are: {supported_kwargs}')

  def set_model(self, model):
    """Sets Keras model and writes graph if specified."""
    self.model = model
    self._log_write_dir = self._get_log_write_dir()

    self._train_dir = os.path.join(self._log_write_dir, 'train')
    self._train_step = self.model._train_counter  # pylint: disable=protected-access

    self._val_dir = os.path.join(self._log_write_dir, 'validation')
    self._val_step = self.model._test_counter  # pylint: disable=protected-access

    self._writers = {}  # Resets writers.

    self._should_write_train_graph = False
    if self.write_graph:
      self._write_keras_model_summary()
      self._should_write_train_graph = True
    if self.embeddings_freq:
      self._configure_embeddings()

  @property
  def _train_writer(self):
    # Lazily created so that no event file appears until first use.
    if 'train' not in self._writers:
      self._writers['train'] = tf.summary.create_file_writer(
          self._train_dir)
    return self._writers['train']

  @property
  def _val_writer(self):
    # Lazily created so that no event file appears until first use.
    if 'val' not in self._writers:
      self._writers['val'] = tf.summary.create_file_writer(self._val_dir)
    return self._writers['val']

  def _get_log_write_dir(self):
    """For multi-worker, only chief should write, others write to '/tmp'."""
    return distributed_file_utils.write_dirpath(self.log_dir,
                                                self.model.distribute_strategy)

  def _delete_tmp_write_dir(self):
    """Deletes tmp write directories for multi-worker."""
    distributed_file_utils.remove_temp_dirpath(self.log_dir,
                                               self.model.distribute_strategy)

  def _write_keras_model_train_graph(self):
    """Writes Keras model train_function graph to TensorBoard."""
    with self._train_writer.as_default():
      with tf.summary.record_if(True):
        train_fn = self.model.train_tf_function
        # If the train_function is a `tf.function`, we can write out a graph
        if hasattr(train_fn, 'function_spec'):
          tf.summary.graph(train_fn._concrete_stateful_fn.graph)  # pylint: disable=protected-access

  def _write_keras_model_summary(self):
    """Writes Keras graph network summary to TensorBoard."""
    with self._train_writer.as_default():
      with tf.summary.record_if(True):
        summary_writable = (
            self.model._is_graph_network or  # pylint: disable=protected-access
            self.model.__class__.__name__ == 'Sequential')  # pylint: disable=protected-access
        if summary_writable:
          keras_model_summary('keras', self.model, step=0)

  def _configure_embeddings(self):
    """Configure the Projector for embeddings."""
    # TODO(omalleyt): Add integration tests.
    from google.protobuf import text_format
    from keras.layers import embeddings
    from keras.protobuf import projector_config_pb2

    config = projector_config_pb2.ProjectorConfig()
    for layer in self.model.layers:
      if isinstance(layer, embeddings.Embedding):
        embedding = config.embeddings.add()
        # Embeddings are always the first layer, so this naming should be
        # consistent in any keras models checkpoints.
        name = 'layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE'
        embedding.tensor_name = name

        if self.embeddings_metadata is not None:
          if isinstance(self.embeddings_metadata, str):
            embedding.metadata_path = self.embeddings_metadata
          else:
            if layer.name in self.embeddings_metadata.keys():
              embedding.metadata_path = self.embeddings_metadata.pop(layer.name)

    if self.embeddings_metadata and not isinstance(self.embeddings_metadata,
                                                   str):
      raise ValueError('Unrecognized `Embedding` layer names passed to '
                       '`keras.callbacks.TensorBoard` `embeddings_metadata` '
                       f'argument: {self.embeddings_metadata.keys()}')

    config_pbtxt = text_format.MessageToString(config)
    path = os.path.join(self._log_write_dir, 'projector_config.pbtxt')
    with tf.io.gfile.GFile(path, 'w') as f:
      f.write(config_pbtxt)

  def _push_writer(self, writer, step):
    """Sets the default writer for custom batch-level summaries."""
    if self.update_freq == 'epoch':
      return

    should_record = lambda: tf.equal(step % self.update_freq, 0)
    # TODO(b/151339474): Fix deadlock when not using .value() here.
    summary_context = (writer.as_default(step.value()),
                       tf.summary.record_if(should_record))
    self._prev_summary_state.append(summary_context)
    summary_context[0].__enter__()
    summary_context[1].__enter__()

  def _pop_writer(self):
    """Pops the current writer."""
    if self.update_freq == 'epoch':
      return

    # See _push_writer for the content of the previous_context, which is pair
    # of context.
    previous_context = self._prev_summary_state.pop()
    # Exit in reverse order of entry (record_if first, then writer).
    previous_context[1].__exit__(*sys.exc_info())
    previous_context[0].__exit__(*sys.exc_info())

  def _close_writers(self):
    for writer in self._writers.values():
      writer.close()

  def _init_profile_batch(self, profile_batch):
    """Validate profile_batch value and set the range of batches to profile.

    Sets values of _start_batch and _stop_batch attributes,
    specifying the start and stop batch to profile.
    Setting `profile_batch=0` disables profiling.

    Args:
      profile_batch: The range of batches to profile. Should be a non-negative
        integer or a comma separated string of pair of positive integers. A
        pair of positive integers signify a range of batches to profile.

    Raises:
      ValueError: If profile_batch is not an integer or a comma separated pair
                  of positive integers.
    """
    profile_batch_error_message = (
        'profile_batch must be a non-negative integer or 2-tuple of positive '
        'integers. A pair of positive integers signifies a range of batches '
        f'to profile. Found: {profile_batch}')

    # Support legacy way of specifying "start,stop" or "start" as str.
    if isinstance(profile_batch, str):
      profile_batch = str(profile_batch).split(',')
      profile_batch = tf.nest.map_structure(int, profile_batch)

    if isinstance(profile_batch, int):
      self._start_batch = profile_batch
      self._stop_batch = profile_batch
    elif (isinstance(profile_batch, (tuple, list)) and
          len(profile_batch) == 2):
      self._start_batch, self._stop_batch = profile_batch
    else:
      raise ValueError(profile_batch_error_message)

    if self._start_batch < 0 or self._stop_batch < self._start_batch:
      raise ValueError(profile_batch_error_message)

    # True when the profiler was successfully started by this callback.
    # We track the status here to make sure callbacks do not interfere with
    # each other. The callback will only stop the profiler it started.
    self._profiler_started = False
    if self._start_batch > 0:
      # Warm up and improve the profiling accuracy.
self._start_profiler(logdir='') self._stop_profiler(save=False) # True when a trace is running. self._is_tracing = False # Setting `profile_batch=0` disables profiling. self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0) def on_train_begin(self, logs=None): self._global_train_batch = 0 self._previous_epoch_iterations = 0 self._train_accumulated_time = 0 self._push_writer(self._train_writer, self._train_step) def on_train_end(self, logs=None): self._pop_writer() if self._is_tracing: self._stop_trace() self._close_writers() self._delete_tmp_write_dir() def on_test_begin(self, logs=None): self._push_writer(self._val_writer, self._val_step) def on_test_end(self, logs=None): if self.model.optimizer and hasattr(self.model.optimizer, 'iterations'): with tf.summary.record_if(True), self._val_writer.as_default(): for name, value in logs.items(): tf.summary.scalar( 'evaluation_' + name + '_vs_iterations', value, step=self.model.optimizer.iterations.read_value()) self._pop_writer() def _implements_train_batch_hooks(self): # Only call batch hooks when tracing or write_steps_per_second are enabled return self._should_trace or self.write_steps_per_second def on_train_batch_begin(self, batch, logs=None): self._global_train_batch += 1 if self.write_steps_per_second: self._batch_start_time = time.time() if not self._should_trace: return if self._global_train_batch == self._start_batch: self._start_trace() def on_train_batch_end(self, batch, logs=None): if self._should_write_train_graph: self._write_keras_model_train_graph() self._should_write_train_graph = False if self.write_steps_per_second: batch_run_time = time.time() - self._batch_start_time self._train_accumulated_time += batch_run_time tf.summary.scalar( 'batch_steps_per_second', 1. 
/ batch_run_time, step=self._train_step) if not self._should_trace: return if self._is_tracing and self._global_train_batch >= self._stop_batch: self._stop_trace() def on_epoch_begin(self, epoch, logs=None): # Keeps track of epoch for profiling. if self.write_steps_per_second: self._previous_epoch_iterations = self.model.optimizer.iterations.numpy() self._train_accumulated_time = 0 def on_epoch_end(self, epoch, logs=None): """Runs metrics and histogram summaries at epoch end.""" self._log_epoch_metrics(epoch, logs) if self.histogram_freq and epoch % self.histogram_freq == 0: self._log_weights(epoch) if self.embeddings_freq and epoch % self.embeddings_freq == 0: self._log_embeddings(epoch) def _start_trace(self): tf.summary.trace_on(graph=True, profiler=False) self._start_profiler(logdir=self.log_dir) self._is_tracing = True def _stop_trace(self, batch=None): """Logs the trace graph to TensorBoard.""" if batch is None: batch = self._stop_batch with self._train_writer.as_default(): with tf.summary.record_if(True): # TODO(b/126388999): Remove step info in the summary name. tf.summary.trace_export(name='batch_%d' % batch, step=batch) self._stop_profiler() self._is_tracing = False def _collect_learning_rate(self, logs): lr_schedule = getattr(self.model.optimizer, 'lr', None) if isinstance(lr_schedule, learning_rate_schedule.LearningRateSchedule): logs['learning_rate'] = lr_schedule(self.model.optimizer.iterations) return logs def _compute_steps_per_second(self): current_iteration = self.model.optimizer.iterations.numpy() steps_per_second = ((current_iteration - self._previous_epoch_iterations) / (self._train_accumulated_time)) return steps_per_second def _log_epoch_metrics(self, epoch, logs): """Writes epoch metrics out as scalar summaries. Args: epoch: Int. The global step to use for TensorBoard. logs: Dict. Keys are scalar summary names, values are scalars. 
""" if not logs: return train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')} val_logs = {k: v for k, v in logs.items() if k.startswith('val_')} train_logs = self._collect_learning_rate(train_logs) if self.write_steps_per_second: train_logs['steps_per_second'] = self._compute_steps_per_second() with tf.summary.record_if(True): if train_logs: with self._train_writer.as_default(): for name, value in train_logs.items(): tf.summary.scalar('epoch_' + name, value, step=epoch) if val_logs: with self._val_writer.as_default(): for name, value in val_logs.items(): name = name[4:] # Remove 'val_' prefix. tf.summary.scalar('epoch_' + name, value, step=epoch) def _log_weights(self, epoch): """Logs the weights of the Model to TensorBoard.""" with self._train_writer.as_default(): with tf.summary.record_if(True): for layer in self.model.layers: for weight in layer.weights: weight_name = weight.name.replace(':', '_') tf.summary.histogram(weight_name, weight, step=epoch) if self.write_images: self._log_weight_as_image(weight, weight_name, epoch) self._train_writer.flush() def _log_weight_as_image(self, weight, weight_name, epoch): """Logs a weight as a TensorBoard image.""" w_img = tf.squeeze(weight) shape = backend.int_shape(w_img) if len(shape) == 1: # Bias case w_img = tf.reshape(w_img, [1, shape[0], 1, 1]) elif len(shape) == 2: # Dense layer kernel case if shape[0] > shape[1]: w_img = tf.transpose(w_img) shape = backend.int_shape(w_img) w_img = tf.reshape(w_img, [1, shape[0], shape[1], 1]) elif len(shape) == 3: # ConvNet case if backend.image_data_format() == 'channels_last': # Switch to channels_first to display every kernel as a separate # image. w_img = tf.transpose(w_img, perm=[2, 0, 1]) shape = backend.int_shape(w_img) w_img = tf.reshape(w_img, [shape[0], shape[1], shape[2], 1]) shape = backend.int_shape(w_img) # Not possible to handle 3D convnets etc. 
if len(shape) == 4 and shape[-1] in [1, 3, 4]: tf.summary.image(weight_name, w_img, step=epoch) def _log_embeddings(self, epoch): embeddings_ckpt = os.path.join(self._log_write_dir, 'train', 'keras_embedding.ckpt-{}'.format(epoch)) self.model.save_weights(embeddings_ckpt) def _start_profiler(self, logdir): """Starts the profiler if currently inactive. Args: logdir: Directory where profiler results will be saved. """ if self._profiler_started: return try: tf.profiler.experimental.start(logdir=logdir) self._profiler_started = True except tf.errors.AlreadyExistsError as e: # Profiler errors should not be fatal. logging.error('Failed to start profiler: %s', e.message) def _stop_profiler(self, save=True): """Stops the profiler if currently active. Args: save: Whether to save the profiler results to TensorBoard. """ if not self._profiler_started: return try: tf.profiler.experimental.stop(save=save) except tf.errors.UnavailableError as e: # Profiler errors should not be fatal. logging.error('Failed to stop profiler: %s', e.message) finally: self._profiler_started = False @keras_export('keras.callbacks.ReduceLROnPlateau') class ReduceLROnPlateau(Callback): """Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. This callback monitors a quantity and if no improvement is seen for a 'patience' number of epochs, the learning rate is reduced. Example: ```python reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001) model.fit(X_train, Y_train, callbacks=[reduce_lr]) ``` Args: monitor: quantity to be monitored. factor: factor by which the learning rate will be reduced. `new_lr = lr * factor`. patience: number of epochs with no improvement after which learning rate will be reduced. verbose: int. 0: quiet, 1: update messages. mode: one of `{'auto', 'min', 'max'}`. 
In `'min'` mode, the learning rate will be reduced when the quantity monitored has stopped decreasing; in `'max'` mode it will be reduced when the quantity monitored has stopped increasing; in `'auto'` mode, the direction is automatically inferred from the name of the monitored quantity. min_delta: threshold for measuring the new optimum, to only focus on significant changes. cooldown: number of epochs to wait before resuming normal operation after lr has been reduced. min_lr: lower bound on the learning rate. """ def __init__(self, monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0, **kwargs): super(ReduceLROnPlateau, self).__init__() self.monitor = monitor if factor >= 1.0: raise ValueError( f'ReduceLROnPlateau does not support a factor >= 1.0. Got {factor}') if 'epsilon' in kwargs: min_delta = kwargs.pop('epsilon') logging.warning('`epsilon` argument is deprecated and ' 'will be removed, use `min_delta` instead.') self.factor = factor self.min_lr = min_lr self.min_delta = min_delta self.patience = patience self.verbose = verbose self.cooldown = cooldown self.cooldown_counter = 0 # Cooldown counter. self.wait = 0 self.best = 0 self.mode = mode self.monitor_op = None self._reset() def _reset(self): """Resets wait counter and cooldown counter. 
""" if self.mode not in ['auto', 'min', 'max']: logging.warning('Learning rate reduction mode %s is unknown, ' 'fallback to auto mode.', self.mode) self.mode = 'auto' if (self.mode == 'min' or (self.mode == 'auto' and 'acc' not in self.monitor)): self.monitor_op = lambda a, b: np.less(a, b - self.min_delta) self.best = np.Inf else: self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta) self.best = -np.Inf self.cooldown_counter = 0 self.wait = 0 def on_train_begin(self, logs=None): self._reset() def on_epoch_end(self, epoch, logs=None): logs = logs or {} logs['lr'] = backend.get_value(self.model.optimizer.lr) current = logs.get(self.monitor) if current is None: logging.warning('Learning rate reduction is conditioned on metric `%s` ' 'which is not available. Available metrics are: %s', self.monitor, ','.join(list(logs.keys()))) else: if self.in_cooldown(): self.cooldown_counter -= 1 self.wait = 0 if self.monitor_op(current, self.best): self.best = current self.wait = 0 elif not self.in_cooldown(): self.wait += 1 if self.wait >= self.patience: old_lr = backend.get_value(self.model.optimizer.lr) if old_lr > np.float32(self.min_lr): new_lr = old_lr * self.factor new_lr = max(new_lr, self.min_lr) backend.set_value(self.model.optimizer.lr, new_lr) if self.verbose > 0: print('\nEpoch %05d: ReduceLROnPlateau reducing learning ' 'rate to %s.' % (epoch + 1, new_lr)) self.cooldown_counter = self.cooldown self.wait = 0 def in_cooldown(self): return self.cooldown_counter > 0 @keras_export('keras.callbacks.CSVLogger') class CSVLogger(Callback): """Callback that streams epoch results to a CSV file. Supports all values that can be represented as a string, including 1D iterables such as `np.ndarray`. Example: ```python csv_logger = CSVLogger('training.log') model.fit(X_train, Y_train, callbacks=[csv_logger]) ``` Args: filename: Filename of the CSV file, e.g. `'run/log.csv'`. separator: String used to separate elements in the CSV file. append: Boolean. 
True: append if file exists (useful for continuing training). False: overwrite existing file. """ def __init__(self, filename, separator=',', append=False): self.sep = separator self.filename = path_to_string(filename) self.append = append self.writer = None self.keys = None self.append_header = True super(CSVLogger, self).__init__() def on_train_begin(self, logs=None): if self.append: if tf.io.gfile.exists(self.filename): with tf.io.gfile.GFile(self.filename, 'r') as f: self.append_header = not bool(len(f.readline())) mode = 'a' else: mode = 'w' self.csv_file = tf.io.gfile.GFile(self.filename, mode) def on_epoch_end(self, epoch, logs=None): logs = logs or {} def handle_value(k): is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0 if isinstance(k, str): return k elif isinstance(k, collections.abc.Iterable) and not is_zero_dim_ndarray: return '"[%s]"' % (', '.join(map(str, k))) else: return k if self.keys is None: self.keys = sorted(logs.keys()) if self.model.stop_training: # We set NA so that csv parsers do not fail for this last epoch. logs = dict((k, logs[k]) if k in logs else (k, 'NA') for k in self.keys) if not self.writer: class CustomDialect(csv.excel): delimiter = self.sep fieldnames = ['epoch'] + self.keys self.writer = csv.DictWriter( self.csv_file, fieldnames=fieldnames, dialect=CustomDialect) if self.append_header: self.writer.writeheader() row_dict = collections.OrderedDict({'epoch': epoch}) row_dict.update((key, handle_value(logs[key])) for key in self.keys) self.writer.writerow(row_dict) self.csv_file.flush() def on_train_end(self, logs=None): self.csv_file.close() self.writer = None @keras_export('keras.callbacks.LambdaCallback') class LambdaCallback(Callback): r"""Callback for creating simple, custom callbacks on-the-fly. This callback is constructed with anonymous functions that will be called at the appropriate time (during `Model.{fit | evaluate | predict}`). 
Note that the callbacks expects positional arguments, as: - `on_epoch_begin` and `on_epoch_end` expect two positional arguments: `epoch`, `logs` - `on_batch_begin` and `on_batch_end` expect two positional arguments: `batch`, `logs` - `on_train_begin` and `on_train_end` expect one positional argument: `logs` Args: on_epoch_begin: called at the beginning of every epoch. on_epoch_end: called at the end of every epoch. on_batch_begin: called at the beginning of every batch. on_batch_end: called at the end of every batch. on_train_begin: called at the beginning of model training. on_train_end: called at the end of model training. Example: ```python # Print the batch number at the beginning of every batch. batch_print_callback = LambdaCallback( on_batch_begin=lambda batch,logs: print(batch)) # Stream the epoch loss to a file in JSON format. The file content # is not well-formed JSON but rather has a JSON object per line. import json json_log = open('loss_log.json', mode='wt', buffering=1) json_logging_callback = LambdaCallback( on_epoch_end=lambda epoch, logs: json_log.write( json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'), on_train_end=lambda logs: json_log.close() ) # Terminate some processes after having finished model training. processes = ... 
cleanup_callback = LambdaCallback( on_train_end=lambda logs: [ p.terminate() for p in processes if p.is_alive()]) model.fit(..., callbacks=[batch_print_callback, json_logging_callback, cleanup_callback]) ``` """ def __init__(self, on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None, **kwargs): super(LambdaCallback, self).__init__() self.__dict__.update(kwargs) if on_epoch_begin is not None: self.on_epoch_begin = on_epoch_begin else: self.on_epoch_begin = lambda epoch, logs: None if on_epoch_end is not None: self.on_epoch_end = on_epoch_end else: self.on_epoch_end = lambda epoch, logs: None if on_batch_begin is not None: self.on_batch_begin = on_batch_begin else: self.on_batch_begin = lambda batch, logs: None if on_batch_end is not None: self.on_batch_end = on_batch_end else: self.on_batch_end = lambda batch, logs: None if on_train_begin is not None: self.on_train_begin = on_train_begin else: self.on_train_begin = lambda logs: None if on_train_end is not None: self.on_train_end = on_train_end else: self.on_train_end = lambda logs: None
109,326
36.881843
100
py
keras
keras-master/keras/combinations.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This module customizes `test_combinations` for `tf.keras` related tests.""" import tensorflow.compat.v2 as tf import functools from keras import testing_utils KERAS_MODEL_TYPES = ['functional', 'subclass', 'sequential'] def keras_mode_combinations(mode=None, run_eagerly=None): """Returns the default test combinations for tf.keras tests. Note that if tf2 is enabled, then v1 session test will be skipped. Args: mode: List of modes to run the tests. The valid options are 'graph' and 'eager'. Default to ['graph', 'eager'] if not specified. If a empty list is provide, then the test will run under the context based on tf's version, eg graph for v1 and eager for v2. run_eagerly: List of `run_eagerly` value to be run with the tests. Default to [True, False] if not specified. Note that for `graph` mode, run_eagerly value will only be False. Returns: A list contains all the combinations to be used to generate test cases. 
""" if mode is None: mode = ['eager'] if tf.__internal__.tf2.enabled() else ['graph', 'eager'] if run_eagerly is None: run_eagerly = [True, False] result = [] if 'eager' in mode: result += tf.__internal__.test.combinations.combine(mode=['eager'], run_eagerly=run_eagerly) if 'graph' in mode: result += tf.__internal__.test.combinations.combine(mode=['graph'], run_eagerly=[False]) return result def keras_model_type_combinations(): return tf.__internal__.test.combinations.combine(model_type=KERAS_MODEL_TYPES) class KerasModeCombination(tf.__internal__.test.combinations.TestCombination): """Combination for Keras test mode. It by default includes v1_session, v2_eager and v2_tf_function. """ def context_managers(self, kwargs): run_eagerly = kwargs.pop('run_eagerly', None) if run_eagerly is not None: return [testing_utils.run_eagerly_scope(run_eagerly)] else: return [] def parameter_modifiers(self): return [tf.__internal__.test.combinations.OptionalParameter('run_eagerly')] class KerasModelTypeCombination(tf.__internal__.test.combinations.TestCombination): """Combination for Keras model types when doing model test. It by default includes 'functional', 'subclass', 'sequential'. Various methods in `testing_utils` to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models. 
""" def context_managers(self, kwargs): model_type = kwargs.pop('model_type', None) if model_type in KERAS_MODEL_TYPES: return [testing_utils.model_type_scope(model_type)] else: return [] def parameter_modifiers(self): return [tf.__internal__.test.combinations.OptionalParameter('model_type')] _defaults = tf.__internal__.test.combinations.generate.keywords['test_combinations'] generate = functools.partial( tf.__internal__.test.combinations.generate, test_combinations=_defaults + (KerasModeCombination(), KerasModelTypeCombination())) combine = tf.__internal__.test.combinations.combine times = tf.__internal__.test.combinations.times NamedObject = tf.__internal__.test.combinations.NamedObject
3,901
36.161905
96
py
keras
keras-master/keras/combinations_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras combinations.""" import tensorflow.compat.v2 as tf import unittest from absl.testing import parameterized from keras import combinations from keras import models as keras_models from keras import testing_utils class CombinationsTest(tf.test.TestCase): def test_run_all_keras_modes(self): test_params = [] class ExampleTest(parameterized.TestCase): def runTest(self): pass @combinations.generate(combinations.keras_mode_combinations()) def testBody(self): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() test_params.append((mode, should_run_eagerly)) e = ExampleTest() if not tf.__internal__.tf2.enabled(): e.testBody_test_mode_graph_runeagerly_False() e.testBody_test_mode_eager_runeagerly_True() e.testBody_test_mode_eager_runeagerly_False() if not tf.__internal__.tf2.enabled(): self.assertLen(test_params, 3) self.assertAllEqual(test_params, [ ("graph", False), ("eager", True), ("eager", False), ]) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(test_params, 6) else: self.assertLen(test_params, 2) self.assertAllEqual(test_params, [ ("eager", True), ("eager", False), ]) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(test_params, 4) def 
test_generate_keras_mode_eager_only(self): result = combinations.keras_mode_combinations(mode=["eager"]) self.assertLen(result, 2) self.assertEqual(result[0], {"mode": "eager", "run_eagerly": True}) self.assertEqual(result[1], {"mode": "eager", "run_eagerly": False}) def test_generate_keras_mode_skip_run_eagerly(self): result = combinations.keras_mode_combinations(run_eagerly=[False]) if tf.__internal__.tf2.enabled(): self.assertLen(result, 1) self.assertEqual(result[0], {"mode": "eager", "run_eagerly": False}) else: self.assertLen(result, 2) self.assertEqual(result[0], {"mode": "eager", "run_eagerly": False}) self.assertEqual(result[1], {"mode": "graph", "run_eagerly": False}) def test_run_all_keras_model_types(self): model_types = [] models = [] class ExampleTest(parameterized.TestCase): def runTest(self): pass @combinations.generate(combinations.keras_model_type_combinations()) def testBody(self): model_types.append(testing_utils.get_model_type()) models.append(testing_utils.get_small_mlp(1, 4, input_dim=3)) e = ExampleTest() e.testBody_test_modeltype_functional() e.testBody_test_modeltype_subclass() e.testBody_test_modeltype_sequential() self.assertLen(model_types, 3) self.assertAllEqual(model_types, [ "functional", "subclass", "sequential" ]) # Validate that the models are what they should be self.assertTrue(models[0]._is_graph_network) self.assertFalse(models[1]._is_graph_network) self.assertNotIsInstance(models[0], keras_models.Sequential) self.assertNotIsInstance(models[1], keras_models.Sequential) self.assertIsInstance(models[2], keras_models.Sequential) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(model_types, 6) def test_combine_combinations(self): test_cases = [] @combinations.generate(combinations.times( combinations.keras_mode_combinations(), combinations.keras_model_type_combinations())) class ExampleTest(parameterized.TestCase): def runTest(self): pass 
@parameterized.named_parameters(dict(testcase_name="_arg", arg=True)) def testBody(self, arg): del arg mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() test_cases.append((mode, should_run_eagerly, testing_utils.get_model_type())) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) expected_combinations = [ ("eager", False, "functional"), ("eager", False, "sequential"), ("eager", False, "subclass"), ("eager", True, "functional"), ("eager", True, "sequential"), ("eager", True, "subclass"), ] if not tf.__internal__.tf2.enabled(): expected_combinations.extend([ ("graph", False, "functional"), ("graph", False, "sequential"), ("graph", False, "subclass"), ]) self.assertAllEqual(sorted(test_cases), expected_combinations) if __name__ == "__main__": tf.test.main()
5,548
31.261628
80
py
keras
keras-master/keras/testing_utils.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for unit-testing Keras.""" import tensorflow.compat.v2 as tf import collections import contextlib import functools import itertools import threading import numpy as np from tensorflow.python.framework import test_util from keras import backend from keras import layers from keras import models from keras.engine import base_layer_utils from keras.optimizer_v2 import adadelta as adadelta_v2 from keras.optimizer_v2 import adagrad as adagrad_v2 from keras.optimizer_v2 import adam as adam_v2 from keras.optimizer_v2 import adamax as adamax_v2 from keras.optimizer_v2 import gradient_descent as gradient_descent_v2 from keras.optimizer_v2 import nadam as nadam_v2 from keras.optimizer_v2 import rmsprop as rmsprop_v2 from keras.utils import tf_contextlib from keras.utils import tf_inspect def string_test(actual, expected): np.testing.assert_array_equal(actual, expected) def numeric_test(actual, expected): np.testing.assert_allclose(actual, expected, rtol=1e-3, atol=1e-6) def get_test_data(train_samples, test_samples, input_shape, num_classes, random_seed=None): """Generates test data to train a model on. Args: train_samples: Integer, how many training samples to generate. test_samples: Integer, how many test samples to generate. input_shape: Tuple of integers, shape of the inputs. 
num_classes: Integer, number of classes for the data and targets. random_seed: Integer, random seed used by numpy to generate data. Returns: A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ if random_seed is not None: np.random.seed(random_seed) num_sample = train_samples + test_samples templates = 2 * num_classes * np.random.random((num_classes,) + input_shape) y = np.random.randint(0, num_classes, size=(num_sample,)) x = np.zeros((num_sample,) + input_shape, dtype=np.float32) for i in range(num_sample): x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape) return ((x[:train_samples], y[:train_samples]), (x[train_samples:], y[train_samples:])) @test_util.disable_cudnn_autotune def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None, input_data=None, expected_output=None, expected_output_dtype=None, expected_output_shape=None, validate_training=True, adapt_data=None, custom_objects=None, test_harness=None, supports_masking=None): """Test routine for a layer with a single input and single output. Args: layer_cls: Layer class object. kwargs: Optional dictionary of keyword arguments for instantiating the layer. input_shape: Input shape tuple. input_dtype: Data type of the input data. input_data: Numpy array of input data. expected_output: Numpy array of the expected output. expected_output_dtype: Data type expected for the output. expected_output_shape: Shape tuple for the expected shape of the output. validate_training: Whether to attempt to validate training on this layer. This might be set to False for non-differentiable layers that output string or integer values. adapt_data: Optional data for an 'adapt' call. If None, adapt() will not be tested for this layer. This is only relevant for PreprocessingLayers. custom_objects: Optional dictionary mapping name strings to custom objects in the layer class. This is helpful for testing custom layers. 
test_harness: The Tensorflow test, if any, that this function is being called in. supports_masking: Optional boolean to check the `supports_masking` property of the layer. If None, the check will not be performed. Returns: The output data (Numpy array) returned by the layer, for additional checks to be done by the calling code. Raises: ValueError: if `input_shape is None`. """ if input_data is None: if input_shape is None: raise ValueError('input_shape is None') if not input_dtype: input_dtype = 'float32' input_data_shape = list(input_shape) for i, e in enumerate(input_data_shape): if e is None: input_data_shape[i] = np.random.randint(1, 4) input_data = 10 * np.random.random(input_data_shape) if input_dtype[:5] == 'float': input_data -= 0.5 input_data = input_data.astype(input_dtype) elif input_shape is None: input_shape = input_data.shape if input_dtype is None: input_dtype = input_data.dtype if expected_output_dtype is None: expected_output_dtype = input_dtype if tf.as_dtype(expected_output_dtype) == tf.string: if test_harness: assert_equal = test_harness.assertAllEqual else: assert_equal = string_test else: if test_harness: assert_equal = test_harness.assertAllClose else: assert_equal = numeric_test # instantiation kwargs = kwargs or {} layer = layer_cls(**kwargs) if (supports_masking is not None and layer.supports_masking != supports_masking): raise AssertionError( 'When testing layer %s, the `supports_masking` property is %r' 'but expected to be %r.\nFull kwargs: %s' % (layer_cls.__name__, layer.supports_masking, supports_masking, kwargs)) # Test adapt, if data was passed. 
if adapt_data is not None: layer.adapt(adapt_data) # test get_weights , set_weights at layer level weights = layer.get_weights() layer.set_weights(weights) # test and instantiation from weights if 'weights' in tf_inspect.getargspec(layer_cls.__init__): kwargs['weights'] = weights layer = layer_cls(**kwargs) # test in functional API x = layers.Input(shape=input_shape[1:], dtype=input_dtype) y = layer(x) if backend.dtype(y) != expected_output_dtype: raise AssertionError('When testing layer %s, for input %s, found output ' 'dtype=%s but expected to find %s.\nFull kwargs: %s' % (layer_cls.__name__, x, backend.dtype(y), expected_output_dtype, kwargs)) def assert_shapes_equal(expected, actual): """Asserts that the output shape from the layer matches the actual shape.""" if len(expected) != len(actual): raise AssertionError( 'When testing layer %s, for input %s, found output_shape=' '%s but expected to find %s.\nFull kwargs: %s' % (layer_cls.__name__, x, actual, expected, kwargs)) for expected_dim, actual_dim in zip(expected, actual): if isinstance(expected_dim, tf.compat.v1.Dimension): expected_dim = expected_dim.value if isinstance(actual_dim, tf.compat.v1.Dimension): actual_dim = actual_dim.value if expected_dim is not None and expected_dim != actual_dim: raise AssertionError( 'When testing layer %s, for input %s, found output_shape=' '%s but expected to find %s.\nFull kwargs: %s' % (layer_cls.__name__, x, actual, expected, kwargs)) if expected_output_shape is not None: assert_shapes_equal(tf.TensorShape(expected_output_shape), y.shape) # check shape inference model = models.Model(x, y) computed_output_shape = tuple( layer.compute_output_shape( tf.TensorShape(input_shape)).as_list()) computed_output_signature = layer.compute_output_signature( tf.TensorSpec(shape=input_shape, dtype=input_dtype)) actual_output = model.predict(input_data) actual_output_shape = actual_output.shape assert_shapes_equal(computed_output_shape, actual_output_shape) 
assert_shapes_equal(computed_output_signature.shape, actual_output_shape) if computed_output_signature.dtype != actual_output.dtype: raise AssertionError( 'When testing layer %s, for input %s, found output_dtype=' '%s but expected to find %s.\nFull kwargs: %s' % (layer_cls.__name__, x, actual_output.dtype, computed_output_signature.dtype, kwargs)) if expected_output is not None: assert_equal(actual_output, expected_output) # test serialization, weight setting at model level model_config = model.get_config() recovered_model = models.Model.from_config(model_config, custom_objects) if model.weights: weights = model.get_weights() recovered_model.set_weights(weights) output = recovered_model.predict(input_data) assert_equal(output, actual_output) # test training mode (e.g. useful for dropout tests) # Rebuild the model to avoid the graph being reused between predict() and # See b/120160788 for more details. This should be mitigated after 2.0. layer_weights = layer.get_weights() # Get the layer weights BEFORE training. if validate_training: model = models.Model(x, layer(x)) if _thread_local_data.run_eagerly is not None: model.compile( 'rmsprop', 'mse', weighted_metrics=['acc'], run_eagerly=should_run_eagerly()) else: model.compile('rmsprop', 'mse', weighted_metrics=['acc']) model.train_on_batch(input_data, actual_output) # test as first layer in Sequential API layer_config = layer.get_config() layer_config['batch_input_shape'] = input_shape layer = layer.__class__.from_config(layer_config) # Test adapt, if data was passed. 
if adapt_data is not None: layer.adapt(adapt_data) model = models.Sequential() model.add(layers.Input(shape=input_shape[1:], dtype=input_dtype)) model.add(layer) layer.set_weights(layer_weights) actual_output = model.predict(input_data) actual_output_shape = actual_output.shape for expected_dim, actual_dim in zip(computed_output_shape, actual_output_shape): if expected_dim is not None: if expected_dim != actual_dim: raise AssertionError( 'When testing layer %s **after deserialization**, ' 'for input %s, found output_shape=' '%s but expected to find inferred shape %s.\nFull kwargs: %s' % (layer_cls.__name__, x, actual_output_shape, computed_output_shape, kwargs)) if expected_output is not None: assert_equal(actual_output, expected_output) # test serialization, weight setting at model level model_config = model.get_config() recovered_model = models.Sequential.from_config(model_config, custom_objects) if model.weights: weights = model.get_weights() recovered_model.set_weights(weights) output = recovered_model.predict(input_data) assert_equal(output, actual_output) # for further checks in the caller function return actual_output _thread_local_data = threading.local() _thread_local_data.model_type = None _thread_local_data.run_eagerly = None _thread_local_data.saved_model_format = None _thread_local_data.save_kwargs = None @tf_contextlib.contextmanager def model_type_scope(value): """Provides a scope within which the model type to test is equal to `value`. The model type gets restored to its original value upon exiting the scope. Args: value: model type value Yields: The provided value. """ previous_value = _thread_local_data.model_type try: _thread_local_data.model_type = value yield value finally: # Restore model type to initial value. _thread_local_data.model_type = previous_value @tf_contextlib.contextmanager def run_eagerly_scope(value): """Provides a scope within which we compile models to run eagerly or not. 
  The boolean gets restored to its original value upon exiting the scope.

  Args:
    value: Bool specifying if we should run models eagerly in the active test.
      Should be True or False.

  Yields:
    The provided value.
  """
  previous_value = _thread_local_data.run_eagerly
  try:
    _thread_local_data.run_eagerly = value
    yield value
  finally:
    # Restore the run-eagerly flag to its initial value.
    _thread_local_data.run_eagerly = previous_value


def should_run_eagerly():
  """Returns whether the models we are testing should be run eagerly."""
  if _thread_local_data.run_eagerly is None:
    raise ValueError('Cannot call `should_run_eagerly()` outside of a '
                     '`run_eagerly_scope()` or `run_all_keras_modes` '
                     'decorator.')
  # run_eagerly only takes effect when eager execution is enabled globally.
  return _thread_local_data.run_eagerly and tf.executing_eagerly()


@tf_contextlib.contextmanager
def saved_model_format_scope(value, **kwargs):
  """Provides a scope within which the saved model format to test is `value`.

  The saved model format gets restored to its original value upon exiting the
  scope.

  Args:
    value: saved model format value
    **kwargs: optional kwargs to pass to the save function.

  Yields:
    The provided value.
  """
  previous_format = _thread_local_data.saved_model_format
  previous_kwargs = _thread_local_data.save_kwargs
  try:
    _thread_local_data.saved_model_format = value
    _thread_local_data.save_kwargs = kwargs
    yield
  finally:
    # Restore saved model format to initial value.
    _thread_local_data.saved_model_format = previous_format
    _thread_local_data.save_kwargs = previous_kwargs


def get_save_format():
  """Returns the saved-model format set by `saved_model_format_scope`."""
  if _thread_local_data.saved_model_format is None:
    raise ValueError(
        'Cannot call `get_save_format()` outside of a '
        '`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
        'decorator.')
  return _thread_local_data.saved_model_format


def get_save_kwargs():
  """Returns the save kwargs set by `saved_model_format_scope` (or {})."""
  if _thread_local_data.save_kwargs is None:
    raise ValueError(
        'Cannot call `get_save_kwargs()` outside of a '
        '`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
        'decorator.')
  return _thread_local_data.save_kwargs or {}


def get_model_type():
  """Gets the model type that should be tested."""
  if _thread_local_data.model_type is None:
    raise ValueError('Cannot call `get_model_type()` outside of a '
                     '`model_type_scope()` or `run_with_all_model_types` '
                     'decorator.')
  return _thread_local_data.model_type


def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
  """Builds a small two-layer Sequential MLP.

  Uses a sigmoid output for binary (num_classes == 1) and softmax otherwise.
  """
  model = models.Sequential()
  if input_dim:
    model.add(layers.Dense(num_hidden, activation='relu', input_dim=input_dim))
  else:
    # No input_dim: the model is deferred-build until it first sees data.
    model.add(layers.Dense(num_hidden, activation='relu'))
  activation = 'sigmoid' if num_classes == 1 else 'softmax'
  model.add(layers.Dense(num_classes, activation=activation))
  return model


def get_small_functional_mlp(num_hidden, num_classes, input_dim):
  """Builds a small two-layer functional-API MLP (mirrors the sequential one)."""
  inputs = layers.Input(shape=(input_dim,))
  outputs = layers.Dense(num_hidden, activation='relu')(inputs)
  activation = 'sigmoid' if num_classes == 1 else 'softmax'
  outputs = layers.Dense(num_classes, activation=activation)(outputs)
  return models.Model(inputs, outputs)


class SmallSubclassMLP(models.Model):
  """A subclass model based small MLP."""

  def __init__(self, num_hidden, num_classes, use_bn=False, use_dp=False,
               **kwargs):
    super(SmallSubclassMLP, self).__init__(name='test_model', **kwargs)
    self.use_bn = use_bn
    self.use_dp = use_dp
    self.layer_a = layers.Dense(num_hidden, activation='relu')
    activation = 'sigmoid' if num_classes ==
1 else 'softmax' self.layer_b = layers.Dense(num_classes, activation=activation) if self.use_dp: self.dp = layers.Dropout(0.5) if self.use_bn: self.bn = layers.BatchNormalization(axis=-1) def call(self, inputs, **kwargs): x = self.layer_a(inputs) if self.use_dp: x = self.dp(x) if self.use_bn: x = self.bn(x) return self.layer_b(x) class _SmallSubclassMLPCustomBuild(models.Model): """A subclass model small MLP that uses a custom build method.""" def __init__(self, num_hidden, num_classes): super(_SmallSubclassMLPCustomBuild, self).__init__() self.layer_a = None self.layer_b = None self.num_hidden = num_hidden self.num_classes = num_classes def build(self, input_shape): self.layer_a = layers.Dense(self.num_hidden, activation='relu') activation = 'sigmoid' if self.num_classes == 1 else 'softmax' self.layer_b = layers.Dense(self.num_classes, activation=activation) def call(self, inputs, **kwargs): x = self.layer_a(inputs) return self.layer_b(x) def get_small_subclass_mlp(num_hidden, num_classes): return SmallSubclassMLP(num_hidden, num_classes) def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes): return _SmallSubclassMLPCustomBuild(num_hidden, num_classes) def get_small_mlp(num_hidden, num_classes, input_dim): """Get a small mlp of the model type specified by `get_model_type`.""" model_type = get_model_type() if model_type == 'subclass': return get_small_subclass_mlp(num_hidden, num_classes) if model_type == 'subclass_custom_build': return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes) if model_type == 'sequential': return get_small_sequential_mlp(num_hidden, num_classes, input_dim) if model_type == 'functional': return get_small_functional_mlp(num_hidden, num_classes, input_dim) raise ValueError('Unknown model type {}'.format(model_type)) class _SubclassModel(models.Model): """A Keras subclass model.""" def __init__(self, model_layers, *args, **kwargs): """Instantiate a model. 
Args: model_layers: a list of layers to be added to the model. *args: Model's args **kwargs: Model's keyword args, at most one of input_tensor -> the input tensor required for ragged/sparse input. """ inputs = kwargs.pop('input_tensor', None) super(_SubclassModel, self).__init__(*args, **kwargs) # Note that clone and build doesn't support lists of layers in subclassed # models. Adding each layer directly here. for i, layer in enumerate(model_layers): setattr(self, self._layer_name_for_i(i), layer) self.num_layers = len(model_layers) if inputs is not None: self._set_inputs(inputs) def _layer_name_for_i(self, i): return 'layer{}'.format(i) def call(self, inputs, **kwargs): x = inputs for i in range(self.num_layers): layer = getattr(self, self._layer_name_for_i(i)) x = layer(x) return x class _SubclassModelCustomBuild(models.Model): """A Keras subclass model that uses a custom build method.""" def __init__(self, layer_generating_func, *args, **kwargs): super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs) self.all_layers = None self._layer_generating_func = layer_generating_func def build(self, input_shape): model_layers = [] for layer in self._layer_generating_func(): model_layers.append(layer) self.all_layers = model_layers def call(self, inputs, **kwargs): x = inputs for layer in self.all_layers: x = layer(x) return x def get_model_from_layers(model_layers, input_shape=None, input_dtype=None, name=None, input_ragged=None, input_sparse=None, model_type=None): """Builds a model from a sequence of layers. Args: model_layers: The layers used to build the network. input_shape: Shape tuple of the input or 'TensorShape' instance. input_dtype: Datatype of the input. name: Name for the model. input_ragged: Boolean, whether the input data is a ragged tensor. input_sparse: Boolean, whether the input data is a sparse tensor. model_type: One of "subclass", "subclass_custom_build", "sequential", or "functional". When None, defaults to `get_model_type`. 
Returns: A Keras model. """ if model_type is None: model_type = get_model_type() if model_type == 'subclass': inputs = None if input_ragged or input_sparse: inputs = layers.Input( shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse) return _SubclassModel(model_layers, name=name, input_tensor=inputs) if model_type == 'subclass_custom_build': layer_generating_func = lambda: model_layers return _SubclassModelCustomBuild(layer_generating_func, name=name) if model_type == 'sequential': model = models.Sequential(name=name) if input_shape: model.add( layers.InputLayer( input_shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse)) for layer in model_layers: model.add(layer) return model if model_type == 'functional': if not input_shape: raise ValueError('Cannot create a functional model from layers with no ' 'input shape.') inputs = layers.Input( shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse) outputs = inputs for layer in model_layers: outputs = layer(outputs) return models.Model(inputs, outputs, name=name) raise ValueError('Unknown model type {}'.format(model_type)) class Bias(layers.Layer): def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') def call(self, inputs): return inputs + self.bias class _MultiIOSubclassModel(models.Model): """Multi IO Keras subclass model.""" def __init__(self, branch_a, branch_b, shared_input_branch=None, shared_output_branch=None, name=None): super(_MultiIOSubclassModel, self).__init__(name=name) self._shared_input_branch = shared_input_branch self._branch_a = branch_a self._branch_b = branch_b self._shared_output_branch = shared_output_branch def call(self, inputs, **kwargs): if self._shared_input_branch: for layer in self._shared_input_branch: inputs = layer(inputs) a = inputs b = inputs elif isinstance(inputs, dict): a = inputs['input_1'] b = inputs['input_2'] else: a, b = inputs for layer in self._branch_a: a = 
layer(a) for layer in self._branch_b: b = layer(b) outs = [a, b] if self._shared_output_branch: for layer in self._shared_output_branch: outs = layer(outs) return outs class _MultiIOSubclassModelCustomBuild(models.Model): """Multi IO Keras subclass model that uses a custom build method.""" def __init__(self, branch_a_func, branch_b_func, shared_input_branch_func=None, shared_output_branch_func=None): super(_MultiIOSubclassModelCustomBuild, self).__init__() self._shared_input_branch_func = shared_input_branch_func self._branch_a_func = branch_a_func self._branch_b_func = branch_b_func self._shared_output_branch_func = shared_output_branch_func self._shared_input_branch = None self._branch_a = None self._branch_b = None self._shared_output_branch = None def build(self, input_shape): if self._shared_input_branch_func(): self._shared_input_branch = self._shared_input_branch_func() self._branch_a = self._branch_a_func() self._branch_b = self._branch_b_func() if self._shared_output_branch_func(): self._shared_output_branch = self._shared_output_branch_func() def call(self, inputs, **kwargs): if self._shared_input_branch: for layer in self._shared_input_branch: inputs = layer(inputs) a = inputs b = inputs else: a, b = inputs for layer in self._branch_a: a = layer(a) for layer in self._branch_b: b = layer(b) outs = a, b if self._shared_output_branch: for layer in self._shared_output_branch: outs = layer(outs) return outs def get_multi_io_model( branch_a, branch_b, shared_input_branch=None, shared_output_branch=None): """Builds a multi-io model that contains two branches. The produced model will be of the type specified by `get_model_type`. To build a two-input, two-output model: Specify a list of layers for branch a and branch b, but do not specify any shared input branch or shared output branch. The resulting model will apply each branch to a different input, to produce two outputs. 
The first value in branch_a must be the Keras 'Input' layer for branch a, and the first value in branch_b must be the Keras 'Input' layer for branch b. example usage: ``` branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()] branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()] model = get_multi_io_model(branch_a, branch_b) ``` To build a two-input, one-output model: Specify a list of layers for branch a and branch b, and specify a shared output branch. The resulting model will apply each branch to a different input. It will then apply the shared output branch to a tuple containing the intermediate outputs of each branch, to produce a single output. The first layer in the shared_output_branch must be able to merge a tuple of two tensors. The first value in branch_a must be the Keras 'Input' layer for branch a, and the first value in branch_b must be the Keras 'Input' layer for branch b. example usage: ``` input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()] input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()] shared_output_branch = [Concatenate(), Dense(), Dense()] model = get_multi_io_model(input_branch_a, input_branch_b, shared_output_branch=shared_output_branch) ``` To build a one-input, two-output model: Specify a list of layers for branch a and branch b, and specify a shared input branch. The resulting model will take one input, and apply the shared input branch to it. It will then respectively apply each branch to that intermediate result in parallel, to produce two outputs. The first value in the shared_input_branch must be the Keras 'Input' layer for the whole model. Branch a and branch b should not contain any Input layers. 
example usage: ``` shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()] output_branch_a = [Dense(), Dense()] output_branch_b = [Dense(), Dense()] model = get_multi_io_model(output__branch_a, output_branch_b, shared_input_branch=shared_input_branch) ``` Args: branch_a: A sequence of layers for branch a of the model. branch_b: A sequence of layers for branch b of the model. shared_input_branch: An optional sequence of layers to apply to a single input, before applying both branches to that intermediate result. If set, the model will take only one input instead of two. Defaults to None. shared_output_branch: An optional sequence of layers to merge the intermediate results produced by branch a and branch b. If set, the model will produce only one output instead of two. Defaults to None. Returns: A multi-io model of the type specified by `get_model_type`, specified by the different branches. """ # Extract the functional inputs from the layer lists if shared_input_branch: inputs = shared_input_branch[0] shared_input_branch = shared_input_branch[1:] else: inputs = branch_a[0], branch_b[0] branch_a = branch_a[1:] branch_b = branch_b[1:] model_type = get_model_type() if model_type == 'subclass': return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch, shared_output_branch) if model_type == 'subclass_custom_build': return _MultiIOSubclassModelCustomBuild((lambda: branch_a), (lambda: branch_b), (lambda: shared_input_branch), (lambda: shared_output_branch)) if model_type == 'sequential': raise ValueError('Cannot use `get_multi_io_model` to construct ' 'sequential models') if model_type == 'functional': if shared_input_branch: a_and_b = inputs for layer in shared_input_branch: a_and_b = layer(a_and_b) a = a_and_b b = a_and_b else: a, b = inputs for layer in branch_a: a = layer(a) for layer in branch_b: b = layer(b) outputs = a, b if shared_output_branch: for layer in shared_output_branch: outputs = layer(outputs) return models.Model(inputs, 
outputs) raise ValueError('Unknown model type {}'.format(model_type)) _V2_OPTIMIZER_MAP = { 'adadelta': adadelta_v2.Adadelta, 'adagrad': adagrad_v2.Adagrad, 'adam': adam_v2.Adam, 'adamax': adamax_v2.Adamax, 'nadam': nadam_v2.Nadam, 'rmsprop': rmsprop_v2.RMSprop, 'sgd': gradient_descent_v2.SGD } def get_v2_optimizer(name, **kwargs): """Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed. """ try: return _V2_OPTIMIZER_MAP[name](**kwargs) except KeyError: raise ValueError( 'Could not find requested v2 optimizer: {}\nValid choices: {}'.format( name, list(_V2_OPTIMIZER_MAP.keys()))) def get_expected_metric_variable_names(var_names, name_suffix=''): """Returns expected metric variable names given names and prefix/suffix.""" if tf.__internal__.tf2.enabled() or tf.executing_eagerly(): # In V1 eager mode and V2 variable names are not made unique. return [n + ':0' for n in var_names] # In V1 graph mode variable names are made unique using a suffix. 
  return [n + name_suffix + ':0' for n in var_names]


def enable_v2_dtype_behavior(fn):
  """Decorator for enabling the layer V2 dtype behavior on a test."""
  return _set_v2_dtype_behavior(fn, True)


def disable_v2_dtype_behavior(fn):
  """Decorator for disabling the layer V2 dtype behavior on a test."""
  return _set_v2_dtype_behavior(fn, False)


def _set_v2_dtype_behavior(fn, enabled):
  """Returns version of 'fn' that runs with v2 dtype behavior on or off."""

  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    # Flip the global flag only for the duration of the call and restore it
    # afterwards so tests do not leak dtype-behavior state into each other.
    v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
    base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
    try:
      return fn(*args, **kwargs)
    finally:
      base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior

  return tf.__internal__.decorator.make_decorator(fn, wrapper)


@contextlib.contextmanager
def device(should_use_gpu):
  """Uses gpu when requested and available."""
  if should_use_gpu and tf.test.is_gpu_available():
    dev = '/device:GPU:0'
  else:
    dev = '/device:CPU:0'
  with tf.device(dev):
    yield


@contextlib.contextmanager
def use_gpu():
  """Uses gpu when requested and available."""
  with device(should_use_gpu=True):
    yield


def for_all_test_methods(decorator, *args, **kwargs):
  """Generate class-level decorator from given method-level decorator.

  It is expected for the given decorator to take some arguments and return
  a method that is then called on the test method to produce a decorated
  method.

  Args:
    decorator: The decorator to apply.
    *args: Positional arguments
    **kwargs: Keyword arguments
  Returns:
    Function that will decorate a given classes test methods with the
    decorator.
  """

  def all_test_methods_impl(cls):
    """Apply decorator to all test methods in class."""
    # 'test_session' is a fixture provided by the TF test base class, not a
    # test case, so it is explicitly excluded.
    for name in dir(cls):
      value = getattr(cls, name)
      if callable(value) and name.startswith(
          'test') and (name != 'test_session'):
        setattr(cls, name, decorator(*args, **kwargs)(value))
    return cls

  return all_test_methods_impl


# The description is just for documentation purposes.
def run_without_tensor_float_32(description):  # pylint: disable=unused-argument
  """Execute test with TensorFloat-32 disabled.

  While almost every real-world deep learning model runs fine with
  TensorFloat-32, many tests use assertAllClose or similar methods.
  TensorFloat-32 matmuls typically will cause such methods to fail with the
  default tolerances.

  Args:
    description: A description used for documentation purposes, describing why
      the test requires TensorFloat-32 to be disabled.

  Returns:
    Decorator which runs a test with TensorFloat-32 disabled.
  """

  def decorator(f):

    @functools.wraps(f)
    def decorated(self, *args, **kwargs):
      allowed = tf.config.experimental.tensor_float_32_execution_enabled()
      try:
        tf.config.experimental.enable_tensor_float_32_execution(False)
        f(self, *args, **kwargs)
      finally:
        # Restore the global TF32 setting even if the test fails.
        tf.config.experimental.enable_tensor_float_32_execution(allowed)

    return decorated

  return decorator


# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description):  # pylint: disable=unused-argument
  """Execute all tests in a class with TensorFloat-32 disabled."""
  return for_all_test_methods(run_without_tensor_float_32, description)


def run_v2_only(func=None):
  """Execute the decorated test only if running in v2 mode.

  This function is intended to be applied to tests that exercise v2 only
  functionality. If the test is run in v1 mode it will simply be skipped.

  See go/tf-test-decorator-cheatsheet for the decorators to use in different
  v1/v2/eager/graph combinations.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError('`run_v2_only` only supports test methods.')

    def decorated(self, *args, **kwargs):
      if not tf.__internal__.tf2.enabled():
        self.skipTest('Test is only compatible with v2')
      return f(self, *args, **kwargs)

    return decorated

  # Support usage both as @run_v2_only and @run_v2_only().
  if func is not None:
    return decorator(func)

  return decorator


def generate_combinations_with_testcase_name(**kwargs):
  """Generate combinations based on its keyword arguments using combine().

  This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is a required for named
  parameterized tests.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]` or
      `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names. Each key has one value - one of the
    corresponding keyword argument values.
  """
  sort_by_key = lambda k: k[0]
  combinations = []
  for key, values in sorted(kwargs.items(), key=sort_by_key):
    if not isinstance(values, list):
      values = [values]
    combinations.append([(key, value) for value in values])

  # Cartesian product of all option values, with deterministic key order.
  combinations = [collections.OrderedDict(result)
                  for result in itertools.product(*combinations)]

  named_combinations = []
  for combination in combinations:
    assert isinstance(combination, collections.OrderedDict)
    # Build a name like "_key1_value1_key2_value2" from alphanumerics only.
    name = ''.join([
        '_{}_{}'.format(''.join(filter(str.isalnum, key)),
                        ''.join(filter(str.isalnum, str(value))))
        for key, value in combination.items()
    ])
    named_combinations.append(
        collections.OrderedDict(
            list(combination.items()) +
            [('testcase_name', '_test{}'.format(name))]))

  return named_combinations
36,943
33.11265
84
py
keras
keras-master/keras/optimizers.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes.

For more examples see the base class `tf.keras.optimizers.Optimizer`.
"""

import tensorflow.compat.v2 as tf
from keras import backend
from keras.optimizer_v1 import Optimizer
from keras.optimizer_v1 import TFOptimizer
from keras.optimizer_v2 import adadelta as adadelta_v2
from keras.optimizer_v2 import adagrad as adagrad_v2
from keras.optimizer_v2 import adam as adam_v2
from keras.optimizer_v2 import adamax as adamax_v2
from keras.optimizer_v2 import ftrl
from keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from keras.optimizer_v2 import nadam as nadam_v2
from keras.optimizer_v2 import optimizer_v2
from keras.optimizer_v2 import rmsprop as rmsprop_v2
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
  """Serialize the optimizer configuration to JSON compatible python dict.

  The configuration can be used for persistence and reconstruct the `Optimizer`
  instance again.

  >>> tf.keras.optimizers.serialize(tf.keras.optimizers.SGD())
  {'class_name': 'SGD', 'config': {'name': 'SGD', 'learning_rate': 0.01,
                                   'decay': 0.0, 'momentum': 0.0,
                                   'nesterov': False}}

  Args:
    optimizer: An `Optimizer` instance to serialize.

  Returns:
    Python dict which contains the configuration of the input optimizer.
  """
  return serialize_keras_object(optimizer)


@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Args:
    config: Optimizer configuration dictionary.
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects (classes and functions) to be considered during deserialization.

  Returns:
    A Keras Optimizer instance.
  """
  # loss_scale_optimizer has a direct dependency of optimizer, import here
  # rather than top to avoid the cyclic dependency.
  from keras.mixed_precision import loss_scale_optimizer  # pylint: disable=g-import-not-at-top
  all_classes = {
      'adadelta': adadelta_v2.Adadelta,
      'adagrad': adagrad_v2.Adagrad,
      'adam': adam_v2.Adam,
      'adamax': adamax_v2.Adamax,
      'nadam': nadam_v2.Nadam,
      'rmsprop': rmsprop_v2.RMSprop,
      'sgd': gradient_descent_v2.SGD,
      'ftrl': ftrl.Ftrl,
      'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
      # LossScaleOptimizerV1 deserializes into LossScaleOptimizer, as
      # LossScaleOptimizerV1 will be removed soon but deserializing it will
      # still be supported.
      'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer,
  }

  # Make deserialization case-insensitive for built-in optimizers.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer')


@keras_export('keras.optimizers.get')
def get(identifier):
  """Retrieves a Keras Optimizer instance.

  Args:
    identifier: Optimizer identifier, one of
      - String: name of an optimizer
      - Dictionary: configuration dictionary.
      - Keras Optimizer instance (it will be returned unchanged).
      - TensorFlow Optimizer instance (it will be wrapped as a Keras
        Optimizer).

  Returns:
    A Keras Optimizer instance.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
    return identifier
  # Wrap legacy TF optimizer instances
  elif isinstance(identifier, tf.compat.v1.train.Optimizer):
    opt = TFOptimizer(identifier)
    backend.track_tf_optimizer(opt)
    return opt
  elif isinstance(identifier, dict):
    # Configuration dict: rebuild the optimizer from its serialized form.
    return deserialize(identifier)
  elif isinstance(identifier, str):
    # Bare name, e.g. 'adam': deserialize with an empty (default) config.
    config = {'class_name': str(identifier), 'config': {}}
    return deserialize(config)
  else:
    raise ValueError(
        'Could not interpret optimizer identifier: {}'.format(identifier))
4,989
36.238806
95
py
keras
keras-master/keras/backend_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras backend.""" import tensorflow.compat.v2 as tf import gc import warnings from absl.testing import parameterized import numpy as np import scipy.sparse from tensorflow.python.eager import context from tensorflow.python.eager.context import get_config from tensorflow.python.framework import test_util from keras import activations from keras import backend from keras import combinations from keras.engine import input_layer from keras.layers import advanced_activations from keras.layers.normalization import batch_normalization_v1 from keras.utils import tf_inspect def compare_single_input_op_to_numpy(keras_op, np_op, input_shape, dtype='float32', negative_values=True, keras_args=None, keras_kwargs=None, np_args=None, np_kwargs=None): keras_args = keras_args or [] keras_kwargs = keras_kwargs or {} np_args = np_args or [] np_kwargs = np_kwargs or {} inputs = 2. * np.random.random(input_shape) if negative_values: inputs -= 1. 
keras_output = keras_op( backend.variable(inputs, dtype=dtype), *keras_args, **keras_kwargs) keras_output = backend.eval(keras_output) np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs) try: np.testing.assert_allclose(keras_output, np_output, atol=1e-4) except AssertionError: raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; ' 'Expected ' + str(np_output) + ' but got ' + str(keras_output)) def compare_two_inputs_op_to_numpy(keras_op, np_op, input_shape_a, input_shape_b, dtype='float32', keras_args=None, keras_kwargs=None, np_args=None, np_kwargs=None): keras_args = keras_args or [] keras_kwargs = keras_kwargs or {} np_args = np_args or [] np_kwargs = np_kwargs or {} input_a = np.random.random(input_shape_a) input_b = np.random.random(input_shape_b) keras_output = keras_op( backend.variable(input_a, dtype=dtype), backend.variable(input_b, dtype=dtype), *keras_args, **keras_kwargs) keras_output = backend.eval(keras_output) np_output = np_op( input_a.astype(dtype), input_b.astype(dtype), *np_args, **np_kwargs) try: np.testing.assert_allclose(keras_output, np_output, atol=1e-4) except AssertionError: raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; ' 'Expected ' + str(np_output) + ' but got ' + str(keras_output)) class BackendResetTest(tf.test.TestCase, parameterized.TestCase): def test_new_config(self): # User defined jit setting tf.config.optimizer.set_jit(False) sess = backend.get_session() default_config = get_config() self.assertEqual( sess._config.graph_options.optimizer_options.global_jit_level, default_config.graph_options.optimizer_options.global_jit_level) backend.clear_session() # New session has the same jit setting sess = backend.get_session() default_config = get_config() self.assertEqual( sess._config.graph_options.optimizer_options.global_jit_level, default_config.graph_options.optimizer_options.global_jit_level) backend.clear_session() # Change respected tf.config.optimizer.set_jit(True) 
sess = backend.get_session() default_config = get_config() self.assertEqual( sess._config.graph_options.optimizer_options.global_jit_level, default_config.graph_options.optimizer_options.global_jit_level) backend.clear_session() # We can't use the normal parameterized decorator because the test session # will block graph clearing. @parameterized.named_parameters(('_v1', context.graph_mode), ('_v2', tf.__internal__.eager_context.eager_mode)) def test_new_graph(self, test_context): with test_context(): g_old = backend.get_graph() backend.clear_session() g = backend.get_graph() assert g_old is not g @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendUtilsTest(tf.test.TestCase): def test_backend(self): self.assertEqual(backend.backend(), 'tensorflow') def test_get_reset_uids(self): self.assertEqual(backend.get_uid('foo'), 1) self.assertEqual(backend.get_uid('foo'), 2) backend.reset_uids() self.assertEqual(backend.get_uid('foo'), 1) def test_learning_phase(self): with self.cached_session() as sess: with self.assertRaises(ValueError): backend.set_learning_phase(2) # Test running with a learning-phase-consuming layer with backend.learning_phase_scope(0): x = input_layer.Input((3,)) y = batch_normalization_v1.BatchNormalization()(x) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.global_variables_initializer()) sess.run(y, feed_dict={x: np.random.random((2, 3))}) def test_learning_phase_name(self): with backend.name_scope('test_scope'): # Test that outer name scopes do not affect the learning phase's name. 
lp = backend.symbolic_learning_phase() self.assertEqual(lp.name, 'keras_learning_phase:0') def test_learning_phase_scope(self): initial_learning_phase = backend.learning_phase() with backend.learning_phase_scope(1): self.assertEqual(backend.learning_phase(), 1) self.assertEqual(backend.learning_phase(), initial_learning_phase) with backend.learning_phase_scope(0): self.assertEqual(backend.learning_phase(), 0) self.assertEqual(backend.learning_phase(), initial_learning_phase) with self.assertRaises(ValueError): with backend.learning_phase_scope(None): pass self.assertEqual(backend.learning_phase(), initial_learning_phase) new_learning_phase = 0 backend.set_learning_phase(new_learning_phase) self.assertEqual(backend.learning_phase(), new_learning_phase) with backend.learning_phase_scope(1): self.assertEqual(backend.learning_phase(), 1) self.assertEqual(backend.learning_phase(), new_learning_phase) def test_learning_phase_scope_in_graph(self): initial_learning_phase_outside_graph = backend.learning_phase() with backend.get_graph().as_default(): initial_learning_phase_in_graph = backend.learning_phase() self.assertEqual(backend.learning_phase(), initial_learning_phase_outside_graph) with backend.learning_phase_scope(1): self.assertEqual(backend.learning_phase(), 1) self.assertEqual(backend.learning_phase(), initial_learning_phase_outside_graph) with backend.get_graph().as_default(): self.assertIs(backend.learning_phase(), initial_learning_phase_in_graph) self.assertEqual(backend.learning_phase(), initial_learning_phase_outside_graph) def test_int_shape(self): x = backend.ones(shape=(3, 4)) self.assertEqual(backend.int_shape(x), (3, 4)) if not tf.executing_eagerly(): x = backend.placeholder(shape=(None, 4)) self.assertEqual(backend.int_shape(x), (None, 4)) def test_in_train_phase(self): y1 = backend.variable(1) y2 = backend.variable(2) if tf.executing_eagerly(): with backend.learning_phase_scope(0): y_val_test = backend.in_train_phase(y1, y2).numpy() with 
backend.learning_phase_scope(1): y_val_train = backend.in_train_phase(y1, y2).numpy() else: y = backend.in_train_phase(y1, y2) f = backend.function([backend.learning_phase()], [y]) y_val_test = f([0])[0] y_val_train = f([1])[0] self.assertAllClose(y_val_test, 2) self.assertAllClose(y_val_train, 1) def test_is_keras_tensor(self): x = backend.variable(1) self.assertEqual(backend.is_keras_tensor(x), False) x = input_layer.Input(shape=(1,)) self.assertEqual(backend.is_keras_tensor(x), True) x = input_layer.Input(shape=(None,), ragged=True) self.assertEqual(backend.is_keras_tensor(x), True) x = input_layer.Input(shape=(None, None), sparse=True) self.assertEqual(backend.is_keras_tensor(x), True) with self.assertRaises(ValueError): backend.is_keras_tensor(0) def test_stop_gradient(self): x = backend.variable(1) y = backend.stop_gradient(x) if not tf.executing_eagerly(): self.assertEqual(y.op.name[:12], 'StopGradient') xs = [backend.variable(1) for _ in range(3)] ys = backend.stop_gradient(xs) if not tf.executing_eagerly(): for y in ys: self.assertEqual(y.op.name[:12], 'StopGradient') def test_placeholder(self): x = backend.placeholder(shape=(3, 4)) self.assertEqual(x.shape.as_list(), [3, 4]) x = backend.placeholder(shape=(3, 4), sparse=True) self.assertEqual(x.shape.as_list(), [3, 4]) def test_is_placeholder(self): x = backend.placeholder(shape=(1,)) self.assertEqual(backend.is_placeholder(x), True) x = backend.variable(1) self.assertEqual(backend.is_placeholder(x), False) def test_print_tensor(self): # Unfortunately it seems impossible to use `mock` (or any other method) # to capture stdout when used inside a graph or graph function, thus # we cannot test correctness. # The message gets correctly printed in practice. 
x = backend.placeholder(shape=()) y = backend.print_tensor(x, 'eager=%s' % tf.executing_eagerly()) f = backend.function(x, y) f(0) def test_cast_to_floatx(self): x = backend.variable(1, dtype='float64') x = backend.cast_to_floatx(x) self.assertEqual(x.dtype.name, 'float32') x = backend.cast_to_floatx(2) self.assertEqual(x.dtype.name, 'float32') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendVariableTest(tf.test.TestCase): def test_zeros(self): x = backend.zeros((3, 4)) val = backend.eval(x) self.assertAllClose(val, np.zeros((3, 4))) def test_ones(self): x = backend.ones((3, 4)) val = backend.eval(x) self.assertAllClose(val, np.ones((3, 4))) def test_eye(self): x = backend.eye(4) val = backend.eval(x) self.assertAllClose(val, np.eye(4)) def test_zeros_like(self): x = backend.zeros((3, 4)) y = backend.zeros_like(x) val = backend.eval(y) self.assertAllClose(val, np.zeros((3, 4))) def test_ones_like(self): x = backend.zeros((3, 4)) y = backend.ones_like(x) val = backend.eval(y) self.assertAllClose(val, np.ones((3, 4))) def test_random_uniform_variable(self): x = backend.random_uniform_variable((30, 20), low=1., high=2., seed=0) val = backend.eval(x) self.assertAllClose(val.mean(), 1.5, atol=1e-1) self.assertAllClose(val.max(), 2., atol=1e-1) self.assertAllClose(val.min(), 1., atol=1e-1) def test_random_normal_variable(self): x = backend.random_normal_variable((30, 20), 1., 0.5, seed=0) val = backend.eval(x) self.assertAllClose(val.mean(), 1., atol=1e-1) self.assertAllClose(val.std(), 0.5, atol=1e-1) def test_count_params(self): x = backend.zeros((4, 5)) val = backend.count_params(x) self.assertAllClose(val, 20) def test_constant(self): ref_val = np.random.random((3, 4)).astype('float32') x = backend.constant(ref_val) val = backend.eval(x) self.assertAllClose(val, ref_val) def test_sparse_variable(self): val = scipy.sparse.eye(10) x = backend.variable(val) self.assertTrue(isinstance(x, tf.SparseTensor)) y = backend.to_dense(x) 
self.assertFalse(backend.is_sparse(y)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendLinearAlgebraTest(tf.test.TestCase, parameterized.TestCase): def test_dot(self): x = backend.ones(shape=(2, 3)) y = backend.ones(shape=(3, 4)) xy = backend.dot(x, y) self.assertEqual(xy.shape.as_list(), [2, 4]) x = backend.ones(shape=(32, 28, 3)) y = backend.ones(shape=(3, 4)) xy = backend.dot(x, y) self.assertEqual(xy.shape.as_list(), [32, 28, 4]) @parameterized.parameters( [(2, 3, 4, 5), (2, 5, 6, 7), (2, 3, 4, 6, 7), (3, 1)], [(2, 20, 1), (2, 30, 20), (2, 1, 30), (1, 2)], [(4, 2, 3), (4, 5, 3), (4, 2, 5), (2, 2)], [(4, 2), (4, 2, 3), (4, 3), (1, 1)], [(4, 2), (4, 2, 3), (4, 3), 1], [(4, 2, 3), (4, 3), (4, 2), (2, 1)], ) def test_batch_dot(self, x_shape, y_shape, output_shape, axes): x_val = np.random.random(x_shape) y_val = np.random.random(y_shape) x = backend.variable(x_val) y = backend.variable(y_val) xy = backend.batch_dot(x, y, axes=axes) self.assertEqual(tuple(xy.shape.as_list()), output_shape) xy_val = backend.eval(xy) ref_val = self._reference_batch_dot(x_val, y_val, axes) self.assertAllClose(xy_val, ref_val, atol=1e-5) def _reference_batch_dot(self, x, y, axes): if isinstance(axes, int): axes = [axes, axes] elif isinstance(axes, tuple): axes = list(axes) if axes is None: if y.ndim == 2: axes = [x.ndim - 1, y.ndim - 1] else: axes = [x.ndim - 1, y.ndim - 2] if axes[0] < 0: axes[0] += x.ndim if axes[1] < 0: axes[1] += y.ndim result = [] axes = [axes[0] - 1, axes[1] - 1] for xi, yi in zip(x, y): result.append(np.tensordot(xi, yi, axes)) result = np.array(result) if result.ndim == 1: result = np.expand_dims(result, -1) return result def test_reduction_ops(self): ops_to_test = [ (backend.max, np.max), (backend.min, np.min), (backend.sum, np.sum), (backend.prod, np.prod), (backend.var, np.var), (backend.std, np.std), (backend.mean, np.mean), (backend.argmin, np.argmin), (backend.argmax, np.argmax), ] for keras_op, np_op in ops_to_test: 
compare_single_input_op_to_numpy( keras_op, np_op, input_shape=(4, 7, 5), keras_kwargs={'axis': 1}, np_kwargs={'axis': 1}) compare_single_input_op_to_numpy( keras_op, np_op, input_shape=(4, 7, 5), keras_kwargs={'axis': -1}, np_kwargs={'axis': -1}) if 'keepdims' in tf_inspect.getargspec(keras_op).args: compare_single_input_op_to_numpy( keras_op, np_op, input_shape=(4, 7, 5), keras_kwargs={ 'axis': 1, 'keepdims': True }, np_kwargs={ 'axis': 1, 'keepdims': True }) def test_elementwise_ops(self): ops_to_test = [ (backend.square, np.square), (backend.abs, np.abs), (backend.round, np.round), (backend.sign, np.sign), (backend.sin, np.sin), (backend.cos, np.cos), (backend.exp, np.exp), ] for keras_op, np_op in ops_to_test: compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7)) ops_to_test = [ (backend.sqrt, np.sqrt), (backend.log, np.log), ] for keras_op, np_op in ops_to_test: compare_single_input_op_to_numpy( keras_op, np_op, input_shape=(4, 7), negative_values=False) compare_single_input_op_to_numpy( backend.clip, np.clip, input_shape=(6, 4), keras_kwargs={ 'min_value': 0.1, 'max_value': 2.4 }, np_kwargs={ 'a_min': 0.1, 'a_max': 1.4 }) compare_single_input_op_to_numpy( backend.pow, np.power, input_shape=(6, 4), keras_args=[3], np_args=[3]) def test_two_tensor_ops(self): ops_to_test = [ (backend.equal, np.equal), (backend.not_equal, np.not_equal), (backend.greater, np.greater), (backend.greater_equal, np.greater_equal), (backend.less, np.less), (backend.less_equal, np.less_equal), (backend.maximum, np.maximum), (backend.minimum, np.minimum), ] for keras_op, np_op in ops_to_test: compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 7), input_shape_b=(4, 7)) def test_relu(self): x = tf.convert_to_tensor([[-4, 0], [2, 7]], 'float32') # standard relu relu_op = backend.relu(x) self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]]) # alpha (leaky relu used) relu_op = backend.relu(x, alpha=0.5) if not tf.executing_eagerly(): 
self.assertTrue('LeakyRelu' in relu_op.name) self.assertAllClose(backend.eval(relu_op), [[-2, 0], [2, 7]]) # max_value < some elements relu_op = backend.relu(x, max_value=5.) self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 5]]) # nn.relu6 used relu_op = backend.relu(x, max_value=6.) if not tf.executing_eagerly(): self.assertTrue('Relu6' in relu_op.name) # uses tf.nn.relu6 self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 6]]) # max value > 6 relu_op = backend.relu(x, max_value=10.) self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]]) # max value is float relu_op = backend.relu(x, max_value=4.3) self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 4.3]]) # max value == 0 relu_op = backend.relu(x, max_value=0.) self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 0]]) # alpha and max_value relu_op = backend.relu(x, alpha=0.25, max_value=3.) self.assertAllClose(backend.eval(relu_op), [[-1, 0], [2, 3]]) # threshold relu_op = backend.relu(x, threshold=3) self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 7]]) # threshold is float relu_op = backend.relu(x, threshold=1.5) self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]]) # threshold is negative relu_op = backend.relu(x, threshold=-5) self.assertAllClose(backend.eval(relu_op), [[-4, 0], [2, 7]]) # threshold and max_value relu_op = backend.relu(x, threshold=3, max_value=5.) self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 5]]) # threshold and alpha relu_op = backend.relu(x, alpha=0.25, threshold=4.) self.assertAllClose(backend.eval(relu_op), [[-2, -1], [-0.5, 7]]) # threshold, alpha, and max_value relu_op = backend.relu(x, alpha=0.25, threshold=4., max_value=5.) 
self.assertAllClose(backend.eval(relu_op), [[-2, -1], [-0.5, 5]]) # Test case for GitHub issue 35430, with integer dtype x = input_layer.Input(shape=(), name='x', dtype='int64') _ = advanced_activations.ReLU(max_value=100., dtype='int64')(x) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendShapeOpsTest(tf.test.TestCase): def test_reshape(self): compare_single_input_op_to_numpy( backend.reshape, np.reshape, input_shape=(4, 7), keras_args=[(2, 14)], np_args=[(2, 14)]) def test_concatenate(self): a = backend.variable(np.ones((1, 2, 3))) b = backend.variable(np.ones((1, 2, 2))) y = backend.concatenate([a, b], axis=-1) self.assertEqual(y.shape.as_list(), [1, 2, 5]) def test_permute_dimensions(self): compare_single_input_op_to_numpy( backend.permute_dimensions, np.transpose, input_shape=(4, 7), keras_args=[(1, 0)], np_args=[(1, 0)]) def test_resize_images(self): height_factor = 2 width_factor = 2 data_format = 'channels_last' x = backend.variable(np.ones((1, 2, 2, 3))) y = backend.resize_images(x, height_factor, width_factor, data_format) self.assertEqual(y.shape.as_list(), [1, 4, 4, 3]) data_format = 'channels_first' x = backend.variable(np.ones((1, 3, 2, 2))) y = backend.resize_images(x, height_factor, width_factor, data_format) self.assertEqual(y.shape.as_list(), [1, 3, 4, 4]) # Use with a dynamic axis: if not tf.executing_eagerly(): x = backend.placeholder(shape=(1, 3, None, None)) y = backend.resize_images(x, height_factor, width_factor, data_format) self.assertEqual(y.shape.as_list(), [1, 3, None, None]) # Invalid use: with self.assertRaises(ValueError): backend.resize_images( x, height_factor, width_factor, data_format='unknown') def test_resize_volumes(self): height_factor = 2 width_factor = 2 depth_factor = 2 data_format = 'channels_last' x = backend.variable(np.ones((1, 2, 2, 2, 3))) y = backend.resize_volumes(x, depth_factor, height_factor, width_factor, data_format) self.assertEqual(y.shape.as_list(), [1, 4, 4, 4, 3]) 
data_format = 'channels_first' x = backend.variable(np.ones((1, 3, 2, 2, 2))) y = backend.resize_volumes(x, depth_factor, height_factor, width_factor, data_format) self.assertEqual(y.shape.as_list(), [1, 3, 4, 4, 4]) # Invalid use: with self.assertRaises(ValueError): backend.resize_volumes( x, depth_factor, height_factor, width_factor, data_format='unknown') def test_repeat_elements(self): x = backend.variable(np.ones((1, 3, 2))) y = backend.repeat_elements(x, 3, axis=1) self.assertEqual(y.shape.as_list(), [1, 9, 2]) # Use with a dynamic axis: if not tf.executing_eagerly(): x = backend.placeholder(shape=(2, None, 2)) y = backend.repeat_elements(x, 3, axis=1) self.assertEqual(y.shape.as_list(), [2, None, 2]) def test_repeat(self): x = backend.variable(np.ones((1, 3))) y = backend.repeat(x, 2) self.assertEqual(y.shape.as_list(), [1, 2, 3]) def test_flatten(self): compare_single_input_op_to_numpy( backend.flatten, np.reshape, input_shape=(4, 7, 6), np_args=[(4 * 7 * 6,)]) def test_batch_flatten(self): compare_single_input_op_to_numpy( backend.batch_flatten, np.reshape, input_shape=(4, 7, 6), np_args=[(4, 7 * 6)]) def test_temporal_padding(self): def ref_op(x, padding): shape = list(x.shape) shape[1] += padding[0] + padding[1] y = np.zeros(tuple(shape)) y[:, padding[0]:-padding[1], :] = x return y compare_single_input_op_to_numpy( backend.temporal_padding, ref_op, input_shape=(4, 7, 6), keras_args=[(2, 3)], np_args=[(2, 3)]) def test_spatial_2d_padding(self): def ref_op(x, padding, data_format='channels_last'): shape = list(x.shape) if data_format == 'channels_last': shape[1] += padding[0][0] + padding[0][1] shape[2] += padding[1][0] + padding[1][1] y = np.zeros(tuple(shape)) y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x else: shape[2] += padding[0][0] + padding[0][1] shape[3] += padding[1][0] + padding[1][1] y = np.zeros(tuple(shape)) y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x return y 
compare_single_input_op_to_numpy( backend.spatial_2d_padding, ref_op, input_shape=(2, 3, 2, 3), keras_args=[((2, 3), (1, 2))], keras_kwargs={'data_format': 'channels_last'}, np_args=[((2, 3), (1, 2))], np_kwargs={'data_format': 'channels_last'}) compare_single_input_op_to_numpy( backend.spatial_2d_padding, ref_op, input_shape=(2, 3, 2, 3), keras_args=[((2, 3), (1, 2))], keras_kwargs={'data_format': 'channels_first'}, np_args=[((2, 3), (1, 2))], np_kwargs={'data_format': 'channels_first'}) def test_spatial_3d_padding(self): def ref_op(x, padding, data_format='channels_last'): shape = list(x.shape) if data_format == 'channels_last': shape[1] += padding[0][0] + padding[0][1] shape[2] += padding[1][0] + padding[1][1] shape[3] += padding[2][0] + padding[2][1] y = np.zeros(tuple(shape)) y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], padding[2][0]:-padding[2][1], :] = x else: shape[2] += padding[0][0] + padding[0][1] shape[3] += padding[1][0] + padding[1][1] shape[4] += padding[2][0] + padding[2][1] y = np.zeros(tuple(shape)) y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], padding[2][0]:-padding[2][1]] = x return y compare_single_input_op_to_numpy( backend.spatial_3d_padding, ref_op, input_shape=(2, 3, 2, 3, 2), keras_args=[((2, 3), (1, 2), (2, 3))], keras_kwargs={'data_format': 'channels_last'}, np_args=[((2, 3), (1, 2), (2, 3))], np_kwargs={'data_format': 'channels_last'}) compare_single_input_op_to_numpy( backend.spatial_3d_padding, ref_op, input_shape=(2, 3, 2, 3, 2), keras_args=[((2, 3), (1, 2), (2, 3))], keras_kwargs={'data_format': 'channels_first'}, np_args=[((2, 3), (1, 2), (2, 3))], np_kwargs={'data_format': 'channels_first'}) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendNNOpsTest(tf.test.TestCase, parameterized.TestCase): def test_bias_add(self): keras_op = backend.bias_add np_op = np.add compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 7), input_shape_b=(7,)) 
compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 3, 7), input_shape_b=(7,)) compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 3, 5, 7), input_shape_b=(7,)) compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 3, 5, 2, 7), input_shape_b=(7,)) with self.assertRaises((ValueError, tf.errors.InvalidArgumentError)): x = backend.variable((3, 4)) b = backend.variable((3, 4)) backend.bias_add(x, b) with self.assertRaises(ValueError): x = backend.variable((3, 4)) b = backend.variable((4,)) backend.bias_add(x, b, data_format='unknown') def test_bias_add_channels_first(self): def keras_op(x, b): return backend.bias_add(x, b, data_format='channels_first') def np_op(x, b): if x.ndim == 3: b = b.reshape((1, b.shape[0], 1)) if x.ndim == 4: b = b.reshape((1, b.shape[0], 1, 1)) return x + b compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 3, 7), input_shape_b=(3,)) compare_two_inputs_op_to_numpy( keras_op, np_op, input_shape_a=(4, 3, 5, 7), input_shape_b=(3,)) def test_pool2d(self): val = np.random.random((10, 3, 10, 10)) x = backend.variable(val) y = backend.pool2d( x, (2, 2), strides=(1, 1), padding='valid', data_format='channels_first', pool_mode='max') self.assertEqual(y.shape.as_list(), [10, 3, 9, 9]) y = backend.pool2d( x, (2, 2), strides=(1, 1), padding='valid', data_format='channels_first', pool_mode='avg') self.assertEqual(y.shape.as_list(), [10, 3, 9, 9]) val = np.random.random((10, 10, 10, 3)) x = backend.variable(val) y = backend.pool2d( x, (2, 2), strides=(1, 1), padding='valid', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 9, 9, 3]) val = np.random.random((10, 10, 10, 3)) x = backend.variable(val) y = backend.pool2d( x, (2, 2), strides=(1, 1), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 10, 3]) val = np.random.random((10, 10, 10, 3)) x = backend.variable(val) y = backend.pool2d( x, (2, 2), strides=(2, 2), padding='same', 
data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 5, 5, 3]) with self.assertRaises(ValueError): y = backend.pool2d( x, (2, 2), strides=(2, 2), padding='other', data_format='channels_last') with self.assertRaises(ValueError): y = backend.pool2d(x, (2, 2), strides=(2, 2), data_format='other') with self.assertRaises(ValueError): y = backend.pool2d(x, (2, 2, 2), strides=(2, 2)) with self.assertRaises(ValueError): y = backend.pool2d(x, (2, 2), strides=(2, 2, 2)) with self.assertRaises(ValueError): y = backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other') def test_pool3d(self): val = np.random.random((10, 3, 10, 10, 10)) x = backend.variable(val) y = backend.pool3d( x, (2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_first', pool_mode='max') self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9]) y = backend.pool3d( x, (2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_first', pool_mode='avg') self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9]) val = np.random.random((10, 10, 10, 10, 3)) x = backend.variable(val) y = backend.pool3d( x, (2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 9, 9, 9, 3]) val = np.random.random((10, 10, 10, 10, 3)) x = backend.variable(val) y = backend.pool3d( x, (2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 3]) val = np.random.random((10, 10, 10, 10, 3)) x = backend.variable(val) y = backend.pool3d( x, (2, 2, 2), strides=(2, 2, 2), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 3]) def test_conv1d(self): val = np.random.random((10, 4, 10)) x = backend.variable(val) kernel_val = np.random.random((3, 4, 5)) k = backend.variable(kernel_val) y = backend.conv1d( x, k, strides=(1,), padding='valid', data_format='channels_first') self.assertEqual(y.shape.as_list(), [10, 5, 8]) val = 
np.random.random((10, 10, 4)) x = backend.variable(val) y = backend.conv1d( x, k, strides=(1,), padding='valid', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 8, 5]) val = np.random.random((10, 10, 4)) x = backend.variable(val) y = backend.conv1d( x, k, strides=(1,), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 5]) val = np.random.random((10, 10, 4)) x = backend.variable(val) y = backend.conv1d( x, k, strides=(2,), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 5, 5]) def test_local_conv_channels_dim(self): filters = 3 batch_size = 2 for input_shape in [(3, 5), (2, 3, 5), (2, 5, 3, 4)]: channels_in = input_shape[0] input_spatial_shape = input_shape[1:] dim = len(input_spatial_shape) inputs = np.random.normal(0, 1, (batch_size,) + input_shape) inputs_cf = backend.variable(inputs) for kernel_size in [1, 2]: for stride in [1, 2]: kernel_sizes = (kernel_size,) * dim strides = (stride,) * dim output_shape = tuple([ (i - kernel_size + stride) // stride for i in input_spatial_shape ]) kernel_shape = (np.prod(output_shape), np.prod(kernel_sizes) * channels_in, filters) kernel = np.random.normal( 0, 1, output_shape + (channels_in, np.prod(kernel_sizes), filters)) kernel_cf = np.reshape(kernel, kernel_shape) kernel_cf = backend.variable(kernel_cf) conv_cf = backend.local_conv(inputs_cf, kernel_cf, kernel_sizes, strides, output_shape, 'channels_first') inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) + [1]) inputs_cl = backend.variable(inputs_cl) kernel_cl = np.reshape( np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]), kernel_shape) kernel_cl = backend.variable(kernel_cl) conv_cl = backend.local_conv(inputs_cl, kernel_cl, kernel_sizes, strides, output_shape, 'channels_last') conv_cf = backend.eval(conv_cf) conv_cl = backend.eval(conv_cl) self.assertAllCloseAccordingToType( conv_cf, np.transpose(conv_cl, [0, dim + 1] + list(range(1, dim + 
1))), atol=1e-5) @parameterized.named_parameters( ('local_conv1d', (5, 6), (3,), (1,), (3,)), ('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3))) def test_local_conv_1d_and_2d(self, input_shape, kernel_sizes, strides, output_shape): filters = 3 batch_size = 2 inputs = np.random.normal(0, 1, (batch_size,) + input_shape) inputs = backend.variable(inputs) kernel = np.random.normal(0, 1, (np.prod(output_shape), np.prod(kernel_sizes) * input_shape[-1], filters)) kernel = backend.variable(kernel) local_conv = backend.local_conv(inputs, kernel, kernel_sizes, strides, output_shape, 'channels_last') if len(output_shape) == 1: local_conv_dim = backend.local_conv1d(inputs, kernel, kernel_sizes, strides, 'channels_last') else: local_conv_dim = backend.local_conv2d(inputs, kernel, kernel_sizes, strides, output_shape, 'channels_last') local_conv = backend.eval(local_conv) local_conv_dim = backend.eval(local_conv_dim) self.assertAllCloseAccordingToType(local_conv, local_conv_dim) def test_conv2d(self): kernel_val = np.random.random((3, 3, 4, 5)) k = backend.variable(kernel_val) # Test channels_first val = np.random.random((10, 4, 10, 10)) x = backend.variable(val) y = backend.conv2d(x, k, padding='valid', data_format='channels_first') self.assertEqual(y.shape.as_list(), [10, 5, 8, 8]) # Test channels_last val = np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.conv2d( x, k, strides=(1, 1), padding='valid', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 8, 8, 5]) # Test same padding val = np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.conv2d(x, k, padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 10, 5]) # Test dilation_rate val = np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.conv2d( x, k, dilation_rate=(2, 2), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 10, 5]) # Test strides val = 
np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.conv2d( x, k, strides=(2, 2), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 5, 5, 5]) # Test invalid arguments with self.assertRaises(ValueError): y = backend.conv2d( x, k, (2, 2), padding='other', data_format='channels_last') with self.assertRaises(ValueError): y = backend.conv2d(x, k, (2, 2), data_format='other') with self.assertRaises(ValueError): y = backend.conv2d(x, k, (2, 2, 2)) def test_conv2d_transpose(self): input_size = (7, 8) kernel_size = (3, 3) input_depth = 6 filters = 6 batch_size = 2 kernel_val = np.random.random(kernel_size + (input_depth, filters)) k = backend.variable(kernel_val) # Test channels_first input_val = np.random.random((batch_size, input_depth) + input_size) x = backend.variable(input_val) y = backend.conv2d_transpose( x, k, (batch_size, filters) + input_size, padding='same', data_format='channels_first') self.assertEqual( tuple(y.shape.as_list()), (batch_size, filters) + input_size) # Test channels_last input_val = np.random.random((batch_size,) + input_size + (input_depth,)) x = backend.variable(input_val) y = backend.conv2d_transpose( x, k, (batch_size,) + input_size + (filters,), padding='same', data_format='channels_last') self.assertEqual( tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,)) # Test dilation_rate y = backend.conv2d_transpose( x, k, (batch_size,) + input_size + (filters,), padding='same', data_format='channels_last', dilation_rate=(2, 2)) self.assertEqual( tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,)) # Test batch size of None in output_shape y = backend.conv2d_transpose( x, k, (None,) + input_size + (filters,), padding='same', data_format='channels_last') self.assertEqual( tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,)) # Test invalid values with self.assertRaises(ValueError): y = backend.conv2d_transpose( x, k, (2, 2, 8, 9), padding='other', 
data_format='channels_last') with self.assertRaises(ValueError): y = backend.conv2d_transpose(x, k, (2, 2, 8, 9), data_format='other') def test_separable_conv2d(self): val = np.random.random((10, 4, 10, 10)) x = backend.variable(val) depthwise_kernel_val = np.random.random((3, 3, 4, 1)) pointwise_kernel_val = np.random.random((1, 1, 4, 5)) dk = backend.variable(depthwise_kernel_val) pk = backend.variable(pointwise_kernel_val) y = backend.separable_conv2d( x, dk, pk, padding='valid', data_format='channels_first') self.assertEqual(y.shape.as_list(), [10, 5, 8, 8]) val = np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.separable_conv2d( x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 8, 8, 5]) val = np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.separable_conv2d( x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 10, 5]) val = np.random.random((10, 10, 10, 4)) x = backend.variable(val) y = backend.separable_conv2d( x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 5, 5, 5]) with self.assertRaises(ValueError): y = backend.separable_conv2d( x, dk, pk, (2, 2), padding='other', data_format='channels_last') with self.assertRaises(ValueError): y = backend.separable_conv2d(x, dk, pk, (2, 2), data_format='other') with self.assertRaises(ValueError): y = backend.separable_conv2d(x, dk, pk, (2, 2, 2)) def test_conv3d(self): val = np.random.random((10, 4, 10, 10, 10)) x = backend.variable(val) kernel_val = np.random.random((3, 3, 3, 4, 5)) k = backend.variable(kernel_val) y = backend.conv3d(x, k, padding='valid', data_format='channels_first') self.assertEqual(y.shape.as_list(), [10, 5, 8, 8, 8]) val = np.random.random((10, 10, 10, 10, 4)) x = backend.variable(val) y = backend.conv3d( x, k, strides=(1, 1, 1), padding='valid', 
data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 8, 8, 8, 5]) val = np.random.random((10, 10, 10, 10, 4)) x = backend.variable(val) y = backend.conv3d( x, k, strides=(1, 1, 1), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 5]) val = np.random.random((10, 10, 10, 10, 4)) x = backend.variable(val) y = backend.conv3d( x, k, strides=(2, 2, 2), padding='same', data_format='channels_last') self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 5]) with self.assertRaises(ValueError): y = backend.conv3d( x, k, (2, 2, 2), padding='other', data_format='channels_last') with self.assertRaises(ValueError): y = backend.conv3d(x, k, (2, 2, 2), data_format='other') with self.assertRaises(ValueError): y = backend.conv3d(x, k, (2, 2)) def test_rnn(self): # implement a simple RNN num_samples = 4 input_dim = 5 output_dim = 3 timesteps = 6 input_val = np.random.random( (num_samples, timesteps, input_dim)).astype(np.float32) init_state_val = np.random.random( (num_samples, output_dim)).astype(np.float32) w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32) w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32) np_mask = np.random.randint(2, size=(num_samples, timesteps)) def rnn_step_fn(): w_i = backend.variable(w_i_val) w_o = backend.variable(w_o_val) def step_function(x, states): assert len(states) == 1 prev_output = states[0] output = backend.dot(x, w_i) + backend.dot(prev_output, w_o) return output, [output] return step_function # test default setup last_output_list = [[], [], [], [], [], []] outputs_list = [[], [], [], [], [], []] state_list = [[], [], [], [], [], []] rnn_fn = rnn_step_fn() inputs = backend.variable(input_val) initial_states = [backend.variable(init_state_val)] mask = backend.variable(np_mask) kwargs_list = [ { 'go_backwards': False, 'mask': None }, { 'go_backwards': False, 'mask': None, 'unroll': True }, { 'go_backwards': True, 'mask': None }, { 'go_backwards': 
True, 'mask': None, 'unroll': True }, { 'go_backwards': False, 'mask': mask }, { 'go_backwards': False, 'mask': mask, 'unroll': True }, ] for i, kwargs in enumerate(kwargs_list): last_output, outputs, new_states = backend.rnn(rnn_fn, inputs, initial_states, **kwargs) # check static shape inference self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim]) self.assertEqual(outputs.shape.as_list(), [num_samples, timesteps, output_dim]) for state in new_states: self.assertEqual(state.shape.as_list(), [num_samples, output_dim]) last_output_list[i].append(backend.eval(last_output)) outputs_list[i].append(backend.eval(outputs)) self.assertLen(new_states, 1) state_list[i].append(backend.eval(new_states[0])) def assert_list_pairwise(z_list, atol=1e-05): for (z1, z2) in zip(z_list[1:], z_list[:-1]): self.assertAllClose(z1, z2, atol=atol) assert_list_pairwise(last_output_list[0], atol=1e-04) assert_list_pairwise(outputs_list[0], atol=1e-04) assert_list_pairwise(state_list[0], atol=1e-04) assert_list_pairwise(last_output_list[2], atol=1e-04) assert_list_pairwise(outputs_list[2], atol=1e-04) assert_list_pairwise(state_list[2], atol=1e-04) for l, u_l in zip(last_output_list[0], last_output_list[1]): self.assertAllClose(l, u_l, atol=1e-04) for o, u_o in zip(outputs_list[0], outputs_list[1]): self.assertAllClose(o, u_o, atol=1e-04) for s, u_s in zip(state_list[0], state_list[1]): self.assertAllClose(s, u_s, atol=1e-04) for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]): self.assertAllClose(b_l, b_u_l, atol=1e-04) for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]): self.assertAllClose(b_o, b_u_o, atol=1e-04) for b_s, b_u_s in zip(state_list[2], state_list[3]): self.assertAllClose(b_s, b_u_s, atol=1e-04) def test_rnn_additional_states(self): # implement a simple RNN num_samples = 4 input_dim = 5 output_dim = 3 timesteps = 6 input_val = np.random.random( (num_samples, timesteps, input_dim)).astype(np.float32) init_state_val = np.random.random( 
(num_samples, output_dim)).astype(np.float32) w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32) w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32) np_mask = np.random.randint(2, size=(num_samples, timesteps)) def rnn_step_fn(): w_i = backend.variable(w_i_val) w_o = backend.variable(w_o_val) def step_function(x, states): assert len(states) == 2 prev_output = states[0] output = backend.dot(x, w_i) + backend.dot(prev_output, w_o) return output, [output, backend.concatenate([output, output], axis=-1)] return step_function # test default setup last_output_list = [[], [], [], [], [], []] outputs_list = [[], [], [], [], [], []] state_list = [[], [], [], [], [], []] additional_state_list = [[], [], [], [], [], []] rnn_fn = rnn_step_fn() inputs = backend.variable(input_val) initial_states = [ backend.variable(init_state_val), tf.convert_to_tensor( np.concatenate([init_state_val, init_state_val], axis=-1)) ] mask = backend.variable(np_mask) kwargs_list = [ { 'go_backwards': False, 'mask': None }, { 'go_backwards': False, 'mask': None, 'unroll': True }, { 'go_backwards': True, 'mask': None }, { 'go_backwards': True, 'mask': None, 'unroll': True }, { 'go_backwards': False, 'mask': mask }, { 'go_backwards': False, 'mask': mask, 'unroll': True }, ] for i, kwargs in enumerate(kwargs_list): last_output, outputs, new_states = backend.rnn(rnn_fn, inputs, initial_states, **kwargs) # check static shape inference self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim]) self.assertEqual(outputs.shape.as_list(), [num_samples, timesteps, output_dim]) # for state in new_states: # self.assertEqual(state.shape.as_list(), # [num_samples, output_dim]) self.assertEqual(new_states[0].shape.as_list(), [num_samples, output_dim]) self.assertEqual(new_states[1].shape.as_list(), [num_samples, 2 * output_dim]) last_output_list[i].append(backend.eval(last_output)) outputs_list[i].append(backend.eval(outputs)) self.assertLen(new_states, 2) 
state_list[i].append(backend.eval(new_states[0])) additional_state_list[i].append(backend.eval(new_states[1])) def assert_list_pairwise(z_list, atol=1e-05): for (z1, z2) in zip(z_list[1:], z_list[:-1]): self.assertAllClose(z1, z2, atol=atol) assert_list_pairwise(last_output_list[0], atol=1e-04) assert_list_pairwise(outputs_list[0], atol=1e-04) assert_list_pairwise(state_list[0], atol=1e-04) assert_list_pairwise(additional_state_list[0], atol=1e-04) assert_list_pairwise(last_output_list[2], atol=1e-04) assert_list_pairwise(outputs_list[2], atol=1e-04) assert_list_pairwise(state_list[2], atol=1e-04) assert_list_pairwise(additional_state_list[2], atol=1e-04) for l, u_l in zip(last_output_list[0], last_output_list[1]): self.assertAllClose(l, u_l, atol=1e-04) for o, u_o in zip(outputs_list[0], outputs_list[1]): self.assertAllClose(o, u_o, atol=1e-04) for s, u_s in zip(state_list[0], state_list[1]): self.assertAllClose(s, u_s, atol=1e-04) for s, u_s in zip(additional_state_list[0], additional_state_list[1]): self.assertAllClose(s, u_s, atol=1e-04) for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]): self.assertAllClose(b_l, b_u_l, atol=1e-04) for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]): self.assertAllClose(b_o, b_u_o, atol=1e-04) for b_s, b_u_s in zip(state_list[2], state_list[3]): self.assertAllClose(b_s, b_u_s, atol=1e-04) for s, u_s in zip(additional_state_list[2], additional_state_list[3]): self.assertAllClose(s, u_s, atol=1e-04) def test_rnn_output_and_state_masking_independent(self): num_samples = 2 num_timesteps = 4 state_and_io_size = 2 mask_last_num_timesteps = 2 # for second sample only # a step function that just outputs inputs, # but increments states +1 per timestep def step_function(inputs, states): return inputs, [s + 1 for s in states] inputs_vals = np.random.random( (num_samples, num_timesteps, state_and_io_size)) initial_state_vals = np.random.random((num_samples, state_and_io_size)) # masking of two last timesteps for second 
sample only mask_vals = np.ones((num_samples, num_timesteps)) mask_vals[1, -mask_last_num_timesteps:] = 0 # outputs expected to be same as inputs for the first sample expected_outputs = inputs_vals.copy() # but for the second sample all outputs in masked region should be the same # as last output before masked region expected_outputs[1, -mask_last_num_timesteps:] = \ expected_outputs[1, -(mask_last_num_timesteps + 1)] expected_last_state = initial_state_vals.copy() # first state should be incremented for every timestep (no masking) expected_last_state[0] += num_timesteps # second state should not be incremented for last two timesteps expected_last_state[1] += (num_timesteps - mask_last_num_timesteps) # verify same expected output for `unroll=true/false` inputs = backend.variable(inputs_vals) initial_states = [backend.variable(initial_state_vals)] mask = backend.variable(mask_vals) for unroll in [True, False]: _, outputs, last_states = backend.rnn( step_function, inputs, initial_states, mask=mask, unroll=unroll, input_length=num_timesteps if unroll else None) self.assertAllClose(backend.eval(outputs), expected_outputs) self.assertAllClose(backend.eval(last_states[0]), expected_last_state) def test_rnn_output_num_dim_larger_than_2_masking(self): num_samples = 3 num_timesteps = 4 num_features = 5 def step_function(inputs, states): outputs = backend.tile(backend.expand_dims(inputs), [1, 1, 2]) return outputs, [backend.identity(s) for s in states] # Note: cannot just return states (which can be a problem) -> # tensorflow/python/ops/resource_variable_ops.py", line 824, in set_shape # NotImplementedError: ResourceVariable does not implement set_shape() inputs_vals = np.random.random((num_samples, num_timesteps, num_features)) initial_state_vals = np.random.random((num_samples, 6)) mask_vals = np.ones((num_samples, num_timesteps)) mask_vals[-1, -1] = 0 # final timestep masked for last sample expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1) # for the 
last sample, the final timestep (in masked region) should be the # same as the second to final output (before masked region) expected_outputs[-1, -1] = expected_outputs[-1, -2] inputs = backend.variable(inputs_vals) initial_states = [backend.variable(initial_state_vals)] mask = backend.variable(mask_vals) for unroll in [True, False]: _, outputs, _ = backend.rnn( step_function, inputs, initial_states, mask=mask, unroll=unroll, input_length=num_timesteps if unroll else None) self.assertAllClose(backend.eval(outputs), expected_outputs) def test_rnn_state_num_dim_larger_than_2_masking(self): num_samples = 3 num_timesteps = 4 def step_function(inputs, states): return inputs, [s + 1 for s in states] inputs_vals = np.random.random((num_samples, num_timesteps, 5)) initial_state_vals = np.random.random((num_samples, 6, 7)) mask_vals = np.ones((num_samples, num_timesteps)) mask_vals[0, -2:] = 0 # final two timesteps masked for first sample expected_last_state = initial_state_vals.copy() expected_last_state[0] += (num_timesteps - 2) expected_last_state[1:] += num_timesteps inputs = backend.variable(inputs_vals) initial_states = [backend.variable(initial_state_vals)] mask = backend.variable(mask_vals) for unroll in [True, False]: _, _, last_states = backend.rnn( step_function, inputs, initial_states, mask=mask, unroll=unroll, input_length=num_timesteps if unroll else None) self.assertAllClose(backend.eval(last_states[0]), expected_last_state) def test_batch_normalization(self): g_val = np.random.random((3,)) b_val = np.random.random((3,)) gamma = backend.variable(g_val) beta = backend.variable(b_val) # 3D NHC case val = np.random.random((10, 5, 3)) x = backend.variable(val) mean, var = tf.nn.moments(x, (0, 1), None, None, False) normed = backend.batch_normalization( x, mean, var, beta, gamma, axis=-1, epsilon=1e-3) self.assertEqual(normed.shape.as_list(), [10, 5, 3]) # 4D NHWC case val = np.random.random((10, 5, 5, 3)) x = backend.variable(val) mean, var = tf.nn.moments(x, (0, 
1, 2), None, None, False) normed = backend.batch_normalization( x, mean, var, beta, gamma, axis=-1, epsilon=1e-3) self.assertEqual(normed.shape.as_list(), [10, 5, 5, 3]) # 4D NCHW case if not tf.executing_eagerly(): # Eager CPU kernel for NCHW does not exist. val = np.random.random((10, 3, 5, 5)) x = backend.variable(val) mean, var = tf.nn.moments(x, (0, 2, 3), None, None, False) normed = backend.batch_normalization( x, mean, var, beta, gamma, axis=1, epsilon=1e-3) self.assertEqual(normed.shape.as_list(), [10, 3, 5, 5]) def test_normalize_batch_in_training(self): val = np.random.random((10, 3, 10, 10)) x = backend.variable(val) reduction_axes = (0, 2, 3) g_val = np.random.random((3,)) b_val = np.random.random((3,)) gamma = backend.variable(g_val) beta = backend.variable(b_val) normed, mean, var = backend.normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=1e-3) self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10]) self.assertEqual(mean.shape.as_list(), [ 3, ]) self.assertEqual(var.shape.as_list(), [ 3, ]) # case: gamma=None gamma = None normed, mean, var = backend.normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=1e-3) self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10]) self.assertEqual(mean.shape.as_list(), [ 3, ]) self.assertEqual(var.shape.as_list(), [ 3, ]) # case: beta=None beta = None normed, mean, var = backend.normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=1e-3) self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10]) self.assertEqual(mean.shape.as_list(), [ 3, ]) self.assertEqual(var.shape.as_list(), [ 3, ]) def test_dropout(self): inputs = tf.ones((200, 200)) outputs = backend.dropout(inputs, 0.2) outputs_val = backend.eval(outputs) self.assertEqual(np.min(outputs_val), 0) self.assertAllClose(np.count_nonzero(outputs_val), 32000, atol=1000) # Test noise shape outputs = backend.dropout(inputs, 0.2, noise_shape=(200, 1)) outputs_val = backend.eval(outputs) 
self.assertAllClose(outputs_val[2, :], outputs_val[3, :], atol=1e-5) class BackendCrossEntropyLossesTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_binary_crossentropy_with_sigmoid(self): t = backend.constant([[0, 1, 0]]) logits = backend.constant([[8., 1., 1.]]) p = backend.sigmoid(logits) p = tf.identity(tf.identity(p)) result = self.evaluate(backend.binary_crossentropy(t, p)) self.assertArrayNear(result[0], [8., 0.313, 1.313], 1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_loss(self): t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) p = backend.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) result = backend.categorical_crossentropy(t, p) self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3) p = backend.constant([[.9, .05, .05], [.05, .89, .01], [.05, .06, .94]]) result = backend.categorical_crossentropy(t, p, axis=0) self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3) p = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) result = backend.categorical_crossentropy(t, p, from_logits=True), self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3) p = backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]]) result = backend.categorical_crossentropy(t, p, from_logits=True, axis=0), self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self): t = backend.placeholder() p = backend.placeholder() o = backend.categorical_crossentropy(t, p) t_val = tf.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) p_val = tf.convert_to_tensor([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .116, .062], 1e-3) # 
With axis set o = backend.categorical_crossentropy(t, p, axis=0) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .065, .111], 1e-3) # from logits p_val = tf.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) o = backend.categorical_crossentropy(t, p, from_logits=True) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, 0, .17], 1e-3) # from logits and axis set o = backend.categorical_crossentropy(t, p, from_logits=True, axis=0) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, .003, .036], 1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_with_softmax(self): t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) p = backend.softmax(logits) p = tf.identity(tf.identity(p)) result = self.evaluate(backend.categorical_crossentropy(t, p)) self.assertArrayNear(result, [0.002, 0.0005, 0.17], 1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_sparse_categorical_crossentropy_loss(self): t = backend.constant([0, 1, 2]) p = backend.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) result = backend.sparse_categorical_crossentropy(t, p) self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3) p = backend.constant([[.9, .05, .05], [.05, .89, .01], [.05, .06, .94]]) result = backend.sparse_categorical_crossentropy(t, p, axis=0) self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3) p = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) result = backend.sparse_categorical_crossentropy(t, p, from_logits=True), self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3) p = backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]]) result = backend.sparse_categorical_crossentropy( t, p, from_logits=True, axis=0), 
self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3) @combinations.generate(combinations.combine(mode=['graph'])) def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self): # This test only runs in graph because the TF op layer is not supported yet # for sparse ops. t = backend.placeholder() p = backend.placeholder() o = backend.sparse_categorical_crossentropy(t, p) t_val = tf.convert_to_tensor([0, 1, 2]) p_val = tf.convert_to_tensor([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .116, .062], 1e-3) # With axis set with self.assertRaisesRegex( ValueError, 'Cannot compute sparse categorical crossentropy with `axis=0`'): o = backend.sparse_categorical_crossentropy(t, p, axis=0) f = backend.function([t, p], o) _ = f([t_val, p_val]) # from logits p_val = tf.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) o = backend.sparse_categorical_crossentropy(t, p, from_logits=True) f = backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, 0, .17], 1e-3) # from logits and axis set with self.assertRaisesRegex( ValueError, 'Cannot compute sparse categorical crossentropy with `axis=0`'): o = backend.sparse_categorical_crossentropy( t, p, from_logits=True, axis=0) f = backend.function([t, p], o) _ = f([t_val, p_val]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_sparse_categorical_crossentropy_with_softmax(self): t = backend.constant([0, 1, 2]) logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) p = backend.softmax(logits) p = tf.identity(tf.identity(p)) result = self.evaluate(backend.sparse_categorical_crossentropy(t, p)) self.assertArrayNear(result, [0.002, 0.0005, 0.17], 1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_binary_crossentropy_from_logits_no_warnings(self): t = backend.constant([[0, 1, 0]]) logits = 
backend.constant([[8., 1., 1.]]) with warnings.catch_warnings(record=True) as w: self.evaluate(backend.binary_crossentropy(t, logits, from_logits=True)) self.assertEmpty(w) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_binary_crossentropy_from_logits_with_sigmoid(self): t = backend.constant([[0, 1, 0]]) logits = backend.constant([[8., 1., 1.]]) p = activations.sigmoid(logits) with warnings.catch_warnings(record=True) as w: self.evaluate(backend.binary_crossentropy(t, p, from_logits=True)) self.assertLen(w, 1) self.assertIn('received `from_logits=True`', str(w[0].message)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_categorical_crossentropy_from_logits_with_softmax(self): t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) p = activations.softmax(logits) with warnings.catch_warnings(record=True) as w: self.evaluate(backend.categorical_crossentropy(t, p, from_logits=True)) self.assertLen(w, 1) self.assertIn('received `from_logits=True`', str(w[0].message)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_sparse_categorical_crossentropy_from_logits_with_softmax(self): t = backend.constant([0, 1, 2]) logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) p = activations.softmax(logits) with warnings.catch_warnings(record=True) as w: self.evaluate( backend.sparse_categorical_crossentropy(t, p, from_logits=True)) self.assertLen(w, 1) self.assertIn('received `from_logits=True`', str(w[0].message)) @test_util.with_control_flow_v2 @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class TestCTC(tf.test.TestCase): def test_ctc_decode(self): depth = 6 seq_len_0 = 5 input_prob_matrix_0 = np.asarray( [ [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908], [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517], [0.199959, 0.489485, 0.0233221, 0.0251417, 
0.0233289, 0.238763], [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655], [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878], # Random entry added in at time=5 [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671] ], dtype=np.float32) # len max_time_steps array of batch_size x depth matrices inputs = ( [input_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0) ] + # Pad to max_time_steps = 8 2 * [np.zeros((1, depth), dtype=np.float32)]) inputs = backend.variable(np.asarray(inputs).transpose((1, 0, 2))) # batch_size length vector of sequence_lengths input_length = backend.variable(np.array([seq_len_0], dtype=np.int32)) # batch_size length vector of negative log probabilities log_prob_truth = np.array( [ -3.5821197, # output beam 0 -3.777835 # output beam 1 ], np.float32)[np.newaxis, :] decode_truth = [ np.array([1, 0, -1, -1, -1, -1, -1]), np.array([0, 1, 0, -1, -1, -1, -1]) ] beam_width = 2 top_paths = 2 decode_pred_tf, log_prob_pred_tf = backend.ctc_decode( inputs, input_length, greedy=False, beam_width=beam_width, top_paths=top_paths) self.assertEqual(len(decode_pred_tf), top_paths) log_prob_pred = backend.eval(log_prob_pred_tf) for i in range(top_paths): self.assertTrue( np.alltrue(decode_truth[i] == backend.eval(decode_pred_tf[i]))) self.assertAllClose(log_prob_truth, log_prob_pred) def test_ctc_batch_cost(self): with self.cached_session(): label_lens = np.expand_dims(np.asarray([5, 4]), 1) input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps loss_log_probs = [3.34211, 5.42262] # dimensions are batch x time x categories labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]]) inputs = np.asarray( [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688], [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533], [0.458235, 0.396634, 0.123377, 0.00648837, 
0.00903441, 0.00623107]], [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508], [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549], [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456], [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345], [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]], dtype=np.float32) labels = backend.variable(labels, dtype='int32') inputs = backend.variable(inputs, dtype='float32') input_lens = backend.variable(input_lens, dtype='int32') label_lens = backend.variable(label_lens, dtype='int32') res = backend.eval( backend.ctc_batch_cost(labels, inputs, input_lens, label_lens)) self.assertAllClose(res[:, 0], loss_log_probs, atol=1e-05) # test when batch_size = 1, that is, one sample only ref = [3.34211] input_lens = np.expand_dims(np.asarray([5]), 1) label_lens = np.expand_dims(np.asarray([5]), 1) labels = np.asarray([[0, 1, 2, 1, 0]]) inputs = np.asarray( [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688], [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533], [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]] ], dtype=np.float32) k_labels = backend.variable(labels, dtype='int32') k_inputs = backend.variable(inputs, dtype='float32') k_input_lens = backend.variable(input_lens, dtype='int32') k_label_lens = backend.variable(label_lens, dtype='int32') res = backend.eval( backend.ctc_batch_cost(k_labels, k_inputs, k_input_lens, k_label_lens)) self.assertAllClose(res[:, 0], ref, atol=1e-05) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class TestRandomOps(tf.test.TestCase): def test_random_normal(self): np.random.seed(123) x = backend.random_normal((500, 500)) val = backend.eval(x) self.assertAllClose(np.mean(val), 0., atol=0.01) self.assertAllClose(np.std(val), 1., atol=0.01) def 
test_random_uniform(self): np.random.seed(123) x = backend.random_uniform((500, 500)) val = backend.eval(x) self.assertAllClose(np.mean(val), 0.5, atol=0.01) self.assertAllClose(np.max(val), 1., atol=0.01) self.assertAllClose(np.min(val), 0., atol=0.01) def test_random_binomial(self): np.random.seed(123) x = backend.random_binomial((500, 500), p=0.5) self.assertAllClose(np.mean(backend.eval(x)), 0.5, atol=0.01) def test_truncated_normal(self): np.random.seed(123) x = backend.truncated_normal((500, 500), mean=0.0, stddev=1.0) x = backend.truncated_normal((1000, 1000), mean=0.0, stddev=1.0) y = backend.eval(x) self.assertAllClose(np.mean(y), 0., atol=0.01) self.assertAllClose(np.std(y), 0.88, atol=0.01) self.assertAllClose(np.max(y), 2., atol=0.01) self.assertAllClose(np.min(y), -2., atol=0.01) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class FunctionTest(tf.test.TestCase): def test_function_basics(self): if tf.executing_eagerly(): self.skipTest('eager backend.function does not support updates') x1 = backend.placeholder(shape=(), dtype='float32') x2 = backend.placeholder(shape=(), dtype='int32') v = backend.variable(10.) y1 = x1 + backend.cast(x2, 'float32') + v y2 = x1 * backend.cast(x2, 'float32') with tf.control_dependencies([y1]): u = backend.update(v, x1) f = backend.function([x1, x2], [y1, y2], updates=[u]) output_values = f([2, 3]) self.assertEqual(output_values, [15., 6.]) self.assertEqual(backend.eval(v), 2.) def test_function_dict_outputs(self): x_ph = backend.placeholder(shape=(), name='x') y_ph = backend.placeholder(shape=(), name='y') outputs = {'x*y': y_ph * x_ph, 'x*x': x_ph * x_ph} f = backend.function(inputs=[x_ph, y_ph], outputs=outputs) x, y = 2., 5. results = f([x, y]) self.assertEqual(results['x*y'], 10.) 
self.assertEqual(results['x*x'], 4) def test_function_dict_inputs(self): placeholders = { 'x': backend.placeholder(shape=()), 'y': backend.placeholder(shape=()) } outputs = [placeholders['x'] * placeholders['y']] f = backend.function(inputs=placeholders, outputs=outputs) results = f({'x': 2., 'y': 3.}) self.assertEqual(results[0], 6.) def test_function_single_input_output(self): x_ph = backend.placeholder(shape=(), name='x') output = x_ph * x_ph f = backend.function(x_ph, output) result = f(2.) self.assertEqual(result, 4.) def test_tuple_updates(self): if tf.executing_eagerly(): self.skipTest('eager backend.function does not support updates') x_ph = backend.placeholder(ndim=2) v = backend.variable(np.ones((4, 2))) output = x_ph**2 + v new_v = v + x_ph f = backend.function(x_ph, output, updates=[(v, new_v)]) input_val = np.random.random((4, 2)) result = f(input_val) self.assertAllClose(result, input_val**2 + 1) self.assertAllClose(backend.get_value(v), np.ones((4, 2)) + input_val) class BackendGraphTests(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph'])) def test_function_placeholder_with_default(self): with backend.get_graph().as_default(): x1 = tf.compat.v1.placeholder_with_default( np.array(2., dtype='float32'), shape=()) x2 = tf.compat.v1.placeholder_with_default( np.array(3, dtype='int32'), shape=()) y1 = x1 + backend.cast(x2, 'float32') y2 = x1 * backend.cast(x2, 'float32') f = backend.function([x1, x2], [y1, y2]) output_values = f([4, 5]) self.assertEqual(output_values, [9., 20.]) output_values = f([None, None]) self.assertEqual(output_values, [5., 6.]) def test_function_tf_feed_symbols(self): # Test Keras backend functions with TF tensor inputs. with tf.Graph().as_default(), self.cached_session(): # Test feeding a resource variable to `function`. x1 = backend.placeholder(shape=()) x2 = backend.placeholder(shape=()) lr = backend.learning_phase() # Include a placeholder_with_default. 
y1 = backend.variable(10.) y2 = 3 f = backend.function( inputs=[x1, x2, lr], outputs=[x1 + 1, backend.in_train_phase(x2 + 2, x2 - 1)]) outs = f([y1, y2, None]) # Use default learning_phase value. self.assertEqual(outs, [11., 2.]) outs = f([y1, y2, 1]) # Set learning phase value. self.assertEqual(outs, [11., 5.]) # Test triggering a callable refresh by changing the input. y3 = backend.constant(20.) # Test with tensor outs = f([y3, y2, None]) self.assertEqual(outs, [21., 2.]) y4 = 4 # Test with non-symbol outs = f([y4, y2, None]) self.assertEqual(outs, [5., 2.]) # Test with a different dtype y5 = backend.constant(10., dtype='float64') outs = f([y5, y2, None]) self.assertEqual(outs, [11., 2.]) def test_function_tf_fetches(self): # Additional operations can be passed to tf.compat.v1.Session().run() via # its `fetches` arguments. In contrast to `updates` argument of # backend.function() these do not have control dependency on `outputs` # so they can run in parallel. Also they should not contribute to output of # backend.function(). with tf.Graph().as_default(), self.cached_session(): x = backend.variable(0.) y = backend.variable(0.) x_placeholder = backend.placeholder(shape=()) y_placeholder = backend.placeholder(shape=()) f = backend.function( inputs=[x_placeholder, y_placeholder], outputs=[x_placeholder + y_placeholder], updates=[(x, x_placeholder + 1.)], fetches=[backend.update(y, 5.)]) output = f([10., 20.]) self.assertEqual(output, [30.]) self.assertEqual(backend.get_session().run(fetches=[x, y]), [11., 5.]) def test_function_tf_feed_dict(self): # Additional substitutions can be passed to `tf.compat.v1.Session().run()` # via its `feed_dict` arguments. Note that the feed_dict is passed once in # the constructor but we can modify the values in the dictionary. Through # this feed_dict we can provide additional substitutions besides Keras # inputs. with tf.Graph().as_default(), self.cached_session(): x = backend.variable(0.) y = backend.variable(0.) 
x_placeholder = backend.placeholder(shape=()) y_placeholder = backend.placeholder(shape=()) feed_dict = {y_placeholder: 3.} fetches = [backend.update(y, y_placeholder * 10.)] f = backend.function( inputs=[x_placeholder], outputs=[x_placeholder + 1.], updates=[(x, x_placeholder + 10.)], feed_dict=feed_dict, fetches=fetches) output = f([10.]) self.assertEqual(output, [11.]) self.assertEqual(backend.get_session().run(fetches=[x, y]), [20., 30.]) # updated value in feed_dict will be modified within the K.function() feed_dict[y_placeholder] = 4. output = f([20.]) self.assertEqual(output, [21.]) self.assertEqual(backend.get_session().run(fetches=[x, y]), [30., 40.]) def test_function_tf_run_options_with_run_metadata(self): with tf.Graph().as_default(), self.cached_session(): x_placeholder = backend.placeholder(shape=()) y_placeholder = backend.placeholder(shape=()) run_options = tf.compat.v1.RunOptions(output_partition_graphs=True) run_metadata = tf.compat.v1.RunMetadata() # enable run_options. f = backend.function( inputs=[x_placeholder, y_placeholder], outputs=[x_placeholder + y_placeholder], options=run_options, run_metadata=run_metadata) output = f([10., 20.]) self.assertEqual(output, [30.]) self.assertNotEmpty(run_metadata.partition_graphs) # disable run_options. 
f1 = backend.function( inputs=[x_placeholder, y_placeholder], outputs=[x_placeholder + y_placeholder], run_metadata=run_metadata) output1 = f1([10., 20.]) self.assertEqual(output1, [30.]) self.assertEmpty(run_metadata.partition_graphs) def test_function_fetch_callbacks(self): class CallbackStub: def __init__(self): self.times_called = 0 self.callback_result = 0 def _fetch_callback(self, result): self.times_called += 1 self.callback_result = result with tf.Graph().as_default(), self.cached_session(): callback = CallbackStub() x_placeholder = backend.placeholder(shape=()) y_placeholder = backend.placeholder(shape=()) callback_op = x_placeholder * y_placeholder f = backend.function( inputs=[x_placeholder, y_placeholder], outputs=[x_placeholder + y_placeholder]) f.fetches.append(callback_op) f.fetch_callbacks[callback_op] = callback._fetch_callback _ = f([10., 20.]) self.assertEqual(callback.times_called, 1) self.assertEqual(callback.callback_result, 200) def test_get_session_different_graphs(self): with tf.Graph().as_default(): x = backend.constant(1) session = backend.get_session() self.assertIs(session, backend.get_session((x,))) self.assertIs(session, backend.get_session()) with tf.Graph().as_default(): self.assertIs(session, backend.get_session((x,))) self.assertIsNot(session, backend.get_session()) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class ControlOpsTests(tf.test.TestCase): def test_function_switch_basics(self): x = tf.constant(2.0) y = tf.constant(3.0) def xpowy(): return backend.pow(x, y) def ypowx(): return backend.pow(y, x) tensor = backend.switch(backend.less(x, y), xpowy, ypowx) self.assertEqual(backend.eval(tensor), [8.0]) tensor = backend.switch(backend.greater(x, y), xpowy, ypowx) self.assertEqual(backend.eval(tensor), [9.0]) def test_unequal_rank(self): x = tf.convert_to_tensor( np.array([[1, 2, 3], [4, 5, 6]]), dtype='float32') y = tf.convert_to_tensor( np.array([1, 2, 3]), dtype='float32') def true_func(): return x 
def false_func(): return y with self.assertRaisesRegex(ValueError, 'Rank of `condition` should be less than'): backend.switch(backend.equal(x, x), false_func, true_func) class ContextValueCacheTest(tf.test.TestCase): def test_cache(self): cache = backend.ContextValueCache(list) graph1 = tf.Graph() graph2 = tf.Graph() cache[graph1].append(1) with graph1.as_default(): cache[None].append(2) with graph2.as_default(): cache[None].append(3) cache[graph2].append(4) self.assertAllEqual(cache[graph1], [1, 2]) self.assertAllEqual(cache[graph2], [3, 4]) with tf.__internal__.eager_context.eager_mode(): cache[None].append(5) cache[None].append(6) self.assertAllEqual(cache[None], [5, 6]) self.assertLen(cache, 3) del graph1 gc.collect() self.assertLen(cache, 2) def test_cache_in_parent_graph(self): cache = backend.ContextValueCache(int) cache.setdefault(None, backend.constant(5)) with tf.Graph().as_default() as g: # g is not a child graph of the default test context, so the recursive # lookup will create a new default value. self.assertAllEqual(cache[g], 0) @tf.function def fn(): # The function graph is a child of the default test context, so # __getitem__ will return the previously saved value. return cache[tf.compat.v1.get_default_graph()] self.assertEqual(self.evaluate(fn()), 5) if __name__ == '__main__': tf.test.main()
82,976
35.553744
84
py
keras
keras-master/keras/optimizer_v1.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name # pylint: disable=g-classes-have-attributes """Legacy v1 optimizer classes. For more examples see the base class `tf.compat.v1.keras.optimizers.Optimizer`. """ import tensorflow.compat.v2 as tf from keras import backend class Optimizer: """Abstract optimizer base class. Note: this is the parent class of all optimizers, not an actual optimizer that can be used for training models. All Keras optimizers support the following keyword arguments: clipnorm: float >= 0. Gradients will be clipped when their L2 norm exceeds this value. clipvalue: float >= 0. Gradients will be clipped when their absolute value exceeds this value. """ def __init__(self, **kwargs): allowed_kwargs = {'clipnorm', 'clipvalue'} for k in kwargs: if k not in allowed_kwargs: raise TypeError('Unexpected keyword argument ' 'passed to optimizer: ' + str(k)) # checks that clipnorm >= 0 and clipvalue >= 0 if kwargs[k] < 0: raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k])) self.__dict__.update(kwargs) self.updates = [] self.weights = [] # Set this to False, indicating `apply_gradients` does not take the # `experimental_aggregate_gradients` argument. _HAS_AGGREGATE_GRAD = False def _create_all_weights(self, params): """Creates and sets all optimizer weights. 
Args: params: list or tuple of `Variable` objects that will be minimized using this optimizer. Returns: Specific weight values that are used in `get_updates` """ raise NotImplementedError def get_updates(self, loss, params): raise NotImplementedError def get_gradients(self, loss, params): """Returns gradients of `loss` with respect to `params`. Args: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented). """ grads = backend.gradients(loss, params) if any(g is None for g in grads): raise ValueError('An operation has `None` for gradient. ' 'Please make sure that all of your ops have a ' 'gradient defined (i.e. are differentiable). ' 'Common ops without gradient: ' 'backend.argmax, backend.round, backend.eval.') if hasattr(self, 'clipnorm'): grads = [tf.clip_by_norm(g, self.clipnorm) for g in grads] if hasattr(self, 'clipvalue'): grads = [ tf.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads ] return grads def set_weights(self, weights): """Sets the weights of the optimizer, from Numpy arrays. Should only be called after computing the gradients (otherwise the optimizer has no weights). Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of `get_weights`). Raises: ValueError: in case of incompatible weight shapes. 
""" params = self.weights if len(params) != len(weights): raise ValueError('Length of the specified weight list (' + str(len(weights)) + ') does not match the number of weights ' 'of the optimizer (' + str(len(params)) + ')') weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError('Optimizer weight shape ' + str(pv.shape) + ' not compatible with ' 'provided weight shape ' + str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def get_weights(self): """Returns the current value of the weights of the optimizer. Returns: A list of numpy arrays. """ return backend.batch_get_value(self.weights) def get_config(self): config = {} if hasattr(self, 'clipnorm'): config['clipnorm'] = self.clipnorm if hasattr(self, 'clipvalue'): config['clipvalue'] = self.clipvalue return config @classmethod def from_config(cls, config): return cls(**config) class SGD(Optimizer): """Stochastic gradient descent optimizer. Includes support for momentum, learning rate decay, and Nesterov momentum. Args: lr: float >= 0. Learning rate. momentum: float >= 0. Parameter that accelerates SGD in the relevant direction and dampens oscillations. decay: float >= 0. Learning rate decay over each update. nesterov: boolean. Whether to apply Nesterov momentum. 
""" def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs): super(SGD, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.iterations = backend.variable(0, dtype='int64', name='iterations') self.lr = backend.variable(lr, name='lr') self.momentum = backend.variable(momentum, name='momentum') self.decay = backend.variable(decay, name='decay') self.initial_decay = decay self.nesterov = nesterov def _create_all_weights(self, params): shapes = [backend.int_shape(p) for p in params] moments = [backend.zeros(shape) for shape in shapes] self.weights = [self.iterations] + moments return moments def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [tf.compat.v1.assign_add(self.iterations, 1)] lr = self.lr if self.initial_decay > 0: lr = lr * ( # pylint: disable=g-no-augmented-assignment 1. / (1. + self.decay * tf.cast(self.iterations, backend.dtype(self.decay)))) # momentum moments = self._create_all_weights(params) for p, g, m in zip(params, grads, moments): v = self.momentum * m - lr * g # velocity self.updates.append(tf.compat.v1.assign(m, v)) if self.nesterov: new_p = p + self.momentum * v - lr * g else: new_p = p + v # Apply constraints. if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'momentum': float(backend.get_value(self.momentum)), 'decay': float(backend.get_value(self.decay)), 'nesterov': self.nesterov } base_config = super(SGD, self).get_config() return dict(list(base_config.items()) + list(config.items())) class RMSprop(Optimizer): """RMSProp optimizer. It is recommended to leave the parameters of this optimizer at their default values (except the learning rate, which can be freely tuned). Args: lr: float >= 0. Learning rate. rho: float >= 0. epsilon: float >= 0. Fuzz factor. 
If `None`, defaults to `backend.epsilon()`. decay: float >= 0. Learning rate decay over each update. """ def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs): super(RMSprop, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.lr = backend.variable(lr, name='lr') self.rho = backend.variable(rho, name='rho') self.decay = backend.variable(decay, name='decay') self.iterations = backend.variable(0, dtype='int64', name='iterations') if epsilon is None: epsilon = backend.epsilon() self.epsilon = epsilon self.initial_decay = decay def _create_all_weights(self, params): accumulators = [ backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params] self.weights = accumulators return accumulators def get_updates(self, loss, params): grads = self.get_gradients(loss, params) accumulators = self._create_all_weights(params) self.updates = [tf.compat.v1.assign_add(self.iterations, 1)] lr = self.lr if self.initial_decay > 0: lr = lr * ( # pylint: disable=g-no-augmented-assignment 1. / (1. + self.decay * tf.cast(self.iterations, backend.dtype(self.decay)))) for p, g, a in zip(params, grads, accumulators): # update accumulator new_a = self.rho * a + (1. - self.rho) * tf.square(g) self.updates.append(tf.compat.v1.assign(a, new_a)) new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon) # Apply constraints. if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'rho': float(backend.get_value(self.rho)), 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon } base_config = super(RMSprop, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Adagrad(Optimizer): """Adagrad optimizer. 
Adagrad is an optimizer with parameter-specific learning rates, which are adapted relative to how frequently a parameter gets updated during training. The more updates a parameter receives, the smaller the updates. It is recommended to leave the parameters of this optimizer at their default values. # Arguments lr: float >= 0. Initial learning rate. epsilon: float >= 0. If `None`, defaults to `backend.epsilon()`. decay: float >= 0. Learning rate decay over each update. # References - [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) """ def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs): super(Adagrad, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.lr = backend.variable(lr, name='lr') self.decay = backend.variable(decay, name='decay') self.iterations = backend.variable(0, dtype='int64', name='iterations') if epsilon is None: epsilon = backend.epsilon() self.epsilon = epsilon self.initial_decay = decay def _create_all_weights(self, params): shapes = [backend.int_shape(p) for p in params] accumulators = [backend.zeros(shape) for shape in shapes] self.weights = accumulators return accumulators def get_updates(self, loss, params): grads = self.get_gradients(loss, params) accumulators = self._create_all_weights(params) self.updates = [tf.compat.v1.assign_add(self.iterations, 1)] lr = self.lr if self.initial_decay > 0: lr = lr * ( # pylint: disable=g-no-augmented-assignment 1. / (1. + self.decay * tf.cast(self.iterations, backend.dtype(self.decay)))) for p, g, a in zip(params, grads, accumulators): new_a = a + tf.square(g) # update accumulator self.updates.append(tf.compat.v1.assign(a, new_a)) new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon) # Apply constraints. 
if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon } base_config = super(Adagrad, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Adadelta(Optimizer): """Adadelta optimizer. Adadelta is a more robust extension of Adagrad that adapts learning rates based on a moving window of gradient updates, instead of accumulating all past gradients. This way, Adadelta continues learning even when many updates have been done. Compared to Adagrad, in the original version of Adadelta you don't have to set an initial learning rate. In this version, initial learning rate and decay factor can be set, as in most other Keras optimizers. It is recommended to leave the parameters of this optimizer at their default values. Arguments: lr: float >= 0. Initial learning rate, defaults to 1. It is recommended to leave it at the default value. rho: float >= 0. Adadelta decay factor, corresponding to fraction of gradient to keep at each time step. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `backend.epsilon()`. decay: float >= 0. Initial learning rate decay. 
References: - [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701) """ def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs): super(Adadelta, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.lr = backend.variable(lr, name='lr') self.decay = backend.variable(decay, name='decay') self.iterations = backend.variable(0, dtype='int64', name='iterations') if epsilon is None: epsilon = backend.epsilon() self.rho = rho self.epsilon = epsilon self.initial_decay = decay def _create_all_weights(self, params): shapes = [backend.int_shape(p) for p in params] accumulators = [backend.zeros(shape) for shape in shapes] delta_accumulators = [backend.zeros(shape) for shape in shapes] self.weights = accumulators + delta_accumulators return accumulators, delta_accumulators def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [tf.compat.v1.assign_add(self.iterations, 1)] accumulators, delta_accumulators = self._create_all_weights(params) lr = self.lr if self.initial_decay > 0: lr = lr * ( # pylint: disable=g-no-augmented-assignment 1. / (1. + self.decay * tf.cast(self.iterations, backend.dtype(self.decay)))) for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators): # update accumulator new_a = self.rho * a + (1. - self.rho) * tf.square(g) self.updates.append(tf.compat.v1.assign(a, new_a)) # use the new accumulator and the *old* delta_accumulator update = g * backend.sqrt(d_a + self.epsilon) / backend.sqrt( new_a + self.epsilon) new_p = p - lr * update # Apply constraints. 
if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) # update delta_accumulator new_d_a = self.rho * d_a + (1 - self.rho) * tf.square(update) self.updates.append(tf.compat.v1.assign(d_a, new_d_a)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'rho': self.rho, 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon } base_config = super(Adadelta, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Adam(Optimizer): """Adam optimizer. Default parameters follow those provided in the original paper. Args: lr: float >= 0. Learning rate. beta_1: float, 0 < beta < 1. Generally close to 1. beta_2: float, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `backend.epsilon()`. decay: float >= 0. Learning rate decay over each update. amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm from the paper "On the Convergence of Adam and Beyond". 
""" def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0., amsgrad=False, **kwargs): super(Adam, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.iterations = backend.variable(0, dtype='int64', name='iterations') self.lr = backend.variable(lr, name='lr') self.beta_1 = backend.variable(beta_1, name='beta_1') self.beta_2 = backend.variable(beta_2, name='beta_2') self.decay = backend.variable(decay, name='decay') if epsilon is None: epsilon = backend.epsilon() self.epsilon = epsilon self.initial_decay = decay self.amsgrad = amsgrad def _create_all_weights(self, params): ms = [ backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params] vs = [ backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params] if self.amsgrad: vhats = [ backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params] else: vhats = [backend.zeros(1) for _ in params] self.weights = [self.iterations] + ms + vs + vhats return ms, vs, vhats def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [] lr = self.lr if self.initial_decay > 0: lr = lr * ( # pylint: disable=g-no-augmented-assignment 1. / (1. + self.decay * tf.cast(self.iterations, backend.dtype(self.decay)))) with tf.control_dependencies([tf.compat.v1.assign_add(self.iterations, 1)]): t = tf.cast(self.iterations, backend.floatx()) lr_t = lr * ( backend.sqrt(1. - tf.pow(self.beta_2, t)) / (1. - tf.pow(self.beta_1, t))) ms, vs, vhats = self._create_all_weights(params) for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats): m_t = (self.beta_1 * m) + (1. - self.beta_1) * g v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * tf.square(g) if self.amsgrad: vhat_t = tf.maximum(vhat, v_t) p_t = p - lr_t * m_t / (backend.sqrt(vhat_t) + self.epsilon) self.updates.append(tf.compat.v1.assign(vhat, vhat_t)) else: p_t = p - lr_t * m_t / (backend.sqrt(v_t) + self.epsilon) self.updates.append(tf.compat.v1.assign(m, m_t)) self.updates.append(tf.compat.v1.assign(v, v_t)) new_p = p_t # Apply constraints. if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'beta_1': float(backend.get_value(self.beta_1)), 'beta_2': float(backend.get_value(self.beta_2)), 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon, 'amsgrad': self.amsgrad } base_config = super(Adam, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Adamax(Optimizer): """Adamax optimizer from Adam paper's Section 7. It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Args: lr: float >= 0. Learning rate. beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `backend.epsilon()`. decay: float >= 0. Learning rate decay over each update. 
""" def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0., **kwargs): super(Adamax, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.iterations = backend.variable(0, dtype='int64', name='iterations') self.lr = backend.variable(lr, name='lr') self.beta_1 = backend.variable(beta_1, name='beta_1') self.beta_2 = backend.variable(beta_2, name='beta_2') self.decay = backend.variable(decay, name='decay') if epsilon is None: epsilon = backend.epsilon() self.epsilon = epsilon self.initial_decay = decay def _create_all_weights(self, params): shapes = [backend.int_shape(p) for p in params] # zero init of 1st moment ms = [backend.zeros(shape) for shape in shapes] # zero init of exponentially weighted infinity norm us = [backend.zeros(shape) for shape in shapes] self.weights = [self.iterations] + ms + us return ms, us def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [] lr = self.lr if self.initial_decay > 0: lr = lr * ( # pylint: disable=g-no-augmented-assignment 1. / (1. + self.decay * tf.cast(self.iterations, backend.dtype(self.decay)))) with tf.control_dependencies([tf.compat.v1.assign_add(self.iterations, 1)]): t = tf.cast(self.iterations, backend.floatx()) lr_t = lr / (1. - tf.pow(self.beta_1, t)) ms, us = self._create_all_weights(params) for p, g, m, u in zip(params, grads, ms, us): m_t = (self.beta_1 * m) + (1. - self.beta_1) * g u_t = tf.maximum(self.beta_2 * u, tf.abs(g)) p_t = p - lr_t * m_t / (u_t + self.epsilon) self.updates.append(tf.compat.v1.assign(m, m_t)) self.updates.append(tf.compat.v1.assign(u, u_t)) new_p = p_t # Apply constraints. 
if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'beta_1': float(backend.get_value(self.beta_1)), 'beta_2': float(backend.get_value(self.beta_2)), 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon } base_config = super(Adamax, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Nadam(Optimizer): """Nesterov Adam optimizer. Much like Adam is essentially RMSprop with momentum, Nadam is Adam RMSprop with Nesterov momentum. Default parameters follow those provided in the paper. It is recommended to leave the parameters of this optimizer at their default values. Args: lr: float >= 0. Learning rate. beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `backend.epsilon()`. """ def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004, **kwargs): super(Nadam, self).__init__(**kwargs) with backend.name_scope(self.__class__.__name__): self.iterations = backend.variable(0, dtype='int64', name='iterations') self.m_schedule = backend.variable(1., name='m_schedule') self.lr = backend.variable(lr, name='lr') self.beta_1 = backend.variable(beta_1, name='beta_1') self.beta_2 = backend.variable(beta_2, name='beta_2') if epsilon is None: epsilon = backend.epsilon() self.epsilon = epsilon self.schedule_decay = schedule_decay def _create_all_weights(self, params): shapes = [backend.int_shape(p) for p in params] ms = [backend.zeros(shape) for shape in shapes] vs = [backend.zeros(shape) for shape in shapes] self.weights = [self.iterations, self.m_schedule] + ms + vs return ms, vs def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [] with tf.control_dependencies([tf.compat.v1.assign_add(self.iterations, 1)]): t = 
tf.cast(self.iterations, backend.floatx()) # Due to the recommendations in [2], i.e. warming momentum schedule momentum_cache_t = self.beta_1 * ( 1. - 0.5 * (tf.pow(backend.cast_to_floatx(0.96), t * self.schedule_decay))) momentum_cache_t_1 = self.beta_1 * ( 1. - 0.5 * (tf.pow(backend.cast_to_floatx(0.96), (t + 1) * self.schedule_decay))) m_schedule_new = self.m_schedule * momentum_cache_t m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1 self.updates.append((self.m_schedule, m_schedule_new)) ms, vs = self._create_all_weights(params) for p, g, m, v in zip(params, grads, ms, vs): # the following equations given in [1] g_prime = g / (1. - m_schedule_new) m_t = self.beta_1 * m + (1. - self.beta_1) * g m_t_prime = m_t / (1. - m_schedule_next) v_t = self.beta_2 * v + (1. - self.beta_2) * tf.square(g) v_t_prime = v_t / (1. - tf.pow(self.beta_2, t)) m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime self.updates.append(tf.compat.v1.assign(m, m_t)) self.updates.append(tf.compat.v1.assign(v, v_t)) p_t = p - self.lr * m_t_bar / (backend.sqrt(v_t_prime) + self.epsilon) new_p = p_t # Apply constraints. 
if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(tf.compat.v1.assign(p, new_p)) return self.updates def get_config(self): config = { 'lr': float(backend.get_value(self.lr)), 'beta_1': float(backend.get_value(self.beta_1)), 'beta_2': float(backend.get_value(self.beta_2)), 'epsilon': self.epsilon, 'schedule_decay': self.schedule_decay } base_config = super(Nadam, self).get_config() return dict(list(base_config.items()) + list(config.items())) class TFOptimizer(Optimizer, tf.__internal__.tracking.Trackable): """Wrapper class for native TensorFlow optimizers.""" def __init__(self, optimizer, iterations=None): # pylint: disable=super-init-not-called self.optimizer = optimizer self._track_trackable(optimizer, name='optimizer') if iterations is None: with backend.name_scope(self.__class__.__name__): self.iterations = backend.variable(0, dtype='int64', name='iterations') else: self.iterations = iterations self._track_trackable(self.iterations, name='global_step') def _clip_gradients(self, grads): """Clip gradients according to the clipnorm and clipvalue attributes.""" # TFOptimizer wrapper has no gradient clipping options. 
return grads def minimize(self, loss, var_list, grad_loss=None, tape=None): """Mimics the `OptimizerV2.minimize` API.""" if not callable(loss) and tape is None: raise ValueError('`tape` is required when a `Tensor` loss is passed.') tape = tape if tape is not None else tf.GradientTape() if callable(loss): with tape: if not callable(var_list): tape.watch(var_list) loss = loss() if callable(var_list): var_list = var_list() var_list = tf.nest.flatten(var_list) if var_list: grads = tape.gradient(loss, var_list, grad_loss) grads_and_vars = list(zip(grads, var_list)) self.apply_gradients(grads_and_vars) def apply_gradients(self, grads_and_vars): self.optimizer.apply_gradients(grads_and_vars, global_step=self.iterations) def get_grads(self, loss, params): return self.optimizer.compute_gradients(loss, params) def get_updates(self, loss, params): if tf.distribute.has_strategy(): self.updates = [] if not params: # After the model vars have been created, the second call to get_updates # is called with params as an empty list. This ensures that we call # compute_gradients with params=None. grads = self.optimizer.compute_gradients(loss) else: grads = self.optimizer.compute_gradients(loss, params) global_step = tf.compat.v1.train.get_global_step() opt_update = self.optimizer.apply_gradients(grads, global_step) else: if not params: self.updates = [tf.compat.v1.assign_add(self.iterations, 1)] return self.updates # Updates list starts out empty because the iterations variable is # incremented in optimizer.apply_gradients() self.updates = [] grads = self.optimizer.compute_gradients(loss, params) opt_update = self.optimizer.apply_gradients( grads, global_step=self.iterations) self.updates.append(opt_update) return self.updates @property def weights(self): raise NotImplementedError def get_config(self): raise NotImplementedError def from_config(self, config): raise NotImplementedError # Aliases. 
sgd = SGD rmsprop = RMSprop adagrad = Adagrad adadelta = Adadelta adam = Adam adamax = Adamax nadam = Nadam
29,617
33.926887
90
py
keras
keras-master/keras/keras_parameterized_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras testing_utils.""" import tensorflow.compat.v2 as tf import unittest from absl.testing import parameterized import keras from keras import keras_parameterized from keras import testing_utils class KerasParameterizedTest(keras_parameterized.TestCase): def test_run_with_all_model_types(self): model_types = [] models = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_with_all_model_types def testBody(self): model_types.append(testing_utils.get_model_type()) models.append(testing_utils.get_small_mlp(1, 4, input_dim=3)) e = ExampleTest() e.testBody_functional() e.testBody_subclass() e.testBody_sequential() self.assertLen(model_types, 3) self.assertAllEqual(model_types, [ "functional", "subclass", "sequential" ]) # Validate that the models are what they should be self.assertTrue(models[0]._is_graph_network) self.assertFalse(models[1]._is_graph_network) self.assertNotIsInstance(models[0], keras.models.Sequential) self.assertNotIsInstance(models[1], keras.models.Sequential) self.assertIsInstance(models[2], keras.models.Sequential) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(model_types, 6) def test_run_with_all_model_types_and_extra_params(self): model_types = [] models = [] class 
ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_with_all_model_types @parameterized.named_parameters( [dict(testcase_name="_0", with_brackets=True), dict(testcase_name="_1", with_brackets=False)]) def testBody(self, with_brackets): with_brackets = "with_brackets" if with_brackets else "without_brackets" model_types.append((with_brackets, testing_utils.get_model_type())) models.append(testing_utils.get_small_mlp(1, 4, input_dim=3)) e = ExampleTest() e.testBody_0_functional() e.testBody_0_subclass() e.testBody_0_sequential() e.testBody_1_functional() e.testBody_1_subclass() e.testBody_1_sequential() self.assertLen(model_types, 6) self.assertAllEqual(model_types, [ ("with_brackets", "functional"), ("with_brackets", "subclass"), ("with_brackets", "sequential"), ("without_brackets", "functional"), ("without_brackets", "subclass"), ("without_brackets", "sequential"), ]) # Validate that the models are what they should be self.assertTrue(models[0]._is_graph_network) self.assertFalse(models[1]._is_graph_network) self.assertNotIsInstance(models[0], keras.models.Sequential) self.assertNotIsInstance(models[1], keras.models.Sequential) self.assertIsInstance(models[2], keras.models.Sequential) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(model_types, 12) def test_run_with_all_model_types_exclude_one(self): model_types = [] models = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_with_all_model_types(exclude_models="sequential") def testBody(self): model_types.append(testing_utils.get_model_type()) models.append(testing_utils.get_small_mlp(1, 4, input_dim=3)) e = ExampleTest() if hasattr(e, "testBody_functional"): e.testBody_functional() if hasattr(e, "testBody_subclass"): e.testBody_subclass() if hasattr(e, "testBody_sequential"): e.testBody_sequential() self.assertLen(model_types, 2) self.assertAllEqual(model_types, [ "functional", 
"subclass" ]) # Validate that the models are what they should be self.assertTrue(models[0]._is_graph_network) self.assertFalse(models[1]._is_graph_network) self.assertNotIsInstance(models[0], keras.models.Sequential) self.assertNotIsInstance(models[1], keras.models.Sequential) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(model_types, 4) def test_run_with_all_model_types_exclude_multiple(self): model_types = [] models = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_with_all_model_types( exclude_models=["sequential", "functional"]) def testBody(self): model_types.append(testing_utils.get_model_type()) models.append(testing_utils.get_small_mlp(1, 4, input_dim=3)) e = ExampleTest() if hasattr(e, "testBody_functional"): e.testBody_functional() if hasattr(e, "testBody_subclass"): e.testBody_subclass() if hasattr(e, "testBody_sequential"): e.testBody_sequential() self.assertLen(model_types, 1) self.assertAllEqual(model_types, [ "subclass" ]) # Validate that the models are what they should be self.assertFalse(models[0]._is_graph_network) self.assertNotIsInstance(models[0], keras.models.Sequential) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(model_types, 2) def test_run_all_keras_modes(self): l = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_all_keras_modes() def testBody(self): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() l.append((mode, should_run_eagerly)) e = ExampleTest() if not tf.__internal__.tf2.enabled(): e.testBody_v1_session() e.testBody_v2_eager() e.testBody_v2_function() if not tf.__internal__.tf2.enabled(): self.assertLen(l, 3) self.assertAllEqual(l, [ ("graph", False), ("eager", True), ("eager", False), ]) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) 
self.assertLen(l, 6) else: self.assertLen(l, 2) self.assertAllEqual(l, [ ("eager", True), ("eager", False), ]) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(l, 4) def test_run_all_keras_modes_extra_params(self): l = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_all_keras_modes() @parameterized.named_parameters( [dict(testcase_name="_0", with_brackets=True), dict(testcase_name="_1", with_brackets=False)]) def testBody(self, with_brackets): mode = "eager" if tf.executing_eagerly() else "graph" with_brackets = "with_brackets" if with_brackets else "without_brackets" should_run_eagerly = testing_utils.should_run_eagerly() l.append((with_brackets, mode, should_run_eagerly)) e = ExampleTest() if not tf.__internal__.tf2.enabled(): e.testBody_0_v1_session() e.testBody_1_v1_session() e.testBody_0_v2_eager() e.testBody_0_v2_function() e.testBody_1_v2_eager() e.testBody_1_v2_function() expected_combinations = { ("with_brackets", "eager", True), ("with_brackets", "eager", False), ("without_brackets", "eager", True), ("without_brackets", "eager", False), } if not tf.__internal__.tf2.enabled(): expected_combinations = expected_combinations.union({ ("with_brackets", "graph", False), ("without_brackets", "graph", False), }) self.assertLen(l, len(expected_combinations)) self.assertEqual(set(l), expected_combinations) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(l, len(expected_combinations) * 2) def test_run_all_keras_modes_always_skip_v1(self): l = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def testBody(self): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() l.append((mode, should_run_eagerly)) e = ExampleTest() if hasattr(e, "testBody_v1_session"): e.testBody_v1_session() if 
hasattr(e, "testBody_v2_eager"): e.testBody_v2_eager() if hasattr(e, "testBody_v2_function"): e.testBody_v2_function() self.assertLen(l, 2) self.assertEqual( set(l), { ("eager", True), ("eager", False), }) def test_run_all_keras_modes_with_all_model_types(self): l = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def testBody(self): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() l.append((mode, should_run_eagerly, testing_utils.get_model_type())) e = ExampleTest() e.testBody_v2_eager_functional() e.testBody_v2_function_functional() e.testBody_v2_eager_sequential() e.testBody_v2_function_sequential() e.testBody_v2_eager_subclass() e.testBody_v2_function_subclass() if not tf.__internal__.tf2.enabled(): e.testBody_v1_session_functional() e.testBody_v1_session_sequential() e.testBody_v1_session_subclass() expected_combinations = { ("eager", True, "functional"), ("eager", False, "functional"), ("eager", True, "sequential"), ("eager", False, "sequential"), ("eager", True, "subclass"), ("eager", False, "subclass"), } if not tf.__internal__.tf2.enabled(): expected_combinations = expected_combinations.union({ ("graph", False, "functional"), ("graph", False, "sequential"), ("graph", False, "subclass"), }) self.assertLen(l, len(expected_combinations)) self.assertEqual(set(l), expected_combinations) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(l, len(expected_combinations) * 2) def test_run_all_model_types_with_all_keras_modes(self): l = [] class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def testBody(self): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() l.append((mode, 
should_run_eagerly, testing_utils.get_model_type())) e = ExampleTest() e.testBody_functional_v2_eager() e.testBody_functional_v2_function() e.testBody_sequential_v2_eager() e.testBody_sequential_v2_function() e.testBody_subclass_v2_eager() e.testBody_subclass_v2_function() if not tf.__internal__.tf2.enabled(): e.testBody_functional_v1_session() e.testBody_sequential_v1_session() e.testBody_subclass_v1_session() expected_combinations = { ("eager", True, "functional"), ("eager", False, "functional"), ("eager", True, "sequential"), ("eager", False, "sequential"), ("eager", True, "subclass"), ("eager", False, "subclass"), } if not tf.__internal__.tf2.enabled(): expected_combinations = expected_combinations.union({ ("graph", False, "functional"), ("graph", False, "sequential"), ("graph", False, "subclass"), }) self.assertLen(l, len(expected_combinations)) self.assertEqual(set(l), expected_combinations) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(l, len(expected_combinations) * 2) def test_run_all_keras_modes_with_all_model_types_annotate_class(self): l = [] @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @parameterized.named_parameters(dict(testcase_name="_arg", arg=True)) def testBody(self, arg): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() l.append((mode, should_run_eagerly, testing_utils.get_model_type())) e = ExampleTest() e.testBody_arg_v2_eager_functional() e.testBody_arg_v2_function_functional() e.testBody_arg_v2_eager_sequential() e.testBody_arg_v2_function_sequential() e.testBody_arg_v2_eager_subclass() e.testBody_arg_v2_function_subclass() if not tf.__internal__.tf2.enabled(): e.testBody_arg_v1_session_functional() e.testBody_arg_v1_session_sequential() e.testBody_arg_v1_session_subclass() expected_combinations = { ("eager", True, 
"functional"), ("eager", False, "functional"), ("eager", True, "sequential"), ("eager", False, "sequential"), ("eager", True, "subclass"), ("eager", False, "subclass"), } if not tf.__internal__.tf2.enabled(): expected_combinations = expected_combinations.union({ ("graph", False, "functional"), ("graph", False, "sequential"), ("graph", False, "subclass"), }) self.assertLen(l, len(expected_combinations)) self.assertEqual(set(l), expected_combinations) ts = unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(l, len(expected_combinations) * 2) def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self): l = [] @keras_parameterized.run_with_all_model_types class ExampleTest(keras_parameterized.TestCase): def runTest(self): pass @keras_parameterized.run_all_keras_modes @parameterized.named_parameters(dict(testcase_name="_arg", arg=True)) def testBody(self, arg): mode = "eager" if tf.executing_eagerly() else "graph" should_run_eagerly = testing_utils.should_run_eagerly() l.append((mode, should_run_eagerly, testing_utils.get_model_type())) e = ExampleTest() e.testBody_arg_v2_eager_functional() e.testBody_arg_v2_function_functional() e.testBody_arg_v2_eager_sequential() e.testBody_arg_v2_function_sequential() e.testBody_arg_v2_eager_subclass() e.testBody_arg_v2_function_subclass() if not tf.__internal__.tf2.enabled(): e.testBody_arg_v1_session_functional() e.testBody_arg_v1_session_sequential() e.testBody_arg_v1_session_subclass() expected_combinations = { ("eager", True, "functional"), ("eager", False, "functional"), ("eager", True, "sequential"), ("eager", False, "sequential"), ("eager", True, "subclass"), ("eager", False, "subclass"), } if not tf.__internal__.tf2.enabled(): expected_combinations = expected_combinations.union({ ("graph", False, "functional"), ("graph", False, "sequential"), ("graph", False, "subclass"), }) self.assertLen(l, len(expected_combinations)) self.assertEqual(set(l), expected_combinations) ts = 
unittest.makeSuite(ExampleTest) res = unittest.TestResult() ts.run(res) self.assertLen(l, len(expected_combinations) * 2) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters(dict(testcase_name="argument", arg=True)) def test_run_all_keras_modes_extra_params_2(self, arg): self.assertEqual(arg, True) @keras_parameterized.run_with_all_model_types @parameterized.named_parameters(dict(testcase_name="argument", arg=True)) def test_run_with_all_model_types_extra_params_2(self, arg): self.assertEqual(arg, True) if __name__ == "__main__": tf.test.main()
17,056
30.069217
80
py
keras
keras-master/keras/models_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `models.py` (model cloning, mainly).""" import tensorflow.compat.v2 as tf import functools import os from absl.testing import parameterized import numpy as np import keras from keras import backend from keras import keras_parameterized from keras import metrics from keras import models from keras import optimizer_v1 from keras import testing_utils class TestModel(keras.Model): """A model subclass.""" def __init__(self, n_outputs=4, trainable=True): """A test class with one dense layer and number of outputs as a variable.""" super(TestModel, self).__init__() self.layer1 = keras.layers.Dense(n_outputs) self.n_outputs = tf.Variable(n_outputs, trainable=trainable) def call(self, x): return self.layer1(x) def _get_layers(input_shape=(4,), add_input_layer=False): if add_input_layer: model_layers = [keras.layers.InputLayer(input_shape=input_shape), keras.layers.Dense(4)] elif input_shape: model_layers = [keras.layers.Dense(4, input_shape=input_shape)] else: model_layers = [keras.layers.Dense(4)] model_layers += [ keras.layers.BatchNormalization(), keras.layers.Dropout(0.5), keras.layers.Dense(4)] return model_layers def _get_model(input_shape=(4,)): model_layers = _get_layers(input_shape=None, add_input_layer=False) return testing_utils.get_model_from_layers( model_layers, 
input_shape=input_shape) class TestModelCloning(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ {'testcase_name': 'has_input_layer', 'input_shape': (4,), 'add_input_layer': True, 'share_weights': False}, {'testcase_name': 'no_input_layer', 'input_shape': None, 'add_input_layer': False, 'share_weights': False}, {'testcase_name': 'has_input_layer_share_weights', 'input_shape': (4,), 'add_input_layer': True, 'share_weights': True}, {'testcase_name': 'no_input_layer_share_weights', 'input_shape': None, 'add_input_layer': False, 'share_weights': True}, ]) def test_clone_sequential_model( self, input_shape, add_input_layer, share_weights): if share_weights: clone_fn = functools.partial( keras.models._clone_sequential_model, layer_fn=models.share_weights) else: clone_fn = keras.models.clone_model val_a = np.random.random((10, 4)) model = models.Sequential(_get_layers(input_shape, add_input_layer)) # Sanity check self.assertEqual( isinstance( list(model._flatten_layers(include_self=False, recursive=False))[0], keras.layers.InputLayer), add_input_layer) self.assertEqual(model._is_graph_network, add_input_layer) # With placeholder creation -- clone model should have an InputLayer # if the original model has one. new_model = clone_fn(model) self.assertEqual( isinstance( list( new_model._flatten_layers(include_self=False, recursive=False))[0], keras.layers.InputLayer), add_input_layer) self.assertEqual(new_model._is_graph_network, model._is_graph_network) if input_shape and not tf.compat.v1.executing_eagerly_outside_functions(): # update ops from batch norm needs to be included self.assertGreaterEqual(len(new_model.updates), 2) # On top of new tensor -- clone model should always have an InputLayer. 
input_a = keras.Input(shape=(4,), name='a') new_model = clone_fn(model, input_tensors=input_a) self.assertIsInstance( list(new_model._flatten_layers(include_self=False, recursive=False))[0], keras.layers.InputLayer) # The new models inputs should have the properties of the new input tensor self.assertEqual(new_model.input_names[0], input_a.name) self.assertEqual(new_model.inputs[0].shape, input_a.shape) self.assertTrue(new_model._is_graph_network) # On top of new, non-Keras tensor -- clone model should always have an # InputLayer. if not tf.executing_eagerly(): # TODO(b/121277734):Skip Eager contexts, as Input() layers raise an error # saying they should not be used with EagerTensors input_a = keras.backend.variable(val_a) new_model = clone_fn(model, input_tensors=input_a) self.assertIsInstance( list(new_model._flatten_layers(include_self=False, recursive=False))[0], keras.layers.InputLayer) self.assertTrue(new_model._is_graph_network) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ {'testcase_name': 'clone_weights', 'share_weights': False}, {'testcase_name': 'share_weights', 'share_weights': True}, ]) def test_clone_functional_model(self, share_weights): if share_weights: clone_fn = functools.partial( keras.models._clone_functional_model, layer_fn=models.share_weights) else: clone_fn = keras.models.clone_model val_a = np.random.random((10, 4)) val_b = np.random.random((10, 4)) val_out = np.random.random((10, 4)) input_a = keras.Input(shape=(4,)) input_b = keras.Input(shape=(4,)) dense_1 = keras.layers.Dense(4,) dense_2 = keras.layers.Dense(4,) x_a = dense_1(input_a) x_a = keras.layers.Dropout(0.5)(x_a) x_a = keras.layers.BatchNormalization()(x_a) x_b = dense_1(input_b) x_a = dense_2(x_a) outputs = keras.layers.add([x_a, x_b]) model = keras.models.Model([input_a, input_b], outputs) # With placeholder creation new_model = clone_fn(model) if not tf.compat.v1.executing_eagerly_outside_functions(): 
self.assertGreaterEqual(len(new_model.updates), 2) new_model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', run_eagerly=testing_utils.should_run_eagerly()) new_model.train_on_batch([val_a, val_b], val_out) # On top of new tensors input_a = keras.Input(shape=(4,), name='a') input_b = keras.Input(shape=(4,), name='b') new_input_tensors = [input_a, input_b] new_model = keras.models.clone_model(model, input_tensors=new_input_tensors) if not tf.compat.v1.executing_eagerly_outside_functions(): self.assertLen(new_model.updates, 2) new_model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', run_eagerly=testing_utils.should_run_eagerly()) new_model.train_on_batch([val_a, val_b], val_out) # New model should use provided input tensors self.assertListEqual(new_model.inputs, new_input_tensors) # On top of new, non-Keras tensors if not tf.executing_eagerly(): # TODO(b/121277734):Skip Eager contexts, as Input() layers raise an error # saying they should not be used with EagerTensors input_a = keras.backend.variable(val_a) input_b = keras.backend.variable(val_b) new_model = clone_fn(model, input_tensors=[input_a, input_b]) self.assertGreaterEqual(len(new_model.updates), 2) new_model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', run_eagerly=testing_utils.should_run_eagerly()) new_model.train_on_batch(None, val_out) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ {'testcase_name': 'clone_weights', 'share_weights': False}, {'testcase_name': 'share_weights', 'share_weights': True}, ]) def test_clone_functional_with_masking(self, share_weights): if share_weights: clone_fn = functools.partial( keras.models._clone_functional_model, layer_fn=models.share_weights) else: clone_fn = keras.models.clone_model x = np.array([[[1.], [1.]], [[0.], [0.]]]) inputs = keras.Input((2, 1)) outputs = keras.layers.Masking(mask_value=0)(inputs) outputs = keras.layers.TimeDistributed( keras.layers.Dense(1, kernel_initializer='one'))(outputs) model 
= keras.Model(inputs, outputs) model = clone_fn(model) model.compile( loss='mse', optimizer=testing_utils.get_v2_optimizer('adam'), run_eagerly=testing_utils.should_run_eagerly()) y = np.array([[[1], [1]], [[1], [1]]]) loss = model.train_on_batch(x, y) self.assertEqual(float(loss), 0.) def test_clone_rnn(self): # Test cloning a model with multiple cells in an RNN. This exercises a # few "fancier" features such as the `Bidrectional` wrapper and # `StackedRNNCells` under the hood. inputs = keras.Input(shape=(3, 3)) cells = [ keras.layers.LSTMCell( units=32, enable_caching_device=True, implementation=2, activation='relu')] rnn = keras.layers.RNN(cells, return_sequences=True) outputs = keras.layers.Bidirectional(rnn)(inputs) outputs = keras.layers.Dense( 12, activation='softmax', name='scores')(outputs) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( loss=keras.losses.CategoricalCrossentropy(), optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.01), metrics=['accuracy']) keras.models.clone_model(model) def test_model_cloning_invalid_use_cases(self): seq_model = keras.models.Sequential() seq_model.add(keras.layers.Dense(4, input_shape=(4,))) x = keras.Input((4,)) y = keras.layers.Dense(4)(x) fn_model = keras.models.Model(x, y) with self.assertRaises(ValueError): keras.models._clone_functional_model(seq_model) with self.assertRaises(ValueError): keras.models._clone_functional_model(None) with self.assertRaises(ValueError): keras.models._clone_sequential_model(fn_model) with self.assertRaises(ValueError): keras.models._clone_sequential_model(seq_model, input_tensors=[x, x]) with self.assertRaises(ValueError): keras.models._clone_sequential_model(seq_model, input_tensors=y) def test_functional_cloning_does_not_create_unnecessary_placeholders(self): with tf.Graph().as_default(): x = keras.Input((4,)) y = keras.layers.Dense(4)(x) model = keras.models.Model(x, y) graph = tf.Graph() with graph.as_default(): x = tf.ones((10, 4)) _ = 
keras.models.clone_model(model, input_tensors=[x]) has_placeholder = _has_placeholder(graph) self.assertFalse(has_placeholder) def test_sequential_cloning_does_not_create_unnecessary_placeholders(self): with tf.Graph().as_default(): model = keras.models.Sequential() model.add(keras.layers.Dense(4, input_shape=(4,))) graph = tf.Graph() with graph.as_default(): x = tf.ones((10, 4)) _ = keras.models.clone_model(model, input_tensors=[x]) has_placeholder = _has_placeholder(graph) self.assertFalse(has_placeholder) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ {'testcase_name': 'clone_weights', 'share_weights': False}, {'testcase_name': 'share_weights', 'share_weights': True}, ]) def test_functional_cloning_with_tensor_kwarg(self, share_weights): """Test that cloning works with models that use Tensor kwargs.""" if share_weights: clone_fn = functools.partial( keras.models.clone_model, clone_function=models.share_weights) else: clone_fn = keras.models.clone_model class LayerWithTensorKwarg(keras.layers.Layer): def call(self, inputs, tensor=None): if tensor is not None: return inputs * tf.cast(tensor, tf.float32) else: return inputs inputs = keras.layers.Input(shape=(3)) t = tf.sequence_mask(tf.shape(inputs)[1]) model = keras.models.Model(inputs, LayerWithTensorKwarg()(inputs, t)) model.add_loss(tf.reduce_sum(model.outputs)) input_arr = np.random.random((1, 3)).astype(np.float32) clone = clone_fn(model) if tf.executing_eagerly(): clone(input_arr) loss = clone.losses[0] else: with self.session() as sess: clone(input_arr) if share_weights: self.skipTest('Weight sharing with inputs in call **kwargs does ' 'not work correctly in v1') else: feed_dict = {clone.input: input_arr} loss = sess.run(clone.losses[0], feed_dict=feed_dict) self.assertAllClose(np.sum(input_arr), loss) def _has_placeholder(graph): ops_types = [op.type for op in graph.get_operations()] return any('Placeholder' in s for s in ops_types) class 
CheckpointingTests(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_optimizer_dependency(self): model = _get_model() opt = tf.compat.v1.train.AdamOptimizer(.01) model.compile( optimizer=opt, loss='mse', run_eagerly=testing_utils.should_run_eagerly()) model.fit( x=np.array([[1., 2., 3., 4.]]), y=np.array([[1., 1., 1., 1.]]), epochs=2) save_prefix = os.path.join(self.get_temp_dir(), 'ckpt') beta1_power, _ = opt._get_beta_accumulators() self.evaluate(beta1_power.assign(12.)) model.save_weights(save_prefix) self.evaluate(beta1_power.assign(13.)) model.load_weights(save_prefix) self.assertEqual(12., self.evaluate(beta1_power)) @keras_parameterized.run_all_keras_modes class TestModelBackend(keras_parameterized.TestCase): def test_model_backend_float64_use_cases(self): # Test case for GitHub issue 19318 floatx = keras.backend.floatx() keras.backend.set_floatx('float64') x = keras.Input((5,)) y = keras.layers.Dense(1)(x) model = keras.models.Model(x, y) model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', run_eagerly=testing_utils.should_run_eagerly()) keras.backend.set_floatx(floatx) class TestCloneAndBuildModel(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_clone_and_build_non_compiled_model(self): inp = np.random.random((10, 4)) out = np.random.random((10, 4)) model = _get_model() with self.assertRaisesRegex(ValueError, 'has not been compiled'): models.clone_and_build_model(model, compile_clone=True) is_subclassed = (testing_utils.get_model_type() == 'subclass') # With placeholder creation new_model = models.clone_and_build_model( model, compile_clone=False, in_place_reset=is_subclassed) with self.assertRaisesRegex(RuntimeError, 'must compile'): new_model.evaluate(inp, out) with self.assertRaisesRegex(RuntimeError, 'must compile'): new_model.train_on_batch(inp, out) new_model.compile( 
testing_utils.get_v2_optimizer('rmsprop'), 'mse', run_eagerly=testing_utils.should_run_eagerly()) new_model.train_on_batch(inp, out) # Create new tensors for inputs. input_a = keras.Input(shape=(4,)) new_model = models.clone_and_build_model( model, input_tensors=input_a, compile_clone=False, in_place_reset=is_subclassed) with self.assertRaisesRegex(RuntimeError, 'must compile'): new_model.evaluate(inp, out) with self.assertRaisesRegex(RuntimeError, 'must compile'): new_model.train_on_batch(inp, out) new_model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', run_eagerly=testing_utils.should_run_eagerly()) new_model.train_on_batch(inp, out) def _assert_same_compile_params(self, model): """Assert that two models have the same compile parameters.""" self.assertEqual('mse', model.loss) self.assertIsInstance( model.optimizer, (optimizer_v1.RMSprop, keras.optimizer_v2.rmsprop.RMSprop)) def _clone_and_build_test_helper(self, model, model_type): inp = np.random.random((10, 4)) out = np.random.random((10, 4)) is_subclassed = (model_type == 'subclass') # With placeholder creation new_model = models.clone_and_build_model( model, compile_clone=True, in_place_reset=is_subclassed) self._assert_same_compile_params(new_model) new_model.train_on_batch(inp, out) new_model.evaluate(inp, out) # Create new tensors for inputs. 
input_a = keras.Input(shape=(4,), name='a') new_model = models.clone_and_build_model( model, input_tensors=input_a, compile_clone=True, in_place_reset=is_subclassed) self._assert_same_compile_params(new_model) new_model.train_on_batch(inp, out) new_model.evaluate(inp, out) new_model = models.clone_and_build_model( model, input_tensors=input_a, target_tensors=None, compile_clone=True, in_place_reset=is_subclassed) self._assert_same_compile_params(new_model) new_model.train_on_batch(inp, out) new_model.evaluate(inp, out) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_clone_and_build_compiled(self): model = _get_model() model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', metrics=['acc', metrics.categorical_accuracy], run_eagerly=testing_utils.should_run_eagerly()) self._clone_and_build_test_helper(model, testing_utils.get_model_type()) @keras_parameterized.run_all_keras_modes def test_clone_and_build_sequential_without_inputs_defined(self): model = models.Sequential(_get_layers(input_shape=None)) model.compile( testing_utils.get_v2_optimizer('rmsprop'), 'mse', metrics=['acc', metrics.categorical_accuracy], run_eagerly=testing_utils.should_run_eagerly()) self._clone_and_build_test_helper(model, 'sequential') inp = np.random.random((10, 4)) out = np.random.random((10, 4)) model.train_on_batch(inp, out) self._clone_and_build_test_helper(model, 'sequential') def assert_optimizer_iterations_increases(self, optimizer): model = _get_model() model.compile( optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy], run_eagerly=testing_utils.should_run_eagerly()) global_step = keras.backend.variable(123, dtype=tf.int64) clone_model = models.clone_and_build_model( model, compile_clone=True, optimizer_iterations=global_step, in_place_reset=(testing_utils.get_model_type() == 'subclass')) inp = np.random.random((10, 4)) out = np.random.random((10, 4)) clone_model.train_on_batch(inp, out) 
self.assertEqual(backend.eval(global_step), 124) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_replace_tf_optimizer_iterations_variable(self): if tf.executing_eagerly(): self.skipTest('v1 optimizers not supported with eager.') self.assert_optimizer_iterations_increases(tf.compat.v1.train.AdamOptimizer(0.01)) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_replace_keras_optimizer_iterations_variable(self): self.assert_optimizer_iterations_increases('adam') def test_clone_optimizer_in_different_graph(self): with tf.Graph().as_default(): with self.session(): model = testing_utils.get_small_sequential_mlp(3, 4) optimizer = keras.optimizer_v2.adam.Adam() model.compile( optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy], ) model.fit( x=np.array([[1., 2., 3., 4.]]), y=np.array([[1., 1., 1., 1.]]), epochs=1) optimizer_config = optimizer.get_config() with tf.Graph().as_default(): with self.session(): with self.assertRaisesRegex(ValueError, 'Cannot use the given session'): models.clone_and_build_model(model, compile_clone=True) # The optimizer_config object allows the model to be cloned in a # different graph. models.clone_and_build_model(model, compile_clone=True, optimizer_config=optimizer_config) if __name__ == '__main__': tf.test.main()
20,901
35.605954
86
py
keras
keras-master/keras/metrics_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras metrics functions.""" import tensorflow.compat.v2 as tf import json import math import os from absl.testing import parameterized import numpy as np from keras import backend from keras import combinations from keras import keras_parameterized from keras import layers from keras import metrics from keras import Model from keras import testing_utils from keras.engine import base_layer from keras.engine import training as training_module @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class KerasSumTest(tf.test.TestCase, parameterized.TestCase): def test_sum(self): with self.test_session(): m = metrics.Sum(name='my_sum') # check config self.assertEqual(m.name, 'my_sum') self.assertTrue(m.stateful) self.assertEqual(m.dtype, tf.float32) self.assertLen(m.variables, 1) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # check initial state self.assertEqual(self.evaluate(m.total), 0) # check __call__() self.assertEqual(self.evaluate(m(100)), 100) self.assertEqual(self.evaluate(m.total), 100) # check update_state() and result() + state accumulation + tensor input update_op = m.update_state(tf.convert_to_tensor([1, 5])) self.evaluate(update_op) self.assertAlmostEqual(self.evaluate(m.result()), 106) self.assertEqual(self.evaluate(m.total), 106) # 100 
+ 1 + 5 # check reset_state() m.reset_state() self.assertEqual(self.evaluate(m.total), 0) def test_sum_with_sample_weight(self): m = metrics.Sum(dtype=tf.float64) self.assertEqual(m.dtype, tf.float64) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # check scalar weight result_t = m(100, sample_weight=0.5) self.assertEqual(self.evaluate(result_t), 50) self.assertEqual(self.evaluate(m.total), 50) # check weights not scalar and weights rank matches values rank result_t = m([1, 5], sample_weight=[1, 0.2]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 52., 4) # 50 + 1 + 5 * 0.2 self.assertAlmostEqual(self.evaluate(m.total), 52., 4) # check weights broadcast result_t = m([1, 2], sample_weight=0.5) self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1) # 52 + 0.5 + 1 self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1) # check weights squeeze result_t = m([1, 5], sample_weight=[[1], [0.2]]) self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1) # 53.5 + 1 + 1 self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1) # check weights expand result_t = m([[1], [5]], sample_weight=[1, 0.2]) self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2) # 55.5 + 1 + 1 self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1) # check values reduced to the dimensions of weight result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5]) result = np.round(self.evaluate(result_t), decimals=2) # result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2 self.assertAlmostEqual(result, 63.75, 2) self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2) def test_sum_graph_with_placeholder(self): with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: m = metrics.Sum() v = tf.compat.v1.placeholder(tf.float32) w = tf.compat.v1.placeholder(tf.float32) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # check __call__() result_t = m(v, sample_weight=w) result = sess.run(result_t, feed_dict=({v: 100, w: 0.5})) 
self.assertEqual(result, 50) self.assertEqual(self.evaluate(m.total), 50) # check update_state() and result() result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]})) self.assertAlmostEqual(result, 52., 2) # 50 + 1 + 5 * 0.2 self.assertAlmostEqual(self.evaluate(m.total), 52., 2) def test_save_restore(self): with self.test_session(): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt') m = metrics.Sum() checkpoint = tf.train.Checkpoint(sum=m) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # update state self.evaluate(m(100.)) self.evaluate(m(200.)) # save checkpoint and then add an update save_path = checkpoint.save(checkpoint_prefix) self.evaluate(m(1000.)) # restore to the same checkpoint sum object (= 300) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.evaluate(m(300.)) self.assertEqual(600., self.evaluate(m.result())) # restore to a different checkpoint sum object restore_sum = metrics.Sum() restore_checkpoint = tf.train.Checkpoint(sum=restore_sum) status = restore_checkpoint.restore(save_path) restore_update = restore_sum(300.) 
status.assert_consumed().run_restore_ops() self.evaluate(restore_update) self.assertEqual(600., self.evaluate(restore_sum.result())) class MeanTest(keras_parameterized.TestCase): # TODO(b/120949004): Re-enable garbage collection check # @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) @keras_parameterized.run_all_keras_modes def test_mean(self): m = metrics.Mean(name='my_mean') # check config self.assertEqual(m.name, 'my_mean') self.assertTrue(m.stateful) self.assertEqual(m.dtype, tf.float32) self.assertEqual(len(m.variables), 2) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # check initial state self.assertEqual(self.evaluate(m.total), 0) self.assertEqual(self.evaluate(m.count), 0) # check __call__() self.assertEqual(self.evaluate(m(100)), 100) self.assertEqual(self.evaluate(m.total), 100) self.assertEqual(self.evaluate(m.count), 1) # check update_state() and result() + state accumulation + tensor input update_op = m.update_state([ tf.convert_to_tensor(1), tf.convert_to_tensor(5) ]) self.evaluate(update_op) self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2) self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5 self.assertEqual(self.evaluate(m.count), 3) # check reset_state() m.reset_state() self.assertEqual(self.evaluate(m.total), 0) self.assertEqual(self.evaluate(m.count), 0) # Check save and restore config m2 = metrics.Mean.from_config(m.get_config()) self.assertEqual(m2.name, 'my_mean') self.assertTrue(m2.stateful) self.assertEqual(m2.dtype, tf.float32) self.assertEqual(len(m2.variables), 2) @testing_utils.run_v2_only def test_function_wrapped_reset_state(self): m = metrics.Mean(name='my_mean') # check reset_state in function. 
@tf.function def reset_in_fn(): m.reset_state() return m.update_state(100) for _ in range(5): self.evaluate(reset_in_fn()) self.assertEqual(self.evaluate(m.count), 1) @keras_parameterized.run_all_keras_modes def test_mean_with_sample_weight(self): m = metrics.Mean(dtype=tf.float64) self.assertEqual(m.dtype, tf.float64) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # check scalar weight result_t = m(100, sample_weight=0.5) self.assertEqual(self.evaluate(result_t), 50 / 0.5) self.assertEqual(self.evaluate(m.total), 50) self.assertEqual(self.evaluate(m.count), 0.5) # check weights not scalar and weights rank matches values rank result_t = m([1, 5], sample_weight=[1, 0.2]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 52 / 1.7, 2) self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2 self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2 # check weights broadcast result_t = m([1, 2], sample_weight=0.5) self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2) self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1 self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5 # check weights squeeze result_t = m([1, 5], sample_weight=[[1], [0.2]]) self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2) self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1 self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2 # check weights expand result_t = m([[1], [5]], sample_weight=[1, 0.2]) self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2) self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1 self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2 # check values reduced to the dimensions of weight result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5]) result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6 self.assertEqual(result, 10.45) 
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54) self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6) @keras_parameterized.run_all_keras_modes def test_mean_graph_with_placeholder(self): with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: m = metrics.Mean() v = tf.compat.v1.placeholder(tf.float32) w = tf.compat.v1.placeholder(tf.float32) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # check __call__() result_t = m(v, sample_weight=w) result = sess.run(result_t, feed_dict=({v: 100, w: 0.5})) self.assertEqual(self.evaluate(m.total), 50) self.assertEqual(self.evaluate(m.count), 0.5) self.assertEqual(result, 50 / 0.5) # check update_state() and result() result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]})) self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2 self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2 self.assertAlmostEqual(result, 52 / 1.7, 2) @keras_parameterized.run_all_keras_modes def test_save_restore(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt') m = metrics.Mean() checkpoint = tf.train.Checkpoint(mean=m) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) # update state self.evaluate(m(100.)) self.evaluate(m(200.)) # save checkpoint and then add an update save_path = checkpoint.save(checkpoint_prefix) self.evaluate(m(1000.)) # restore to the same checkpoint mean object checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.evaluate(m(300.)) self.assertEqual(200., self.evaluate(m.result())) # restore to a different checkpoint mean object restore_mean = metrics.Mean() restore_checkpoint = tf.train.Checkpoint(mean=restore_mean) status = restore_checkpoint.restore(save_path) restore_update = restore_mean(300.) 
status.assert_consumed().run_restore_ops() self.evaluate(restore_update) self.assertEqual(200., self.evaluate(restore_mean.result())) self.assertEqual(3, self.evaluate(restore_mean.count)) @keras_parameterized.run_all_keras_modes def test_multiple_instances(self): m = metrics.Mean() m2 = metrics.Mean() self.assertEqual(m.name, 'mean') self.assertEqual(m2.name, 'mean') self.assertEqual([v.name for v in m.variables], testing_utils.get_expected_metric_variable_names( ['total', 'count'])) self.assertEqual([v.name for v in m2.variables], testing_utils.get_expected_metric_variable_names( ['total', 'count'], name_suffix='_1')) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) self.evaluate(tf.compat.v1.variables_initializer(m2.variables)) # check initial state self.assertEqual(self.evaluate(m.total), 0) self.assertEqual(self.evaluate(m.count), 0) self.assertEqual(self.evaluate(m2.total), 0) self.assertEqual(self.evaluate(m2.count), 0) # check __call__() self.assertEqual(self.evaluate(m(100)), 100) self.assertEqual(self.evaluate(m.total), 100) self.assertEqual(self.evaluate(m.count), 1) self.assertEqual(self.evaluate(m2.total), 0) self.assertEqual(self.evaluate(m2.count), 0) self.assertEqual(self.evaluate(m2([63, 10])), 36.5) self.assertEqual(self.evaluate(m2.total), 73) self.assertEqual(self.evaluate(m2.count), 2) self.assertEqual(self.evaluate(m.result()), 100) self.assertEqual(self.evaluate(m.total), 100) self.assertEqual(self.evaluate(m.count), 1) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class KerasAccuracyTest(tf.test.TestCase): def test_accuracy(self): acc_obj = metrics.Accuracy(name='my_acc') # check config self.assertEqual(acc_obj.name, 'my_acc') self.assertTrue(acc_obj.stateful) self.assertEqual(len(acc_obj.variables), 2) self.assertEqual(acc_obj.dtype, tf.float32) self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned update_op = acc_obj.update_state([[1], [2], [3], 
[4]], [[1], [2], [3], [4]]) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # Check save and restore config a2 = metrics.Accuracy.from_config(acc_obj.get_config()) self.assertEqual(a2.name, 'my_acc') self.assertTrue(a2.stateful) self.assertEqual(len(a2.variables), 2) self.assertEqual(a2.dtype, tf.float32) # check with sample_weight result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7 def test_accuracy_ragged(self): acc_obj = metrics.Accuracy(name='my_acc') self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned rt1 = tf.ragged.constant([[1], [2], [3], [4]]) rt2 = tf.ragged.constant([[1], [2], [3], [4]]) update_op = acc_obj.update_state(rt1, rt2) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check with sample_weight rt1 = tf.ragged.constant([[2], [1]]) rt2 = tf.ragged.constant([[2], [0]]) sw_ragged = tf.ragged.constant([[0.5], [0.2]]) result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7 def test_binary_accuracy(self): acc_obj = metrics.BinaryAccuracy(name='my_acc') # check config self.assertEqual(acc_obj.name, 'my_acc') self.assertTrue(acc_obj.stateful) self.assertEqual(len(acc_obj.variables), 2) self.assertEqual(acc_obj.dtype, tf.float32) self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned update_op = acc_obj.update_state([[1], [0]], [[1], [0]]) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check y_pred squeeze update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]]) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertAlmostEqual(result, 0.75, 2) # 3/4 # check y_true squeeze result_t 
= acc_obj([[[1]], [[1]]], [[1], [0]]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.67, 2) # 4/6 # check with sample_weight result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7 def test_binary_accuracy_ragged(self): acc_obj = metrics.BinaryAccuracy(name='my_acc') self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned rt1 = tf.ragged.constant([[1], [0]]) rt2 = tf.ragged.constant([[1], [0]]) update_op = acc_obj.update_state(rt1, rt2) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check y_true squeeze only supported for dense tensors and is # not supported by ragged tensor (different ranks). --> error rt1 = tf.ragged.constant([[[1], [1]]]) rt2 = tf.ragged.constant([[1], [0]]) with self.assertRaises(ValueError): result_t = acc_obj(rt1, rt2) result = self.evaluate(result_t) def test_binary_accuracy_threshold(self): acc_obj = metrics.BinaryAccuracy(threshold=0.7) self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.5, 2) def test_binary_accuracy_threshold_ragged(self): acc_obj = metrics.BinaryAccuracy(threshold=0.7) self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) rt1 = tf.ragged.constant([[1], [1], [0], [0]]) rt2 = tf.ragged.constant([[0.9], [0.6], [0.4], [0.8]]) result_t = acc_obj(rt1, rt2) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.5, 2) def test_categorical_accuracy(self): acc_obj = metrics.CategoricalAccuracy(name='my_acc') # check config self.assertEqual(acc_obj.name, 'my_acc') self.assertTrue(acc_obj.stateful) self.assertEqual(len(acc_obj.variables), 2) self.assertEqual(acc_obj.dtype, tf.float32) 
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]]) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check with sample_weight result_t = acc_obj([[0, 0, 1], [0, 1, 0]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7 def test_categorical_accuracy_ragged(self): acc_obj = metrics.CategoricalAccuracy(name='my_acc') self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]]) rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]]) update_op = acc_obj.update_state(rt1, rt2) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check with sample_weight rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]]) rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]]) sample_weight = tf.ragged.constant([[0.5], [0.2]]) with self.assertRaises(tf.errors.InvalidArgumentError): result_t = acc_obj(rt1, rt2, sample_weight) result = self.evaluate(result_t) def test_sparse_categorical_accuracy(self): acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc') # check config self.assertEqual(acc_obj.name, 'my_acc') self.assertTrue(acc_obj.stateful) self.assertEqual(len(acc_obj.variables), 2) self.assertEqual(acc_obj.dtype, tf.float32) self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned update_op = acc_obj.update_state([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]]) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check with sample_weight result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]]) result = 
self.evaluate(result_t) self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7 def test_sparse_categorical_accuracy_ragged(self): acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc') # verify that correct value is returned rt1 = tf.ragged.constant([[2], [1]]) rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]]) with self.assertRaises(tf.errors.InvalidArgumentError): # sparse_categorical_accuracy is not supported for composite/ragged # tensors. update_op = acc_obj.update_state(rt1, rt2) self.evaluate(update_op) def test_sparse_categorical_accuracy_mismatched_dims(self): acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc') # check config self.assertEqual(acc_obj.name, 'my_acc') self.assertTrue(acc_obj.stateful) self.assertEqual(len(acc_obj.variables), 2) self.assertEqual(acc_obj.dtype, tf.float32) self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) # verify that correct value is returned update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]]) self.evaluate(update_op) result = self.evaluate(acc_obj.result()) self.assertEqual(result, 1) # 2/2 # check with sample_weight result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]]) result = self.evaluate(result_t) self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7 def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self): with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc') self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables)) t = tf.compat.v1.placeholder(tf.float32) p = tf.compat.v1.placeholder(tf.float32) w = tf.compat.v1.placeholder(tf.float32) result_t = acc_obj(t, p, w) result = sess.run( result_t, feed_dict=({ t: [2, 1], p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], w: [[0.5], [0.2]] })) self.assertAlmostEqual(result, 0.71, 2) # 2.5/2.7 def test_get_acc(self): acc_fn = metrics.get('acc') self.assertEqual(acc_fn, metrics.accuracy) 
@combinations.generate(combinations.combine(mode=['graph', 'eager'])) class CosineSimilarityTest(tf.test.TestCase): def l2_norm(self, x, axis): epsilon = 1e-12 square_sum = np.sum(np.square(x), axis=axis, keepdims=True) x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon)) return np.multiply(x, x_inv_norm) def setup(self, axis=1): self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32) self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32) y_true = self.l2_norm(self.np_y_true, axis) y_pred = self.l2_norm(self.np_y_pred, axis) self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,)) self.y_true = tf.constant(self.np_y_true) self.y_pred = tf.constant(self.np_y_pred) def test_config(self): cosine_obj = metrics.CosineSimilarity( axis=2, name='my_cos', dtype=tf.int32) self.assertEqual(cosine_obj.name, 'my_cos') self.assertEqual(cosine_obj._dtype, tf.int32) # Check save and restore config cosine_obj2 = metrics.CosineSimilarity.from_config(cosine_obj.get_config()) self.assertEqual(cosine_obj2.name, 'my_cos') self.assertEqual(cosine_obj2._dtype, tf.int32) def test_unweighted(self): self.setup() cosine_obj = metrics.CosineSimilarity() self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables)) loss = cosine_obj(self.y_true, self.y_pred) expected_loss = np.mean(self.expected_loss) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_weighted(self): self.setup() cosine_obj = metrics.CosineSimilarity() self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables)) sample_weight = np.asarray([1.2, 3.4]) loss = cosine_obj( self.y_true, self.y_pred, sample_weight=tf.constant(sample_weight)) expected_loss = np.sum( self.expected_loss * sample_weight) / np.sum(sample_weight) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_axis(self): self.setup(axis=1) cosine_obj = metrics.CosineSimilarity(axis=1) self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables)) 
loss = cosine_obj(self.y_true, self.y_pred) expected_loss = np.mean(self.expected_loss) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanAbsoluteErrorTest(tf.test.TestCase): def test_config(self): mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=tf.int32) self.assertEqual(mae_obj.name, 'my_mae') self.assertEqual(mae_obj._dtype, tf.int32) # Check save and restore config mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config()) self.assertEqual(mae_obj2.name, 'my_mae') self.assertEqual(mae_obj2._dtype, tf.int32) def test_unweighted(self): mae_obj = metrics.MeanAbsoluteError() self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) update_op = mae_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = mae_obj.result() self.assertAllClose(0.5, result, atol=1e-5) def test_weighted(self): mae_obj = metrics.MeanAbsoluteError() self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) sample_weight = tf.constant((1., 1.5, 2., 2.5)) result = mae_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanAbsolutePercentageErrorTest(tf.test.TestCase): def test_config(self): mape_obj = metrics.MeanAbsolutePercentageError( name='my_mape', dtype=tf.int32) self.assertEqual(mape_obj.name, 'my_mape') self.assertEqual(mape_obj._dtype, tf.int32) # Check save and restore config mape_obj2 = metrics.MeanAbsolutePercentageError.from_config( 
mape_obj.get_config()) self.assertEqual(mape_obj2.name, 'my_mape') self.assertEqual(mape_obj2._dtype, tf.int32) def test_unweighted(self): mape_obj = metrics.MeanAbsolutePercentageError() self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) update_op = mape_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = mape_obj.result() self.assertAllClose(35e7, result, atol=1e-5) def test_weighted(self): mape_obj = metrics.MeanAbsolutePercentageError() self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) sample_weight = tf.constant((1., 1.5, 2., 2.5)) result = mape_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(40e7, self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanSquaredErrorTest(tf.test.TestCase): def test_config(self): mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=tf.int32) self.assertEqual(mse_obj.name, 'my_mse') self.assertEqual(mse_obj._dtype, tf.int32) # Check save and restore config mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config()) self.assertEqual(mse_obj2.name, 'my_mse') self.assertEqual(mse_obj2._dtype, tf.int32) def test_unweighted(self): mse_obj = metrics.MeanSquaredError() self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) update_op = mse_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = mse_obj.result() 
self.assertAllClose(0.5, result, atol=1e-5) def test_weighted(self): mse_obj = metrics.MeanSquaredError() self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) sample_weight = tf.constant((1., 1.5, 2., 2.5)) result = mse_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanSquaredLogarithmicErrorTest(tf.test.TestCase): def test_config(self): msle_obj = metrics.MeanSquaredLogarithmicError( name='my_msle', dtype=tf.int32) self.assertEqual(msle_obj.name, 'my_msle') self.assertEqual(msle_obj._dtype, tf.int32) # Check save and restore config msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config( msle_obj.get_config()) self.assertEqual(msle_obj2.name, 'my_msle') self.assertEqual(msle_obj2._dtype, tf.int32) def test_unweighted(self): msle_obj = metrics.MeanSquaredLogarithmicError() self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) update_op = msle_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = msle_obj.result() self.assertAllClose(0.24022, result, atol=1e-5) def test_weighted(self): msle_obj = metrics.MeanSquaredLogarithmicError() self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) sample_weight = tf.constant((1., 1.5, 2., 2.5)) result = msle_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.26082, 
self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class HingeTest(tf.test.TestCase): def test_config(self): hinge_obj = metrics.Hinge(name='hinge', dtype=tf.int32) self.assertEqual(hinge_obj.name, 'hinge') self.assertEqual(hinge_obj._dtype, tf.int32) # Check save and restore config hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config()) self.assertEqual(hinge_obj2.name, 'hinge') self.assertEqual(hinge_obj2._dtype, tf.int32) def test_unweighted(self): hinge_obj = metrics.Hinge() self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables)) y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # metric = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4] # = [0.6, 0.4125] # reduced metric = (0.6 + 0.4125) / 2 update_op = hinge_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = hinge_obj.result() self.assertAllClose(0.506, result, atol=1e-3) def test_weighted(self): hinge_obj = metrics.Hinge() self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables)) y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]]) y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) sample_weight = tf.constant([1.5, 2.]) # metric = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4] # = [0.6, 0.4125] # weighted metric = [0.6 * 1.5, 0.4125 * 2] # reduced metric = (0.6 * 1.5 + 0.4125 * 2) / (1.5 + 2) result = hinge_obj(y_true, y_pred, sample_weight=sample_weight) 
self.assertAllClose(0.493, self.evaluate(result), atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class SquaredHingeTest(tf.test.TestCase): def test_config(self): sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=tf.int32) self.assertEqual(sq_hinge_obj.name, 'sq_hinge') self.assertEqual(sq_hinge_obj._dtype, tf.int32) # Check save and restore config sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config()) self.assertEqual(sq_hinge_obj2.name, 'sq_hinge') self.assertEqual(sq_hinge_obj2._dtype, tf.int32) def test_unweighted(self): sq_hinge_obj = metrics.SquaredHinge() self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables)) y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # metric = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]] # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0], # [0.5625, 0, 0.25, 0.16]] # metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4] # = [0.485, 0.2431] # reduced metric = (0.485 + 0.2431) / 2 update_op = sq_hinge_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = sq_hinge_obj.result() self.assertAllClose(0.364, result, atol=1e-3) def test_weighted(self): sq_hinge_obj = metrics.SquaredHinge() self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables)) y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]]) y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) sample_weight = tf.constant([1.5, 2.]) # metric = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 
0, 0.5, 0.4]] # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]] # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0], # [0.5625, 0, 0.25, 0.16]] # metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4] # = [0.485, 0.2431] # weighted metric = [0.485 * 1.5, 0.2431 * 2] # reduced metric = (0.485 * 1.5 + 0.2431 * 2) / (1.5 + 2) result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.347, self.evaluate(result), atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class CategoricalHingeTest(tf.test.TestCase): def test_config(self): cat_hinge_obj = metrics.CategoricalHinge( name='cat_hinge', dtype=tf.int32) self.assertEqual(cat_hinge_obj.name, 'cat_hinge') self.assertEqual(cat_hinge_obj._dtype, tf.int32) # Check save and restore config cat_hinge_obj2 = metrics.CategoricalHinge.from_config( cat_hinge_obj.get_config()) self.assertEqual(cat_hinge_obj2.name, 'cat_hinge') self.assertEqual(cat_hinge_obj2._dtype, tf.int32) def test_unweighted(self): cat_hinge_obj = metrics.CategoricalHinge() self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) update_op = cat_hinge_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = cat_hinge_obj.result() self.assertAllClose(0.5, result, atol=1e-5) def test_weighted(self): cat_hinge_obj = metrics.CategoricalHinge() self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables)) y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))) y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))) sample_weight = tf.constant((1., 1.5, 2., 2.5)) result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.5, 
self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class RootMeanSquaredErrorTest(tf.test.TestCase): def test_config(self): rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=tf.int32) self.assertEqual(rmse_obj.name, 'rmse') self.assertEqual(rmse_obj._dtype, tf.int32) rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config()) self.assertEqual(rmse_obj2.name, 'rmse') self.assertEqual(rmse_obj2._dtype, tf.int32) def test_unweighted(self): rmse_obj = metrics.RootMeanSquaredError() self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables)) y_true = tf.constant((2, 4, 6)) y_pred = tf.constant((1, 3, 2)) update_op = rmse_obj.update_state(y_true, y_pred) self.evaluate(update_op) result = rmse_obj.result() # error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6 self.assertAllClose(math.sqrt(6), result, atol=1e-3) def test_weighted(self): rmse_obj = metrics.RootMeanSquaredError() self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables)) y_true = tf.constant((2, 4, 6, 8)) y_pred = tf.constant((1, 3, 2, 3)) sample_weight = tf.constant((0, 1, 0, 1)) result = rmse_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class TopKCategoricalAccuracyTest(tf.test.TestCase): def test_config(self): a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=tf.int32) self.assertEqual(a_obj.name, 'topkca') self.assertEqual(a_obj._dtype, tf.int32) a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config()) self.assertEqual(a_obj2.name, 'topkca') self.assertEqual(a_obj2._dtype, tf.int32) def test_correctness(self): a_obj = metrics.TopKCategoricalAccuracy() self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) y_true = tf.constant([[0, 0, 1], [0, 1, 0]]) y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) result = 
a_obj(y_true, y_pred) self.assertEqual(1, self.evaluate(result)) # both the samples match # With `k` < 5. a_obj = metrics.TopKCategoricalAccuracy(k=1) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) result = a_obj(y_true, y_pred) self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches # With `k` > 5. y_true = tf.constant([[0, 0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0]]) y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4], [0.05, 0.95, 0, 0, 0, 0, 0]]) a_obj = metrics.TopKCategoricalAccuracy(k=6) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) result = a_obj(y_true, y_pred) self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches. def test_weighted(self): a_obj = metrics.TopKCategoricalAccuracy(k=2) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) y_true = tf.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]]) sample_weight = tf.constant((1.0, 0.0, 1.0)) result = a_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(1.0, self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class SparseTopKCategoricalAccuracyTest(tf.test.TestCase): def test_config(self): a_obj = metrics.SparseTopKCategoricalAccuracy( name='stopkca', dtype=tf.int32) self.assertEqual(a_obj.name, 'stopkca') self.assertEqual(a_obj._dtype, tf.int32) a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config( a_obj.get_config()) self.assertEqual(a_obj2.name, 'stopkca') self.assertEqual(a_obj2._dtype, tf.int32) def test_correctness(self): a_obj = metrics.SparseTopKCategoricalAccuracy() self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) y_true = tf.constant([2, 1]) y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) result = a_obj(y_true, y_pred) self.assertEqual(1, self.evaluate(result)) # both the samples match # With `k` < 5. 
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) result = a_obj(y_true, y_pred) self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches # With `k` > 5. Note that only `y_pred` is widened to 7 classes here; # `y_true` keeps the sparse labels [2, 1] from above. y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4], [0.05, 0.95, 0, 0, 0, 0, 0]]) a_obj = metrics.SparseTopKCategoricalAccuracy(k=6) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) result = a_obj(y_true, y_pred) self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches. def test_weighted(self): # A sample with weight 0 must not contribute to the accuracy. a_obj = metrics.SparseTopKCategoricalAccuracy(k=2) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) y_true = tf.constant([1, 0, 2]) y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]]) sample_weight = tf.constant((1.0, 0.0, 1.0)) result = a_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(1.0, self.evaluate(result), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class LogCoshErrorTest(tf.test.TestCase): """Tests for `metrics.LogCoshError` against a NumPy reference.""" def setup(self): # NOTE: lowercase `setup` is not unittest's `setUp`; each test calls it # explicitly to build the fixture and the NumPy reference values. y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3)) y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3)) self.batch_size = 6 error = y_pred - y_true # Reference: logcosh(e) = log((exp(e) + exp(-e)) / 2), elementwise. self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2) self.y_pred = tf.constant(y_pred, dtype=tf.float32) self.y_true = tf.constant(y_true) def test_config(self): logcosh_obj = metrics.LogCoshError(name='logcosh', dtype=tf.int32) self.assertEqual(logcosh_obj.name, 'logcosh') self.assertEqual(logcosh_obj._dtype, tf.int32) def test_unweighted(self): self.setup() logcosh_obj = metrics.LogCoshError() self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables)) update_op = logcosh_obj.update_state(self.y_true, self.y_pred) self.evaluate(update_op) result = logcosh_obj.result() # Unweighted result is the mean of the elementwise reference values. expected_result = np.sum(self.expected_results) / self.batch_size self.assertAllClose(result, expected_result, atol=1e-3) def test_weighted(self): self.setup() logcosh_obj = metrics.LogCoshError() self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables)) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) result = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) # The (2, 1) per-row weights broadcast across the 3 columns. sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)) expected_result = np.multiply(self.expected_results, sample_weight) expected_result = np.sum(expected_result) / np.sum(sample_weight) self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class PoissonTest(tf.test.TestCase): """Tests for `metrics.Poisson` against a NumPy reference.""" def setup(self): # NOTE: lowercase `setup` is called explicitly by each test (it is not # unittest's `setUp`). y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3)) y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3)) self.batch_size = 6 # Reference: poisson loss = y_pred - y_true * log(y_pred), elementwise. self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred)) self.y_pred = tf.constant(y_pred, dtype=tf.float32) self.y_true = tf.constant(y_true) def test_config(self): poisson_obj = metrics.Poisson(name='poisson', dtype=tf.int32) self.assertEqual(poisson_obj.name, 'poisson') self.assertEqual(poisson_obj._dtype, tf.int32) # Config must round-trip through get_config/from_config. poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config()) self.assertEqual(poisson_obj2.name, 'poisson') self.assertEqual(poisson_obj2._dtype, tf.int32) def test_unweighted(self): self.setup() poisson_obj = metrics.Poisson() self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables)) update_op = poisson_obj.update_state(self.y_true, self.y_pred) self.evaluate(update_op) result = poisson_obj.result() expected_result = np.sum(self.expected_results) / self.batch_size self.assertAllClose(result, expected_result, atol=1e-3) def test_weighted(self): self.setup() poisson_obj = metrics.Poisson() self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables)) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) result = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight) # The (2, 1) per-row weights broadcast across the 3 columns. sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)) expected_result = np.multiply(self.expected_results, sample_weight) expected_result = np.sum(expected_result) / np.sum(sample_weight) self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class KLDivergenceTest(tf.test.TestCase): """Tests for `metrics.KLDivergence` against a NumPy reference.""" def setup(self): # NOTE: lowercase `setup` is called explicitly by each test (it is not # unittest's `setUp`). y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3)) y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3)) # KL divergence is summed over the class axis, so the reduction # denominator is the number of rows (2), not elements. self.batch_size = 2 # Reference: y_true * log(y_true / y_pred), elementwise. self.expected_results = np.multiply(y_true, np.log(y_true / y_pred)) self.y_pred = tf.constant(y_pred, dtype=tf.float32) self.y_true = tf.constant(y_true) def test_config(self): k_obj = metrics.KLDivergence(name='kld', dtype=tf.int32) self.assertEqual(k_obj.name, 'kld') self.assertEqual(k_obj._dtype, tf.int32) # Config must round-trip through get_config/from_config. k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config()) self.assertEqual(k_obj2.name, 'kld') self.assertEqual(k_obj2._dtype, tf.int32) def test_unweighted(self): self.setup() k_obj = metrics.KLDivergence() self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables)) update_op = k_obj.update_state(self.y_true, self.y_pred) self.evaluate(update_op) result = k_obj.result() expected_result = np.sum(self.expected_results) / self.batch_size self.assertAllClose(result, expected_result, atol=1e-3) def test_weighted(self): self.setup() k_obj = metrics.KLDivergence() self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables)) sample_weight = tf.constant([1.2, 3.4], shape=(2, 1)) result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) # The (2, 1) per-row weights broadcast across the 3 columns. sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)) expected_result = np.multiply(self.expected_results, sample_weight) # Denominator is the sum of the per-row weights (1.2 + 3.4). expected_result = np.sum(expected_result) / (1.2 + 3.4) self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanRelativeErrorTest(tf.test.TestCase): """Tests for `metrics.MeanRelativeError`.""" def test_config(self): normalizer = tf.constant([1, 3], dtype=tf.float32) mre_obj = metrics.MeanRelativeError(normalizer=normalizer, name='mre') self.assertEqual(mre_obj.name, 'mre') self.assertArrayNear(self.evaluate(mre_obj.normalizer), [1, 3], 1e-1) # Config (including the normalizer tensor) must round-trip. mre_obj2 = metrics.MeanRelativeError.from_config(mre_obj.get_config()) self.assertEqual(mre_obj2.name, 'mre') self.assertArrayNear(self.evaluate(mre_obj2.normalizer), [1, 3], 1e-1) def test_unweighted(self): np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32) np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32) # Reference: mean(|y_pred - y_true| / normalizer), normalizer = y_true. expected_error = np.mean( np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)) y_pred = tf.constant(np_y_pred, shape=(1, 4), dtype=tf.float32) y_true = tf.constant(np_y_true, shape=(1, 4)) mre_obj = metrics.MeanRelativeError(normalizer=y_true) self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables)) result = mre_obj(y_true, y_pred) self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3) def test_weighted(self): np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32) np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32) sample_weight = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32) rel_errors = np.divide(np.absolute(np_y_pred - np_y_true), np_y_true) # Weights sum to 1.0, so the weighted mean equals the weighted sum. expected_error = np.sum(rel_errors * sample_weight) y_pred = tf.constant(np_y_pred, dtype=tf.float32) y_true = tf.constant(np_y_true) mre_obj = metrics.MeanRelativeError(normalizer=y_true) self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables)) result = mre_obj( y_true, y_pred, sample_weight=tf.constant(sample_weight)) self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3) def test_zero_normalizer(self): # An all-zero normalizer must yield 0, not a division-by-zero NaN. y_pred = tf.constant([2, 4], dtype=tf.float32) y_true = tf.constant([1, 3]) mre_obj = metrics.MeanRelativeError(normalizer=tf.zeros_like(y_true)) self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables)) result = mre_obj(y_true, y_pred) self.assertEqual(self.evaluate(result), 0) 
@combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MeanIoUTest(tf.test.TestCase): """Tests for `metrics.MeanIoU` (confusion-matrix based IoU).""" def test_config(self): m_obj = metrics.MeanIoU(num_classes=2, name='mean_iou') self.assertEqual(m_obj.name, 'mean_iou') self.assertEqual(m_obj.num_classes, 2) # Config must round-trip through get_config/from_config. m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config()) self.assertEqual(m_obj2.name, 'mean_iou') self.assertEqual(m_obj2.num_classes, 2) def test_unweighted(self): y_pred = [0, 1, 0, 1] y_true = [0, 0, 1, 1] m_obj = metrics.MeanIoU(num_classes=2) self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables)) result = m_obj(y_true, y_pred) # cm = [[1, 1], # [1, 1]] # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1] # iou = true_positives / (sum_row + sum_col - true_positives)) expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) def test_weighted(self): y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32) y_true = tf.constant([0, 0, 1, 1]) sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1]) m_obj = metrics.MeanIoU(num_classes=2) self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables)) result = m_obj(y_true, y_pred, sample_weight=sample_weight) # Sample weights fill the confusion matrix instead of unit counts: # cm = [[0.2, 0.3], # [0.4, 0.1]] # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1] # iou = true_positives / (sum_row + sum_col - true_positives)) expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2 self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) def test_multi_dim_input(self): # Same data as test_weighted but shaped (2, 2); result must be identical. y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32) y_true = tf.constant([[0, 0], [1, 1]]) sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]]) m_obj = metrics.MeanIoU(num_classes=2) self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables)) result = m_obj(y_true, y_pred, sample_weight=sample_weight) # cm = [[0.2, 0.3], # [0.4, 0.1]] # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1] # iou = true_positives / (sum_row + sum_col - true_positives)) expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2 self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) def test_zero_valid_entries(self): # With no updates at all, the result must be 0 rather than NaN. m_obj = metrics.MeanIoU(num_classes=2) self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables)) self.assertAllClose(self.evaluate(m_obj.result()), 0, atol=1e-3) def test_zero_and_non_zero_entries(self): # Classes that never appear are excluded from the mean (divide by 1). y_pred = tf.constant([1], dtype=tf.float32) y_true = tf.constant([1]) m_obj = metrics.MeanIoU(num_classes=2) self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables)) result = m_obj(y_true, y_pred) # cm = [[0, 0], # [0, 1]] # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1] # iou = true_positives / (sum_row + sum_col - true_positives)) expected_result = (0 + 1 / (1 + 1 - 1)) / 1 self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3) class MeanTensorTest(tf.test.TestCase, parameterized.TestCase): """Tests for `metrics.MeanTensor` (elementwise running mean).""" @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_config(self): with self.test_session(): m = metrics.MeanTensor(name='mean_by_element') # check config self.assertEqual(m.name, 'mean_by_element') self.assertTrue(m.stateful) self.assertEqual(m.dtype, tf.float32) # Variables are created lazily on the first update... self.assertEmpty(m.variables) # ...so result() before any update is an error. with self.assertRaisesRegex(ValueError, 'does not have any value yet'): m.result() self.evaluate(m([[3], [5], [3]])) self.assertAllEqual(m._shape, [3, 1]) m2 = metrics.MeanTensor.from_config(m.get_config()) self.assertEqual(m2.name, 'mean_by_element') self.assertTrue(m2.stateful) self.assertEqual(m2.dtype, tf.float32) self.assertEmpty(m2.variables) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_unweighted(self): with self.test_session(): m = metrics.MeanTensor(dtype=tf.float64) # check __call__() self.assertAllClose(self.evaluate(m([100, 40])), [100, 40]) self.assertAllClose(self.evaluate(m.total), [100, 40]) self.assertAllClose(self.evaluate(m.count), [1, 1]) # check update_state() and result() + state accumulation + tensor input update_op = m.update_state([ tf.convert_to_tensor(1), tf.convert_to_tensor(5) ]) self.evaluate(update_op) self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5]) self.assertAllClose(self.evaluate(m.total), [101, 45]) self.assertAllClose(self.evaluate(m.count), [2, 2]) # check reset_state() m.reset_state() self.assertAllClose(self.evaluate(m.total), [0, 0]) self.assertAllClose(self.evaluate(m.count), [0, 0]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_weighted(self): with self.test_session(): m = metrics.MeanTensor(dtype=tf.float64) self.assertEqual(m.dtype, tf.float64) # check scalar weight m = m result_t = m([100, 30], sample_weight=0.5) self.assertAllClose(self.evaluate(result_t), [100, 30]) self.assertAllClose(self.evaluate(m.total), [50, 15]) self.assertAllClose(self.evaluate(m.count), [0.5, 0.5]) # check weights not scalar and weights rank matches values rank result_t = m([1, 5], sample_weight=[1, 0.2]) result = self.evaluate(result_t) self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2) self.assertAllClose(self.evaluate(m.total), [51, 16]) self.assertAllClose(self.evaluate(m.count), [1.5, 0.7]) # check weights broadcast result_t = m([1, 2], sample_weight=0.5) self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2]) self.assertAllClose(self.evaluate(m.total), [51.5, 17]) self.assertAllClose(self.evaluate(m.count), [2, 1.2]) # check weights squeeze result_t = m([1, 5], sample_weight=[[1], [0.2]]) self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4]) self.assertAllClose(self.evaluate(m.total), [52.5, 18]) self.assertAllClose(self.evaluate(m.count), [3, 1.4]) # check weights expand m = metrics.MeanTensor(dtype=tf.float64) self.evaluate(tf.compat.v1.variables_initializer(m.variables)) result_t = m([[1], [5]], sample_weight=[1, 0.2]) self.assertAllClose(self.evaluate(result_t), [[1], [5]]) self.assertAllClose(self.evaluate(m.total), [[1], [1]]) self.assertAllClose(self.evaluate(m.count), [[1], [0.2]]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_invalid_value_shape(self): # The shape is fixed by the first update; later shapes must match. m = metrics.MeanTensor(dtype=tf.float64) m([1]) with self.assertRaisesRegex( ValueError, 'MeanTensor input values must always have the same shape'): m([1, 5]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_build_in_tf_function(self): """Ensure that variables are created correctly in a tf function.""" m = metrics.MeanTensor(dtype=tf.float64) @tf.function def call_metric(x): return m(x) with self.test_session(): self.assertAllClose(self.evaluate(call_metric([100, 40])), [100, 40]) self.assertAllClose(self.evaluate(m.total), [100, 40]) self.assertAllClose(self.evaluate(m.count), [1, 1]) self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21]) @combinations.generate(combinations.combine(mode=['eager'])) def test_in_keras_model(self): class ModelWithMetric(Model): def __init__(self): super(ModelWithMetric, self).__init__() self.dense1 = layers.Dense( 3, activation='relu', kernel_initializer='ones') self.dense2 = layers.Dense( 1, activation='sigmoid', kernel_initializer='ones') self.mean_tensor = metrics.MeanTensor() def call(self, x): x = self.dense1(x) x = self.dense2(x) # Track the running mean of the first layer's kernel. self.mean_tensor(self.dense1.kernel) return x model = ModelWithMetric() model.compile( loss='mae', optimizer='rmsprop', run_eagerly=True) x = np.ones((100, 4)) y = np.zeros((100, 1)) # batch_size=50 -> 2 batches -> count 2 per element. model.evaluate(x, y, batch_size=50) self.assertAllClose(self.evaluate(model.mean_tensor.result()), np.ones((4, 3))) self.assertAllClose(self.evaluate(model.mean_tensor.total), np.full((4, 3), 2)) self.assertAllClose(self.evaluate(model.mean_tensor.count), np.full((4, 3), 2)) # batch_size=25 -> 4 more batches accumulate on top of the 2 above. model.evaluate(x, y, batch_size=25) self.assertAllClose(self.evaluate(model.mean_tensor.result()), np.ones((4, 3))) self.assertAllClose(self.evaluate(model.mean_tensor.total), np.full((4, 3), 4)) self.assertAllClose(self.evaluate(model.mean_tensor.count), np.full((4, 3), 4)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BinaryCrossentropyTest(tf.test.TestCase): """Tests for `metrics.BinaryCrossentropy`.""" def test_config(self): bce_obj = metrics.BinaryCrossentropy( name='bce', dtype=tf.int32, label_smoothing=0.2) self.assertEqual(bce_obj.name, 'bce') self.assertEqual(bce_obj._dtype, tf.int32) old_config = bce_obj.get_config() self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3) # Check save and restore config bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config) self.assertEqual(bce_obj2.name, 'bce') self.assertEqual(bce_obj2._dtype, tf.int32) new_config = bce_obj2.get_config() self.assertDictEqual(old_config, new_config) def test_unweighted(self): bce_obj = metrics.BinaryCrossentropy() self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables)) y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2]) y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2]) result = bce_obj(y_true, y_pred) # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999 # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON] # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON)) # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON), # -log(Y_MAX + EPSILON), -log(1)] # = [(0 + 15.33) / 2, (0 + 0) / 2] # Reduced metric = 7.665 / 2 self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3) def test_unweighted_with_logits(self): bce_obj = metrics.BinaryCrossentropy(from_logits=True) self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables)) y_true = tf.constant([[1, 0, 1], [0, 1, 1]]) y_pred = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]]) result = bce_obj(y_true, y_pred) # Metric = max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # = [((100 - 100 * 1 + log(1 + exp(-100))) + # (0 + 100 * 0 + log(1 + exp(-100))) + # (100 - 100 * 1 + log(1 + exp(-100))), # ((100 - 100 * 0 + log(1 + exp(-100))) + # (100 - 100 * 1 + log(1 + exp(-100))) + # (0 + 100 * 1 + log(1 + exp(-100))))] # = [(0 + 0 + 0) / 3, 200 / 3] # Reduced metric = (0 + 66.666) / 2 self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3) def test_weighted(self): bce_obj = metrics.BinaryCrossentropy() self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables)) y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2]) y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2]) sample_weight = tf.constant([1.5, 2.]) result = bce_obj(y_true, y_pred, sample_weight=sample_weight) # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999 # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON] # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON)) # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON), # -log(Y_MAX + EPSILON), -log(1)] # = [(0 + 15.33) / 2, (0 + 0) / 2] # Weighted metric = [7.665 * 1.5, 0] # Reduced metric = 7.665 * 1.5 / (1.5 + 2) self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3) def test_weighted_from_logits(self): bce_obj = metrics.BinaryCrossentropy(from_logits=True) self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables)) y_true = tf.constant([[1, 0, 1], [0, 1, 1]]) y_pred = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]]) sample_weight = tf.constant([2., 2.5]) result = bce_obj(y_true, y_pred, sample_weight=sample_weight) # Metric = max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # = [(0 + 0 + 0) / 3, 200 / 3] # Weighted metric = [0, 66.666 * 2.5] # Reduced metric = 66.666 * 2.5 / (2 + 2.5) self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3) def test_label_smoothing(self): logits = tf.constant(((100., -100., -100.))) y_true = tf.constant(((1, 0, 1))) label_smoothing = 0.1 # Metric: max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # Label smoothing: z' = z * (1 - L) + 0.5L # After label smoothing, label 1 becomes 1 - 0.5L # label 0 becomes 0.5L # Applying the above two fns to the given input: # (100 - 100 * (1 - 0.5 L) + 0 + # 0 + 100 * (0.5 L) + 0 + # 0 + 100 * (1 - 0.5 L) + 0) * (1/3) # = (100 + 50L) * 1/3 bce_obj = metrics.BinaryCrossentropy( from_logits=True, label_smoothing=label_smoothing) self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables)) result = bce_obj(y_true, logits) expected_value = (100.0 + 50.0 * label_smoothing) / 3.0 self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class CategoricalCrossentropyTest(tf.test.TestCase): """Tests for `metrics.CategoricalCrossentropy` (one-hot labels).""" def test_config(self): cce_obj = metrics.CategoricalCrossentropy( name='cce', dtype=tf.int32, label_smoothing=0.2) self.assertEqual(cce_obj.name, 'cce') self.assertEqual(cce_obj._dtype, tf.int32) old_config = cce_obj.get_config() self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3) # Check save and restore config cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config) self.assertEqual(cce_obj2.name, 'cce') self.assertEqual(cce_obj2._dtype, tf.int32) new_config = cce_obj2.get_config() self.assertDictEqual(old_config, new_config) def test_unweighted(self): cce_obj = metrics.CategoricalCrossentropy() self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables)) y_true = np.asarray([[0, 1, 0], [0, 0, 1]]) y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) result = cce_obj(y_true, y_pred) # EPSILON = 1e-7, y = y_true, y` = y_pred # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # Metric = -sum(y * log(y'), axis = -1) # = -((log 0.95), (log 0.1)) # = [0.051, 2.302] # Reduced metric = (0.051 + 2.302) / 2 self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3) def test_unweighted_from_logits(self): cce_obj = metrics.CategoricalCrossentropy(from_logits=True) self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables)) y_true = np.asarray([[0, 1, 0], [0, 0, 1]]) logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32) result = cce_obj(y_true, logits) # softmax = exp(logits) / sum(exp(logits), axis=-1) # xent = -sum(labels * log(softmax), 1) # exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]] # sum(exp(logits), axis=-1) = [8106.802, 2986.394] # softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]] # log(softmax) = [[-8.00045, -0.00045, -9.00045], # [-7.00182, -0.00182, -7.00182]] # labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]] # xent = [0.00045, 7.00182] # Reduced xent = (0.00045 + 7.00182) / 2 self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3) 
def test_weighted(self): cce_obj = metrics.CategoricalCrossentropy() self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables)) y_true = np.asarray([[0, 1, 0], [0, 0, 1]]) y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) sample_weight = tf.constant([1.5, 2.]) result = cce_obj(y_true, y_pred, sample_weight=sample_weight) # EPSILON = 1e-7, y = y_true, y` = y_pred # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # Metric = -sum(y * log(y'), axis = -1) # = -((log 0.95), (log 0.1)) # = [0.051, 2.302] # Weighted metric = [0.051 * 1.5, 2.302 * 2.] # Reduced metric = (0.051 * 1.5 + 2.302 * 2.) / 3.5 self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3) def test_weighted_from_logits(self): cce_obj = metrics.CategoricalCrossentropy(from_logits=True) self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables)) y_true = np.asarray([[0, 1, 0], [0, 0, 1]]) logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32) sample_weight = tf.constant([1.5, 2.]) result = cce_obj(y_true, logits, sample_weight=sample_weight) # softmax = exp(logits) / sum(exp(logits), axis=-1) # xent = -sum(labels * log(softmax), 1) # xent = [0.00045, 7.00182] # weighted xent = [0.000675, 14.00364] # Reduced xent = (0.000675 + 14.00364) / (1.5 + 2) self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3) def test_label_smoothing(self): y_true = np.asarray([[0, 1, 0], [0, 0, 1]]) logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32) label_smoothing = 0.1 # Label smoothing: z' = z * (1 - L) + L/n, # where L = label smoothing value and n = num classes # Label value 1 becomes: 1 - L + L/n # Label value 0 becomes: L/n # y_true with label_smoothing = [[0.0333, 0.9333, 0.0333], # [0.0333, 0.0333, 0.9333]] # softmax = exp(logits) / sum(exp(logits), axis=-1) # xent = -sum(labels * log(softmax), 1) # log(softmax) = [[-8.00045, -0.00045, -9.00045], # [-7.00182, -0.00182, -7.00182]] # labels * log(softmax) = [[-0.26641, -0.00042, -0.29971], # [-0.23316, -0.00006, -6.53479]] # xent = [0.56654, 6.76801] # Reduced xent = (0.56654 + 6.76801) / 2 cce_obj = metrics.CategoricalCrossentropy( from_logits=True, label_smoothing=label_smoothing) self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables)) loss = cce_obj(y_true, logits) self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class SparseCategoricalCrossentropyTest(tf.test.TestCase): """Tests for `metrics.SparseCategoricalCrossentropy` (integer labels).""" def test_config(self): scce_obj = metrics.SparseCategoricalCrossentropy( name='scce', dtype=tf.int32) self.assertEqual(scce_obj.name, 'scce') self.assertEqual(scce_obj.dtype, tf.int32) old_config = scce_obj.get_config() # Config must be JSON-serializable. self.assertDictEqual(old_config, json.loads(json.dumps(old_config))) # Check save and restore config scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(old_config) self.assertEqual(scce_obj2.name, 'scce') self.assertEqual(scce_obj2.dtype, tf.int32) new_config = scce_obj2.get_config() self.assertDictEqual(old_config, new_config) def test_unweighted(self): scce_obj = metrics.SparseCategoricalCrossentropy() self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables)) y_true = np.asarray([1, 2]) y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) result = scce_obj(y_true, y_pred) # EPSILON = 1e-7, y = y_true, y` = y_pred # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # logits = log(y`) = [[-2.9957, -0.0513, -16.1181], # [-2.3026, -0.2231, -2.3026]] # softmax = exp(logits) / sum(exp(logits), axis=-1) # y = one_hot(y) = [[0, 1, 0], [0, 0, 1]] # xent = -sum(y * log(softmax), 1) # exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # sum(exp(logits), axis=-1) = [1, 1] # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # log(softmax) = [[-2.9957, -0.0513, -16.1181], # [-2.3026, -0.2231, -2.3026]] # y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]] # xent = [0.0513, 2.3026] # Reduced xent = (0.0513 + 2.3026) / 2 self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3) def test_unweighted_from_logits(self): scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True) self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables)) y_true = np.asarray([1, 2]) logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32) result = scce_obj(y_true, logits) # softmax = exp(logits) / sum(exp(logits), axis=-1) # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]] # xent = -sum(y_true * log(softmax), 1) # exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]] # sum(exp(logits), axis=-1) = [8106.802, 2986.394] # softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]] # log(softmax) = [[-8.00045, -0.00045, -9.00045], # [-7.00182, -0.00182, -7.00182]] # y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]] # xent = [0.00045, 7.00182] # Reduced xent = (0.00045 + 7.00182) / 2 self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3) def test_weighted(self): scce_obj = metrics.SparseCategoricalCrossentropy() self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables)) y_true = np.asarray([1, 2]) y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) sample_weight = tf.constant([1.5, 2.]) result = scce_obj(y_true, y_pred, sample_weight=sample_weight) # EPSILON = 1e-7, y = y_true, y` = y_pred # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # logits = log(y`) = [[-2.9957, -0.0513, -16.1181], # [-2.3026, -0.2231, -2.3026]] # softmax = exp(logits) / sum(exp(logits), axis=-1) # y = one_hot(y) = [[0, 1, 0], [0, 0, 1]] # xent = -sum(y * log(softmax), 1) # exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # sum(exp(logits), axis=-1) = [1, 1] # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] # log(softmax) = [[-2.9957, -0.0513, -16.1181], # [-2.3026, -0.2231, -2.3026]] # y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]] # xent = [0.0513, 2.3026] # Weighted xent = [0.051 * 1.5, 2.302 * 2.] # Reduced xent = (0.051 * 1.5 + 2.302 * 2.) / 3.5 self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3) def test_weighted_from_logits(self): scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True) self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables)) y_true = np.asarray([1, 2]) logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32) sample_weight = tf.constant([1.5, 2.]) result = scce_obj(y_true, logits, sample_weight=sample_weight) # softmax = exp(logits) / sum(exp(logits), axis=-1) # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]] # xent = -sum(y_true * log(softmax), 1) # xent = [0.00045, 7.00182] # weighted xent = [0.000675, 14.00364] # Reduced xent = (0.000675 + 14.00364) / (1.5 + 2) self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3) def test_axis(self): # Class axis moved to axis 0: predictions are (num_classes, batch). scce_obj = metrics.SparseCategoricalCrossentropy(axis=0) self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables)) y_true = np.asarray([1, 2]) y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]]) result = scce_obj(y_true, y_pred) # EPSILON = 1e-7, y = y_true, y` = y_pred # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]] # logits = log(y`) = [[-2.9957, -2.3026], # [-0.0513, -0.2231], # [-16.1181, -2.3026]] # softmax = exp(logits) / sum(exp(logits), axis=-1) # y = one_hot(y) = [[0, 0], [1, 0], [0, 1]] # xent = -sum(y * log(softmax), 1) # exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]] # sum(exp(logits)) = [1, 1] # softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]] # log(softmax) = [[-2.9957, -2.3026], # [-0.0513, -0.2231], # [-16.1181, -2.3026]] # y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]] # xent = [0.0513, 2.3026] # Reduced xent = (0.0513 + 2.3026) / 2 self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3) class BinaryTruePositives(metrics.Metric): """Custom metric counting (optionally weighted) binary true positives.""" def __init__(self, name='binary_true_positives', **kwargs): super(BinaryTruePositives, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name='tp', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): # Non-zero values are truthy after the bool cast (e.g. 0.9, 1.5 -> True). y_true = tf.cast(y_true, tf.bool) y_pred = tf.cast(y_pred, tf.bool) values = tf.logical_and( tf.equal(y_true, True), tf.equal(y_pred, True)) values = tf.cast(values, self.dtype) if sample_weight is not None: sample_weight = tf.cast(sample_weight, dtype=self.dtype) sample_weight = tf.__internal__.ops.broadcast_weights( sample_weight, values) values = tf.multiply(values, sample_weight) self.true_positives.assign_add(tf.reduce_sum(values)) def result(self): return self.true_positives class BinaryTruePositivesViaControlFlow(metrics.Metric): """Same metric as above, but written with Python control flow so that tests can exercise autograph conversion of `update_state`/`result`.""" def __init__(self, name='binary_true_positives', **kwargs): super(BinaryTruePositivesViaControlFlow, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name='tp', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.cast(y_true, tf.bool) y_pred = tf.cast(y_pred, tf.bool) for i in range(len(y_true)): for j in range(len(y_true[i])): if y_true[i][j] and y_pred[i][j]: if sample_weight is None: 
self.true_positives.assign_add(1) else: self.true_positives.assign_add(sample_weight[i][0]) def result(self): if tf.constant(True): return self.true_positives return 0.0 @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class CustomMetricsTest(tf.test.TestCase): def test_config(self): btp_obj = BinaryTruePositives(name='btp', dtype=tf.int32) self.assertEqual(btp_obj.name, 'btp') self.assertEqual(btp_obj.dtype, tf.int32) # Check save and restore config btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config()) self.assertEqual(btp_obj2.name, 'btp') self.assertEqual(btp_obj2.dtype, tf.int32) def test_unweighted(self): btp_obj = BinaryTruePositives() self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables)) y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]]) y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]]) update_op = btp_obj.update_state(y_true, y_pred) # pylint: disable=assignment-from-no-return self.evaluate(update_op) result = btp_obj.result() self.assertEqual(7, self.evaluate(result)) def test_weighted(self): btp_obj = BinaryTruePositives() self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables)) y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]]) y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]]) sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]]) result = btp_obj(y_true, y_pred, sample_weight=sample_weight) self.assertEqual(12, self.evaluate(result)) def test_autograph(self): metric = BinaryTruePositivesViaControlFlow() self.evaluate(tf.compat.v1.variables_initializer(metric.variables)) y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]]) y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]]) sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]]) @tf.function def 
compute_metric(y_true, y_pred, sample_weight): metric(y_true, y_pred, sample_weight) return metric.result() result = compute_metric(y_true, y_pred, sample_weight) self.assertEqual(12, self.evaluate(result)) def test_metric_wrappers_autograph(self): def metric_fn(y_true, y_pred): x = tf.constant(0.0) for i in range(len(y_true)): for j in range(len(y_true[i])): if tf.equal(y_true[i][j], y_pred[i][j]) and y_true[i][j] > 0: x += 1.0 return x mean_metric = metrics.MeanMetricWrapper(metric_fn) sum_metric = metrics.SumOverBatchSizeMetricWrapper(metric_fn) self.evaluate(tf.compat.v1.variables_initializer(mean_metric.variables)) self.evaluate(tf.compat.v1.variables_initializer(sum_metric.variables)) y_true = tf.constant([[0, 0, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [1, 1, 1, 0, 1]]) y_pred = tf.constant([[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]) @tf.function def tf_functioned_metric_fn(metric, y_true, y_pred): return metric(y_true, y_pred) metric_result = tf_functioned_metric_fn(mean_metric, y_true, y_pred) self.assertAllClose(self.evaluate(metric_result), 10, 1e-2) metric_result = tf_functioned_metric_fn(sum_metric, y_true, y_pred) self.assertAllClose(self.evaluate(metric_result), 10, 1e-2) def test_metric_not_tracked_as_sublayer_in_layer(self): class MyLayer(base_layer.Layer): def __init__(self, **kwargs): super(MyLayer, self).__init__(**kwargs) self.mean_obj = metrics.Mean(name='my_mean_obj') def call(self, x): self.add_metric( tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor') self.add_metric(self.mean_obj(x)) return x layer = MyLayer() x = np.ones((1, 1)) layer(x) self.assertLen(list(layer._flatten_layers(include_self=False)), 0) self.assertLen(layer.metrics, 2) def test_metric_not_tracked_as_sublayer_in_model(self): class MyModel(training_module.Model): def __init__(self, **kwargs): super(MyModel, self).__init__(**kwargs) self.mean_obj = metrics.Mean(name='my_mean_obj') def call(self, x): self.add_metric( tf.reduce_sum(x), 
aggregation='mean', name='my_mean_tensor') self.add_metric(self.mean_obj(x)) return x model = MyModel() x = np.ones((1, 1)) model(x) self.assertLen(list(model._flatten_layers(include_self=False)), 0) self.assertLen(model.layers, 0) self.assertLen(model.metrics, 2) def test_invalid_custom_metric_class_error_msg(self): x = layers.Input(shape=(2,)) y = layers.Dense(3)(x) model = training_module.Model(x, y) class BadMetric(metrics.Metric): def update_state(self, y_true, y_pred, sample_weight=None): return def result(self): return with self.assertRaisesRegex(RuntimeError, 'can only be a single'): model.compile('sgd', 'mse', metrics=[BadMetric()]) model.fit(np.ones((10, 2)), np.ones((10, 3))) def test_invalid_custom_metric_fn_error_msg(self): x = layers.Input(shape=(2,)) y = layers.Dense(3)(x) model = training_module.Model(x, y) def bad_metric(y_true, y_pred, sample_weight=None): # pylint: disable=unused-argument return None def dict_metric(y_true, y_pred, sample_weight=None): # pylint: disable=unused-argument return {'value': 0.} with self.assertRaisesRegex(RuntimeError, 'The output of a metric function can only be'): model.compile('sgd', 'mse', metrics=[bad_metric]) model.fit(np.ones((10, 2)), np.ones((10, 3))) with self.assertRaisesRegex(RuntimeError, 'To return a dict of values, implement'): model.compile('sgd', 'mse', metrics=[dict_metric]) model.fit(np.ones((10, 2)), np.ones((10, 3))) def _get_model(compile_metrics): model_layers = [ layers.Dense(3, activation='relu', kernel_initializer='ones'), layers.Dense(1, activation='sigmoid', kernel_initializer='ones')] model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,)) model.compile( loss='mae', metrics=compile_metrics, optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) return model @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class ResetStatesTest(keras_parameterized.TestCase): def test_reset_state_false_positives(self): fp_obj = 
metrics.FalsePositives() model = _get_model([fp_obj]) x = np.ones((100, 4)) y = np.zeros((100, 1)) model.evaluate(x, y) self.assertEqual(self.evaluate(fp_obj.accumulator), 100.) model.evaluate(x, y) self.assertEqual(self.evaluate(fp_obj.accumulator), 100.) def test_reset_state_false_negatives(self): fn_obj = metrics.FalseNegatives() model = _get_model([fn_obj]) x = np.zeros((100, 4)) y = np.ones((100, 1)) model.evaluate(x, y) self.assertEqual(self.evaluate(fn_obj.accumulator), 100.) model.evaluate(x, y) self.assertEqual(self.evaluate(fn_obj.accumulator), 100.) def test_reset_state_true_negatives(self): tn_obj = metrics.TrueNegatives() model = _get_model([tn_obj]) x = np.zeros((100, 4)) y = np.zeros((100, 1)) model.evaluate(x, y) self.assertEqual(self.evaluate(tn_obj.accumulator), 100.) model.evaluate(x, y) self.assertEqual(self.evaluate(tn_obj.accumulator), 100.) def test_reset_state_true_positives(self): tp_obj = metrics.TruePositives() model = _get_model([tp_obj]) x = np.ones((100, 4)) y = np.ones((100, 1)) model.evaluate(x, y) self.assertEqual(self.evaluate(tp_obj.accumulator), 100.) model.evaluate(x, y) self.assertEqual(self.evaluate(tp_obj.accumulator), 100.) def test_reset_state_precision(self): p_obj = metrics.Precision() model = _get_model([p_obj]) x = np.concatenate((np.ones((50, 4)), np.ones((50, 4)))) y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1)))) model.evaluate(x, y) self.assertEqual(self.evaluate(p_obj.true_positives), 50.) self.assertEqual(self.evaluate(p_obj.false_positives), 50.) model.evaluate(x, y) self.assertEqual(self.evaluate(p_obj.true_positives), 50.) self.assertEqual(self.evaluate(p_obj.false_positives), 50.) def test_reset_state_recall(self): r_obj = metrics.Recall() model = _get_model([r_obj]) x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4)))) y = np.concatenate((np.ones((50, 1)), np.ones((50, 1)))) model.evaluate(x, y) self.assertEqual(self.evaluate(r_obj.true_positives), 50.) 
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.) model.evaluate(x, y) self.assertEqual(self.evaluate(r_obj.true_positives), 50.) self.assertEqual(self.evaluate(r_obj.false_negatives), 50.) def test_reset_state_sensitivity_at_specificity(self): s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1) model = _get_model([s_obj]) x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(s_obj.true_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_negatives), 25.) self.assertEqual(self.evaluate(s_obj.true_negatives), 25.) def test_reset_state_specificity_at_sensitivity(self): s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1) model = _get_model([s_obj]) x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(s_obj.true_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_negatives), 25.) self.assertEqual(self.evaluate(s_obj.true_negatives), 25.) def test_reset_state_precision_at_recall(self): s_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1) model = _get_model([s_obj]) x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(s_obj.true_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_negatives), 25.) 
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.) def test_reset_state_recall_at_precision(self): s_obj = metrics.RecallAtPrecision(precision=0.5, num_thresholds=1) model = _get_model([s_obj]) x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(s_obj.true_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_positives), 25.) self.assertEqual(self.evaluate(s_obj.false_negatives), 25.) self.assertEqual(self.evaluate(s_obj.true_negatives), 25.) def test_reset_state_auc(self): auc_obj = metrics.AUC(num_thresholds=3) model = _get_model([auc_obj]) x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.) def test_reset_state_auc_from_logits(self): auc_obj = metrics.AUC(num_thresholds=3, from_logits=True) model_layers = [layers.Dense(1, kernel_initializer='ones', use_bias=False)] model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,)) model.compile( loss='mae', metrics=[auc_obj], optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) x = np.concatenate((np.ones((25, 4)), -np.ones((25, 4)), -np.ones( (25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones( (25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.) 
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.) def test_reset_state_auc_manual_thresholds(self): auc_obj = metrics.AUC(thresholds=[0.5]) model = _get_model([auc_obj]) x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)), np.ones((25, 4)))) y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)), np.zeros((25, 1)))) for _ in range(2): model.evaluate(x, y) self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.) self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.) def test_reset_state_mean_iou(self): m_obj = metrics.MeanIoU(num_classes=2) model = _get_model([m_obj]) x = np.asarray([[0, 0, 0, 0], [1, 1, 1, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=np.float32) y = np.asarray([[0], [1], [1], [1]], dtype=np.float32) model.evaluate(x, y) self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1) self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1) model.evaluate(x, y) self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1) self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1) def test_reset_state_recall_float64(self): # Test case for GitHub issue 36790. try: backend.set_floatx('float64') r_obj = metrics.Recall() model = _get_model([r_obj]) x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4)))) y = np.concatenate((np.ones((50, 1)), np.ones((50, 1)))) model.evaluate(x, y) self.assertEqual(self.evaluate(r_obj.true_positives), 50.) self.assertEqual(self.evaluate(r_obj.false_negatives), 50.) model.evaluate(x, y) self.assertEqual(self.evaluate(r_obj.true_positives), 50.) self.assertEqual(self.evaluate(r_obj.false_negatives), 50.) 
finally: backend.set_floatx('float32') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class MergeStateTest(keras_parameterized.TestCase): def test_merge_state_incompatible_metrics(self): with self.assertRaisesRegex(ValueError, 'Metric .* is not compatible with .*'): obj1 = metrics.FalsePositives() self.evaluate(tf.compat.v1.variables_initializer(obj1.variables)) obj2 = metrics.Accuracy() self.evaluate(tf.compat.v1.variables_initializer(obj2.variables)) self.evaluate(obj1.merge_state([obj2])) def test_merge_state_accuracy(self): a_objs = [] for y_true, y_pred in zip([[[1], [2]], [[3], [4]]], [[[0], [2]], [[3], [4]]]): a_obj = metrics.Accuracy() a_objs.append(a_obj) self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables)) self.evaluate(a_obj.update_state(y_true, y_pred)) self.evaluate(a_objs[0].merge_state(a_objs[1:])) self.assertEqual(self.evaluate(a_objs[0].total), 3.) self.assertEqual(self.evaluate(a_objs[0].count), 4.) self.assertEqual(self.evaluate(a_objs[0].result()), 0.75) def test_merge_state_false_positives(self): fp_objs = [] for _ in range(4): fp_obj = metrics.FalsePositives() fp_objs.append(fp_obj) self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables)) y_true = np.zeros((25, 1)) y_pred = np.ones((25, 1)) self.evaluate(fp_obj.update_state(y_true, y_pred)) self.evaluate(fp_objs[0].merge_state(fp_objs[1:])) self.assertEqual(self.evaluate(fp_objs[0].accumulator), 100.) def test_merge_state_false_negatives(self): fn_objs = [] for _ in range(4): fn_obj = metrics.FalseNegatives() fn_objs.append(fn_obj) self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables)) y_true = np.ones((25, 1)) y_pred = np.zeros((25, 1)) self.evaluate(fn_obj.update_state(y_true, y_pred)) self.evaluate(fn_objs[0].merge_state(fn_objs[1:])) self.assertEqual(self.evaluate(fn_objs[0].accumulator), 100.) 
def test_merge_state_true_negatives(self): tn_objs = [] for _ in range(4): tn_obj = metrics.TrueNegatives() tn_objs.append(tn_obj) self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables)) y_true = np.zeros((25, 1)) y_pred = np.zeros((25, 1)) self.evaluate(tn_obj.update_state(y_true, y_pred)) self.evaluate(tn_objs[0].merge_state(tn_objs[1:])) self.assertEqual(self.evaluate(tn_objs[0].accumulator), 100.) def test_merge_state_true_positives(self): tp_objs = [] for _ in range(4): tp_obj = metrics.TruePositives() tp_objs.append(tp_obj) self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables)) y_true = np.ones((25, 1)) y_pred = np.ones((25, 1)) self.evaluate(tp_obj.update_state(y_true, y_pred)) self.evaluate(tp_objs[0].merge_state(tp_objs[1:])) self.assertEqual(self.evaluate(tp_objs[0].accumulator), 100.) def test_merge_state_precision(self): p_objs = [] for _ in range(5): p_obj = metrics.Precision() p_objs.append(p_obj) self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables)) y_true = np.concatenate((np.ones((10, 1)), np.zeros((10, 1)))) y_pred = np.concatenate((np.ones((10, 1)), np.ones((10, 1)))) self.evaluate(p_obj.update_state(y_true, y_pred)) self.evaluate(p_objs[0].merge_state(p_objs[1:])) self.assertEqual(self.evaluate(p_objs[0].true_positives), 50.) self.assertEqual(self.evaluate(p_objs[0].false_positives), 50.) def test_merge_state_recall(self): r_objs = [] for _ in range(5): r_obj = metrics.Recall() r_objs.append(r_obj) self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables)) y_true = np.concatenate((np.ones((10, 1)), np.ones((10, 1)))) y_pred = np.concatenate((np.ones((10, 1)), np.zeros((10, 1)))) self.evaluate(r_obj.update_state(y_true, y_pred)) self.evaluate(r_objs[0].merge_state(r_objs[1:])) self.assertEqual(self.evaluate(r_objs[0].true_positives), 50.) self.assertEqual(self.evaluate(r_objs[0].false_negatives), 50.) 
def test_merge_state_sensitivity_at_specificity(self): sas_objs = [] for _ in range(5): sas_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1) sas_objs.append(sas_obj) self.evaluate(tf.compat.v1.variables_initializer(sas_obj.variables)) y_true = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.ones( (5, 1)), np.zeros((5, 1)))) y_pred = np.concatenate((np.ones((5, 1)), np.zeros( (5, 1)), np.zeros((5, 1)), np.ones((5, 1)))) self.evaluate(sas_obj.update_state(y_true, y_pred)) self.evaluate(sas_objs[0].merge_state(sas_objs[1:])) self.assertEqual(self.evaluate(sas_objs[0].true_positives), 25.) self.assertEqual(self.evaluate(sas_objs[0].false_positives), 25.) self.assertEqual(self.evaluate(sas_objs[0].false_negatives), 25.) self.assertEqual(self.evaluate(sas_objs[0].true_negatives), 25.) def test_merge_state_specificity_at_sensitivity(self): sas_objs = [] for _ in range(5): sas_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1) sas_objs.append(sas_obj) self.evaluate(tf.compat.v1.variables_initializer(sas_obj.variables)) y_true = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.ones( (5, 1)), np.zeros((5, 1)))) y_pred = np.concatenate((np.ones((5, 1)), np.zeros( (5, 1)), np.zeros((5, 1)), np.ones((5, 1)))) self.evaluate(sas_obj.update_state(y_true, y_pred)) self.evaluate(sas_objs[0].merge_state(sas_objs[1:])) self.assertEqual(self.evaluate(sas_objs[0].true_positives), 25.) self.assertEqual(self.evaluate(sas_objs[0].false_positives), 25.) self.assertEqual(self.evaluate(sas_objs[0].false_negatives), 25.) self.assertEqual(self.evaluate(sas_objs[0].true_negatives), 25.) 
def test_merge_state_precision_at_recall(self): par_objs = [] for _ in range(5): par_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1) par_objs.append(par_obj) self.evaluate(tf.compat.v1.variables_initializer(par_obj.variables)) y_true = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.ones( (5, 1)), np.zeros((5, 1)))) y_pred = np.concatenate((np.ones((5, 1)), np.zeros( (5, 1)), np.zeros((5, 1)), np.ones((5, 1)))) self.evaluate(par_obj.update_state(y_true, y_pred)) self.evaluate(par_objs[0].merge_state(par_objs[1:])) self.assertEqual(self.evaluate(par_objs[0].true_positives), 25.) self.assertEqual(self.evaluate(par_objs[0].false_positives), 25.) self.assertEqual(self.evaluate(par_objs[0].false_negatives), 25.) self.assertEqual(self.evaluate(par_objs[0].true_negatives), 25.) def test_merge_state_recall_at_precision(self): rap_objs = [] for _ in range(5): rap_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1) rap_objs.append(rap_obj) self.evaluate(tf.compat.v1.variables_initializer(rap_obj.variables)) y_true = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.ones( (5, 1)), np.zeros((5, 1)))) y_pred = np.concatenate((np.ones((5, 1)), np.zeros( (5, 1)), np.zeros((5, 1)), np.ones((5, 1)))) self.evaluate(rap_obj.update_state(y_true, y_pred)) self.evaluate(rap_objs[0].merge_state(rap_objs[1:])) self.assertEqual(self.evaluate(rap_objs[0].true_positives), 25.) self.assertEqual(self.evaluate(rap_objs[0].false_positives), 25.) self.assertEqual(self.evaluate(rap_objs[0].false_negatives), 25.) self.assertEqual(self.evaluate(rap_objs[0].true_negatives), 25.) 
def test_merge_state_auc(self): auc_objs = [] for _ in range(5): auc_obj = metrics.AUC(num_thresholds=3) auc_objs.append(auc_obj) self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables)) y_true = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.ones( (5, 1)), np.zeros((5, 1)))) y_pred = np.concatenate((np.ones((5, 1)), np.zeros( (5, 1)), np.zeros((5, 1)), np.ones((5, 1)))) self.evaluate(auc_obj.update_state(y_true, y_pred)) self.evaluate(auc_objs[0].merge_state(auc_objs[1:])) self.assertEqual(self.evaluate(auc_objs[0].true_positives[1]), 25.) self.assertEqual(self.evaluate(auc_objs[0].false_positives[1]), 25.) self.assertEqual(self.evaluate(auc_objs[0].false_negatives[1]), 25.) self.assertEqual(self.evaluate(auc_objs[0].true_negatives[1]), 25.) def test_merge_state_mean_iou(self): m_objs = [] for y_true, y_pred in zip([[0], [1], [1], [1]], [[0.5], [1.0], [1.0], [1.0]]): m_obj = metrics.MeanIoU(num_classes=2) m_objs.append(m_obj) self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables)) self.evaluate(m_obj.update_state(y_true, y_pred)) self.evaluate(m_objs[0].merge_state(m_objs[1:])) self.assertArrayNear(self.evaluate(m_objs[0].total_cm)[0], [1, 0], 1e-1) self.assertArrayNear(self.evaluate(m_objs[0].total_cm)[1], [0, 3], 1e-1) if __name__ == '__main__': tf.test.main()
104,339
39.66251
97
py
keras
keras-master/keras/backend_config_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for backend_config.""" import tensorflow.compat.v2 as tf from keras import backend from keras import backend_config from keras import combinations @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendConfigTest(tf.test.TestCase): def test_backend(self): self.assertEqual(backend.backend(), 'tensorflow') def test_epsilon(self): epsilon = 1e-2 backend_config.set_epsilon(epsilon) self.assertEqual(backend_config.epsilon(), epsilon) backend_config.set_epsilon(1e-7) self.assertEqual(backend_config.epsilon(), 1e-7) def test_floatx(self): floatx = 'float64' backend_config.set_floatx(floatx) self.assertEqual(backend_config.floatx(), floatx) backend_config.set_floatx('float32') self.assertEqual(backend_config.floatx(), 'float32') def test_image_data_format(self): image_data_format = 'channels_first' backend_config.set_image_data_format(image_data_format) self.assertEqual(backend_config.image_data_format(), image_data_format) backend_config.set_image_data_format('channels_last') self.assertEqual(backend_config.image_data_format(), 'channels_last') if __name__ == '__main__': tf.test.main()
1,906
34.314815
80
py
keras
keras-master/keras/callbacks_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras callbacks.""" import tensorflow.compat.v2 as tf import collections import csv import json import os import re import shutil import sys import threading import time import unittest from absl.testing import parameterized import numpy as np import keras from keras import keras_parameterized from keras import testing_utils from keras.callbacks import BackupAndRestore from keras.engine import sequential from keras.layers import Activation from keras.layers import Dense from keras.optimizer_v2 import gradient_descent from keras.optimizer_v2 import learning_rate_schedule from keras.utils import np_utils from tensorflow.python.platform import tf_logging as logging try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None try: import requests # pylint:disable=g-import-not-at-top except ImportError: requests = None TRAIN_SAMPLES = 10 TEST_SAMPLES = 10 NUM_CLASSES = 2 INPUT_DIM = 3 NUM_HIDDEN = 5 BATCH_SIZE = 5 CALLBACK_HOOKS = [ 'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end', 'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin', 'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end', 'on_test_begin', 'on_test_end', 'on_train_batch_begin', 'on_train_batch_end', 'on_train_begin', 'on_train_end' ] class 
Counter(keras.callbacks.Callback): """Counts the number of times each callback method was run. Attributes: method_counts: dict. Contains the counts of time each callback method was run. """ def __init__(self): self.method_counts = collections.defaultdict(int) for method_name in CALLBACK_HOOKS: setattr(self, method_name, self.wrap_with_counts(method_name, getattr(self, method_name))) def wrap_with_counts(self, method_name, method): def _call_and_count(*args, **kwargs): self.method_counts[method_name] += 1 return method(*args, **kwargs) return _call_and_count class CallAllHooks(keras.callbacks.Callback): """A callback that calls self._run for all hooks""" def __init__(self): for method_name in CALLBACK_HOOKS: setattr(self, method_name, self._run) def _run(self, *args, logs=None): raise NotImplementedError def _get_numpy(): return np.ones((10, 10)), np.ones((10, 1)) def _get_sequence(): class MySequence(keras.utils.data_utils.Sequence): def __getitem__(self, _): return np.ones((2, 10)), np.ones((2, 1)) def __len__(self): return 5 return MySequence(), None @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class CallbackCountsTest(keras_parameterized.TestCase): def _check_counts(self, counter, expected_counts): """Checks that the counts registered by `counter` are those expected.""" for method_name, expected_count in expected_counts.items(): self.assertEqual( counter.method_counts[method_name], expected_count, msg='For method {}: expected {}, got: {}'.format( method_name, expected_count, counter.method_counts[method_name])) def _get_model(self): layers = [ keras.layers.Dense(10, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ] model = testing_utils.get_model_from_layers(layers, input_shape=(10,)) model.compile( tf.compat.v1.train.AdamOptimizer(0.001), 'binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly()) return model @parameterized.named_parameters(('with_numpy', _get_numpy()), ('with_sequence', 
_get_sequence())) def test_callback_hooks_are_called_in_fit(self, data): if not tf.executing_eagerly(): self.skipTest('Behavior changed in v2.') x, y = data val_x, val_y = np.ones((4, 10)), np.ones((4, 1)) model = self._get_model() counter = Counter() model.fit( x, y, validation_data=(val_x, val_y), batch_size=2, steps_per_epoch=5, epochs=5, callbacks=[counter]) self._check_counts( counter, { 'on_batch_begin': 25, 'on_batch_end': 25, 'on_epoch_begin': 5, 'on_epoch_end': 5, 'on_predict_batch_begin': 0, 'on_predict_batch_end': 0, 'on_predict_begin': 0, 'on_predict_end': 0, 'on_test_batch_begin': 10, 'on_test_batch_end': 10, 'on_test_begin': 5, 'on_test_end': 5, 'on_train_batch_begin': 25, 'on_train_batch_end': 25, 'on_train_begin': 1, 'on_train_end': 1 }) @parameterized.named_parameters(('with_numpy', _get_numpy()), ('with_sequence', _get_sequence())) def test_callback_hooks_are_called_in_evaluate(self, data): x, y = data is_sequence = isinstance(x, keras.utils.data_utils.Sequence) model = self._get_model() counter = Counter() model.evaluate( x, y, batch_size=2 if not is_sequence else None, steps=5 if is_sequence else None, callbacks=[counter]) self._check_counts( counter, { 'on_test_batch_begin': 5, 'on_test_batch_end': 5, 'on_test_begin': 1, 'on_test_end': 1 }) @parameterized.named_parameters(('with_numpy', _get_numpy()), ('with_sequence', _get_sequence())) def test_callback_hooks_are_called_in_predict(self, data): x = data[0] is_sequence = isinstance(x, keras.utils.data_utils.Sequence) model = self._get_model() counter = Counter() model.predict( x, batch_size=2 if not is_sequence else None, steps=5 if is_sequence else None, callbacks=[counter]) self._check_counts( counter, { 'on_predict_batch_begin': 5, 'on_predict_batch_end': 5, 'on_predict_begin': 1, 'on_predict_end': 1 }) def test_callback_list_methods(self): counter = Counter() callback_list = keras.callbacks.CallbackList([counter]) batch = 0 callback_list.on_test_batch_begin(batch) 
callback_list.on_test_batch_end(batch) callback_list.on_predict_batch_begin(batch) callback_list.on_predict_batch_end(batch) self._check_counts( counter, { 'on_test_batch_begin': 1, 'on_test_batch_end': 1, 'on_predict_batch_begin': 1, 'on_predict_batch_end': 1 }) class KerasCallbacksTest(keras_parameterized.TestCase): def _get_model(self, input_shape=None, additional_metrics=None): additional_metrics = additional_metrics or [] layers = [ keras.layers.Dense(3, activation='relu'), keras.layers.Dense(2, activation='softmax') ] model = testing_utils.get_model_from_layers(layers, input_shape=input_shape) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')] + additional_metrics, run_eagerly=testing_utils.should_run_eagerly()) return model @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_progbar_logging(self): model = self._get_model(input_shape=(3,)) x = tf.ones((200, 3)) y = tf.zeros((200, 2)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) expected_log = r'(.*- loss:.*- my_acc:.*)+' with self.captureWritesToStream(sys.stdout) as printed: model.fit(dataset, epochs=2, steps_per_epoch=10) self.assertRegex(printed.contents(), expected_log) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_progbar_logging_with_stateful_metrics(self): class AddAllOnes(keras.metrics.Metric): """A simple metric that adds all the one's in `y_true`.""" def __init__(self, name='add_all_ones', **kwargs): super(AddAllOnes, self).__init__(name=name, **kwargs) self.total = self.add_weight(name='total', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): self.total.assign_add( tf.cast(tf.reduce_sum(y_true), dtype=tf.float32)) def result(self): return self.total x_train = np.array([[0, 1, 0, 1, 0, 1, 0, 1]] * 8).astype(float) y_train = np.array([[1, 0], [0, 0], [1, 1], [1, 0], [0, 1], [1, 0], [1, 0], [0, 0]]) # 
There are 7 ones in total in `y_train` after two batches. expected_log = r'(.*- loss:.*- my_acc:.*- add_all_ones: 7.0000)+' with self.captureWritesToStream(sys.stdout) as printed: model = self._get_model( input_shape=(8,), additional_metrics=[AddAllOnes()]) model.fit(x_train, y_train, verbose=1, batch_size=4, shuffle=False) self.assertRegex(printed.contents(), expected_log) # When not executing eagerly, `model.evaluate` does not have the metrics # results printed. if tf.executing_eagerly(): with self.captureWritesToStream(sys.stdout) as printed: model = self._get_model( input_shape=(8,), additional_metrics=[AddAllOnes()]) model.evaluate(x_train, y_train, verbose=1, batch_size=4) self.assertRegex(printed.contents(), expected_log) @keras_parameterized.run_all_keras_modes def test_trivial_backup_restore(self): if testing_utils.should_run_eagerly(): model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') cbk = BackupAndRestore(self.get_temp_dir()) model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=0, callbacks=[cbk]) def test_backup_restore_train_counter(self): model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') cbk = BackupAndRestore(self.get_temp_dir()) class InterruptingCallback(keras.callbacks.Callback): """A callback to intentionally introduce interruption to training.""" def on_epoch_end(self, epoch, log=None): logging.info(f'counter: {model._train_counter}') if epoch == 5 or epoch == 12: raise RuntimeError('Interruption') log_dir = self.get_temp_dir() # The following asserts that the train counter is fault tolerant. 
self.assertEqual(model._train_counter.numpy(), 0) try: model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20, callbacks=[cbk, InterruptingCallback()]) except RuntimeError: pass self.assertEqual(model._train_counter.numpy(), 6) try: model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20, callbacks=[cbk, InterruptingCallback()]) except RuntimeError: pass self.assertEqual(model._train_counter.numpy(), 13) @keras_parameterized.run_all_keras_modes def test_callback_warning(self): class SleepCallback(keras.callbacks.Callback): def on_train_batch_end(self, batch, logs=None): time.sleep(0.1) model = sequential.Sequential() model.add(keras.layers.Dense(1)) model.compile( 'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) warning_messages = [] def warning(msg): warning_messages.append(msg) with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning): model.fit( np.ones((16, 1), 'float32'), np.ones((16, 1), 'float32'), batch_size=3, epochs=1, callbacks=[SleepCallback()]) warning_msg = ('Callback method `on_train_batch_end` is slow compared ' 'to the batch time') self.assertIn(warning_msg, '\n'.join(warning_messages)) @keras_parameterized.run_all_keras_modes def test_default_callbacks_no_warning(self): # Test that without the callback no warning is raised model = sequential.Sequential() model.add(keras.layers.Dense(1)) model.compile( 'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) warning_messages = [] def warning(msg): warning_messages.append(msg) with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning): model.fit( np.ones((16, 1), 'float32'), np.ones((16, 1), 'float32'), batch_size=3, epochs=1) self.assertListEqual(warning_messages, []) @keras_parameterized.run_with_all_model_types(exclude_models='functional') @keras_parameterized.run_all_keras_modes def test_progbar_logging_deferred_model_build(self): model = self._get_model() self.assertFalse(model.built) x = tf.ones((200, 3)) y = tf.zeros((200, 2)) dataset = 
tf.data.Dataset.from_tensor_slices((x, y)).batch(10) expected_log = r'(.*- loss:.*- my_acc:.*)+' with self.captureWritesToStream(sys.stdout) as printed: model.fit(dataset, epochs=2, steps_per_epoch=10) self.assertRegex(printed.contents(), expected_log) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_progbar_logging_validation_data(self): model = self._get_model(input_shape=(3,)) x = tf.ones((50, 3)) y = tf.zeros((50, 2)) training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+' with self.captureWritesToStream(sys.stdout) as printed: model.fit(training_dataset, epochs=2, validation_data=val_dataset) self.assertRegex(printed.contents(), expected_log) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_progbar_logging_validation_split(self): model = self._get_model(input_shape=(3,)) x = np.ones((100, 3)) y = np.zeros((100, 2)) expected_log = ( r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:' r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*') with self.captureWritesToStream(sys.stdout) as printed: model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2) self.assertRegex(printed.contents(), expected_log) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_progbar_logging_training_validation(self): model = self._get_model(input_shape=(2,)) def generator(): for _ in range(100): yield [1, 1], 1 training = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) \ .repeat() validation = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) expected_log = ( 
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:' r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*') with self.captureWritesToStream(sys.stdout) as printed: model.fit( x=training, validation_data=validation, epochs=2, steps_per_epoch=20) self.assertRegex(printed.contents(), expected_log) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_progbar_logging_with_dataset_and_partial_batch(self): model = self._get_model(input_shape=(2,)) def generator(): # Have a partial batch at the end. for _ in range(9): yield np.random.random(2), 1 training = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) validation = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) with self.captureWritesToStream(sys.stdout) as printed: model.fit(x=training, validation_data=validation) # Make sure the value of val_ metrics are not zeros. log_content = printed.contents() val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content) self.assertLen(val_loss, 1) self.assertGreater(float(val_loss[0]), 0.0) @keras_parameterized.run_with_all_model_types def test_ModelCheckpoint(self): if h5py is None: return # Skip test if models cannot be saved. model_type = testing_utils.get_model_type() if model_type == 'subclass': return # Skip test since subclassed models cannot be saved in .h5 format. 
layers = [ keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'), keras.layers.Dense(NUM_CLASSES, activation='softmax') ] model = testing_utils.get_model_from_layers(layers, input_shape=(3,)) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) filepath = os.path.join(temp_dir, 'checkpoint.h5') (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) # Case 1 monitor = 'val_loss' save_best_only = False mode = 'auto' cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 2 mode = 'min' cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 3 mode = 'max' monitor = 'val_acc' cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 4 save_best_only = True cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) 
os.remove(filepath) # Case 5: metric not available. cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor='unknown', save_best_only=True) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) # File won't be written. assert not os.path.exists(filepath) # Case 6 save_best_only = False period = 2 mode = 'auto' filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, period=period) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=4, verbose=1) assert os.path.exists(filepath.format(epoch=2)) assert os.path.exists(filepath.format(epoch=4)) os.remove(filepath.format(epoch=2)) os.remove(filepath.format(epoch=4)) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=3)) # Invalid use: this will raise a warning but not an Exception. keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode='unknown') # Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`. # Though `period` is deprecated, we're testing it for # backward-compatibility. 
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5) ] assert not os.path.exists(filepath.format(epoch=0)) assert not os.path.exists(filepath.format(epoch=5)) model.fit( x_train, y_train, batch_size=2, validation_data=(x_test, y_test), callbacks=cbks, epochs=10, verbose=1) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=2)) assert not os.path.exists(filepath.format(epoch=3)) assert not os.path.exists(filepath.format(epoch=4)) assert os.path.exists(filepath.format(epoch=5)) assert not os.path.exists(filepath.format(epoch=6)) assert os.path.exists(filepath.format(epoch=10)) os.remove(filepath.format(epoch=5)) os.remove(filepath.format(epoch=10)) # Case 8: `ModelCheckpoint` with an integer `save_freq` filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq=15, period=100) # The period should be ignored (this test tests this). ] assert not os.path.exists(filepath.format(epoch=3)) model.fit( x_train, y_train, batch_size=2, validation_data=(x_test, y_test), callbacks=cbks, epochs=10, verbose=1) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=2)) assert os.path.exists(filepath.format(epoch=3)) assert not os.path.exists(filepath.format(epoch=4)) assert not os.path.exists(filepath.format(epoch=5)) assert os.path.exists(filepath.format(epoch=6)) assert not os.path.exists(filepath.format(epoch=7)) assert not os.path.exists(filepath.format(epoch=8)) assert os.path.exists(filepath.format(epoch=9)) os.remove(filepath.format(epoch=3)) os.remove(filepath.format(epoch=6)) os.remove(filepath.format(epoch=9)) # Case 9: `ModelCheckpoint` with valid and invalid save_freq argument. 
with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'): keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq='invalid_save_freq') # The following should not raise ValueError. keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq='epoch') keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq=3) # Case 10: `ModelCheckpoint` with valid and invalid `options` argument. with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'): keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=True, mode=mode, options=tf.saved_model.SaveOptions()) with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'): keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=False, mode=mode, options=tf.train.CheckpointOptions()) keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=True, mode=mode, options=tf.train.CheckpointOptions()) keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=False, mode=mode, options=tf.saved_model.SaveOptions()) # Case 11: `ModelCheckpoint` save model with batch number in filename. 
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}batch{batch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint(filepath, monitor=monitor, save_freq=1) ] assert not os.path.exists(filepath.format(epoch=1, batch=1)) assert not os.path.exists(filepath.format(epoch=1, batch=2)) assert not os.path.exists(filepath.format(epoch=2, batch=1)) assert not os.path.exists(filepath.format(epoch=2, batch=2)) assert not os.path.exists(filepath.format(epoch=3, batch=1)) assert not os.path.exists(filepath.format(epoch=3, batch=2)) assert not os.path.exists(filepath.format(epoch=4, batch=1)) assert not os.path.exists(filepath.format(epoch=4, batch=2)) assert not os.path.exists(filepath.format(epoch=5, batch=1)) assert not os.path.exists(filepath.format(epoch=5, batch=2)) model.fit( x_train, y_train, batch_size=5, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=1) assert os.path.exists(filepath.format(epoch=1, batch=1)) assert os.path.exists(filepath.format(epoch=1, batch=2)) assert os.path.exists(filepath.format(epoch=2, batch=1)) assert os.path.exists(filepath.format(epoch=2, batch=2)) assert os.path.exists(filepath.format(epoch=3, batch=1)) assert os.path.exists(filepath.format(epoch=3, batch=2)) assert os.path.exists(filepath.format(epoch=4, batch=1)) assert os.path.exists(filepath.format(epoch=4, batch=2)) assert os.path.exists(filepath.format(epoch=5, batch=1)) assert os.path.exists(filepath.format(epoch=5, batch=2)) os.remove(filepath.format(epoch=1, batch=1)) os.remove(filepath.format(epoch=1, batch=2)) os.remove(filepath.format(epoch=2, batch=1)) os.remove(filepath.format(epoch=2, batch=2)) os.remove(filepath.format(epoch=3, batch=1)) os.remove(filepath.format(epoch=3, batch=2)) os.remove(filepath.format(epoch=4, batch=1)) os.remove(filepath.format(epoch=4, batch=2)) os.remove(filepath.format(epoch=5, batch=1)) os.remove(filepath.format(epoch=5, batch=2)) @testing_utils.run_v2_only def test_ModelCheckpoint_subclass_save_weights_false(self): 
model = testing_utils.get_small_subclass_mlp(NUM_HIDDEN, NUM_CLASSES) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) filepath = os.path.join(temp_dir, 'checkpoint') cbks = [keras.callbacks.ModelCheckpoint( filepath, save_weights_only=False)] (x_train, y_train), _ = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_train = np_utils.to_categorical(y_train, num_classes=NUM_CLASSES) model.fit( x_train, y_train, callbacks=cbks, epochs=1, verbose=0) # Check that the filepath is a SavedModel directory. self.assertIn('saved_model.pb', os.listdir(filepath)) def _get_dummy_resource_for_model_checkpoint_testing(self): def get_input_datasets(): # Simple training input. train_input = [[1.]] * 16 train_label = [[0.]] * 16 ds = tf.data.Dataset.from_tensor_slices((train_input, train_label)) return ds.batch(8, drop_remainder=True) # Very simple bias model to eliminate randomness. optimizer = gradient_descent.SGD(0.1) model = sequential.Sequential() model.add(testing_utils.Bias(input_shape=(1,))) model.compile(loss='mae', optimizer=optimizer, metrics=['mae']) train_ds = get_input_datasets() temp_dir = self.get_temp_dir() filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5') # The filepath shouldn't exist at the beginning. self.assertFalse(os.path.exists(filepath)) callback = keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=True) return model, train_ds, callback, filepath def _run_load_weights_on_restart_test_common_iterations(self): (model, train_ds, callback, filepath) = self._get_dummy_resource_for_model_checkpoint_testing() initial_epochs = 3 model.fit(train_ds, epochs=initial_epochs, callbacks=[callback]) # The files should exist after fitting with callback. 
for epoch in range(initial_epochs): self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1))) self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1))) self.assertEqual( callback._get_most_recently_modified_file_matching_pattern(filepath), filepath.format(epoch=initial_epochs)) model.fit(train_ds, epochs=1) weights_after_one_more_epoch = model.get_weights() # The filepath should continue to exist after fitting without callback. for epoch in range(initial_epochs): self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1))) return model, train_ds, filepath, weights_after_one_more_epoch @staticmethod def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only): def func(self): (model, train_ds, filepath, weights_after_one_more_epoch ) = self._run_load_weights_on_restart_test_common_iterations() # Sleep for some short time period ensuring the files are created with # a different time (in MacOS OSS the granularity is only 1 second). time.sleep(2) callback = keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=save_weights_only, load_weights_on_restart=True) model.fit(train_ds, epochs=1, callbacks=[callback]) weights_after_model_restoring_and_one_more_epoch = model.get_weights() self.assertEqual( callback._get_most_recently_modified_file_matching_pattern(filepath), filepath.format(epoch=1)) model.fit( train_ds, epochs=1, callbacks=[ keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=save_weights_only, load_weights_on_restart=True) ]) weights_with_one_final_extra_epoch = model.get_weights() # Asserting the weights one epoch after initial fitting and another epoch # after that are closed, if a ModelCheckpoint with # load_weights_on_restart=True is given (so the model is restored at the # beginning of training). 
self.assertAllClose(weights_after_one_more_epoch, weights_after_model_restoring_and_one_more_epoch) self.assertNotAllClose(weights_after_one_more_epoch, weights_with_one_final_extra_epoch) return func @staticmethod def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only): def func(self): (model, train_ds, filepath, weights_after_one_more_epoch ) = self._run_load_weights_on_restart_test_common_iterations() model.fit( train_ds, epochs=1, callbacks=[ keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=save_weights_only) ]) weights_after_model_restoring_and_one_more_epoch = model.get_weights() # Asserting the weights one epoch after initial fitting and another epoch # after that are different, if a ModelCheckpoint with # load_weights_on_restart=False is given (so the model is not restored at # the beginning of training). self.assertNotAllClose(weights_after_one_more_epoch, weights_after_model_restoring_and_one_more_epoch) return func test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \ get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True) test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \ get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False) test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \ get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True) test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \ = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False) def test_ModelCheckpoint_override_if_file_exist(self): (model, train_ds, filepath, _) = self._run_load_weights_on_restart_test_common_iterations() # Sleep for some short time period to ensure the files are created with # a different time (in MacOS OSS the granularity is only 1 second). 
time.sleep(2) callback = keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=True) model.load_weights( callback._get_most_recently_modified_file_matching_pattern(filepath)) weights_before_additional_fit = model.get_weights() model.fit(train_ds, epochs=1, callbacks=[callback]) model.load_weights( callback._get_most_recently_modified_file_matching_pattern(filepath)) weights_after_additional_fit = model.get_weights() self.assertNotAllClose(weights_before_additional_fit, weights_after_additional_fit) def test_fit_with_ModelCheckpoint_with_tf_config(self): (model, train_ds, callback, _) = self._get_dummy_resource_for_model_checkpoint_testing() os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ['localhost:23333'] }, 'task': { 'type': 'worker', 'index': 0 } }) # `model.fit()` should work regardless of the presence of `TF_CONFIG`. model.fit(train_ds, epochs=1, callbacks=[callback]) def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self): (model, train_ds, callback, filepath) = self._get_dummy_resource_for_model_checkpoint_testing() temp_dir = self.get_temp_dir() filepath = os.path.join(temp_dir, 'temp.h5') self.assertFalse(os.path.exists(filepath)) os.mkdir(filepath) self.assertTrue(os.path.exists(filepath)) callback = keras.callbacks.ModelCheckpoint(filepath=filepath) with self.assertRaisesRegex( IOError, 'Please specify a non-directory ' 'filepath for ModelCheckpoint.'): model.fit(train_ds, epochs=1, callbacks=[callback]) def test_ModelCheckpoint_with_bad_path_placeholders(self): (model, train_ds, callback, filepath) = self._get_dummy_resource_for_model_checkpoint_testing() temp_dir = self.get_temp_dir() filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5') callback = keras.callbacks.ModelCheckpoint(filepath=filepath) with self.assertRaisesRegex(KeyError, 'Failed to format this callback ' 'filepath.*'): model.fit(train_ds, epochs=1, callbacks=[callback]) def test_ModelCheckpoint_nonblocking(self): filepath = 
self.get_temp_dir() # Should only cause a sync block when saving is actually performed. callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100) self.assertTrue(callback._supports_tf_logs) model = keras.Sequential([keras.layers.Dense(1)]) cb_list = keras.callbacks.CallbackList([callback], model=model, epochs=1, steps=10, verbose=0) tensor = tf.convert_to_tensor(1.) def mock_numpy(): raise RuntimeError( 'If this error is seen, ModelCheckpoint is causing a blocking ' 'NumPy conversion even when not checkpointing.') tensor.numpy = mock_numpy logs = {'metric': tensor} cb_list.on_train_begin(logs) cb_list.on_epoch_begin(0, logs) cb_list.on_train_batch_begin(0, logs) cb_list.on_train_batch_end(0, logs) cb_list.on_epoch_end(0, logs) cb_list.on_train_end(logs) cb_list.on_test_begin(logs) cb_list.on_test_batch_begin(0, logs) cb_list.on_test_batch_end(0, logs) cb_list.on_test_end(logs) cb_list.on_predict_begin(logs) cb_list.on_predict_batch_begin(logs) cb_list.on_predict_batch_end(logs) cb_list.on_predict_end(logs) def test_verbose_2_logging(self): data = np.random.random((100, 1)) labels = np.where(data > 0.5, 1, 0) model = keras.models.Sequential((keras.layers.Dense( 1, input_dim=1, activation='relu'), keras.layers.Dense( 1, activation='sigmoid'),)) model.compile( optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) expected_log = r'(.*- loss:.*- accuracy:.*epoch)+' with self.captureWritesToStream(sys.stdout) as printed: model.fit(data, labels, verbose=2, epochs=20) self.assertRegex(printed.contents(), expected_log) def test_ProgbarLogger_verbose_2_nonblocking(self): # Should only cause a sync block on epoch end methods. callback = keras.callbacks.ProgbarLogger(count_mode='steps') self.assertTrue(callback._supports_tf_logs) model = keras.Sequential([keras.layers.Dense(1)]) cb_list = keras.callbacks.CallbackList([callback], model=model, epochs=1, steps=10, verbose=2) tensor = tf.convert_to_tensor(1.) 
def mock_numpy(): raise RuntimeError( 'If this error is seen, ModelCheckpoint is causing a blocking ' 'NumPy conversion even when not checkpointing.') tensor.numpy = mock_numpy logs = {'metric': tensor} cb_list.on_train_begin(logs) cb_list.on_epoch_begin(0, logs) cb_list.on_train_batch_begin(0, logs) cb_list.on_train_batch_end(0, logs) cb_list.on_test_begin(logs) cb_list.on_test_batch_begin(0, logs) cb_list.on_test_batch_end(0, logs) cb_list.on_test_end(logs) with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'): # on_epoch_end should still block. cb_list.on_epoch_end(0, logs) cb_list.on_train_end(logs) def test_EarlyStopping(self): with self.cached_session(): np.random.seed(123) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = testing_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) cases = [ ('max', 'val_acc'), ('min', 'val_loss'), ('auto', 'val_acc'), ('auto', 'loss'), ('unknown', 'unknown') ] for mode, monitor in cases: patience = 0 cbks = [ keras.callbacks.EarlyStopping( patience=patience, monitor=monitor, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=0) def test_EarlyStopping_reuse(self): with self.cached_session(): np.random.seed(1337) patience = 3 data = np.random.random((100, 1)) labels = np.where(data > 0.5, 1, 0) model = keras.models.Sequential((keras.layers.Dense( 1, input_dim=1, activation='relu'), keras.layers.Dense( 1, activation='sigmoid'),)) model.compile( optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) weights = model.get_weights() # This should allow training to go for at least 
`patience` epochs model.set_weights(weights) stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience) hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) >= patience def test_EarlyStopping_with_baseline(self): with self.cached_session(): np.random.seed(1337) baseline = 0.6 (data, labels), _ = testing_utils.get_test_data( train_samples=100, test_samples=50, input_shape=(1,), num_classes=NUM_CLASSES) model = testing_utils.get_small_sequential_mlp( num_hidden=1, num_classes=1, input_dim=1) model.compile( optimizer='sgd', loss='binary_crossentropy', metrics=['acc']) stopper = keras.callbacks.EarlyStopping(monitor='acc', baseline=baseline) hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) == 2 patience = 3 stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience, baseline=baseline) hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) >= patience def test_EarlyStopping_final_weights_when_restoring_model_weights(self): class DummyModel: def __init__(self): self.stop_training = False self.weights = -1 def get_weights(self): return self.weights def set_weights(self, weights): self.weights = weights def set_weight_to_epoch(self, epoch): self.weights = epoch early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True) early_stop.model = DummyModel() losses = [0.2, 0.15, 0.1, 0.11, 0.12] # The best configuration is in the epoch 2 (loss = 0.1000). epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.model.set_weight_to_epoch(epoch=epoch) early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break # The best configuration is in epoch 2 (loss = 0.1000), # and while patience = 2, we're restoring the best weights, # so we end up at the epoch with the best weights, i.e. 
epoch 2 self.assertEqual(early_stop.model.get_weights(), 2) # Check early stopping when no model beats the baseline. early_stop = keras.callbacks.EarlyStopping( monitor='val_loss', patience=5, baseline=0.5, restore_best_weights=True) early_stop.model = DummyModel() losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73] # The best configuration is in the epoch 2 (loss = 0.7000). epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.model.set_weight_to_epoch(epoch=epoch) early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break # No epoch improves on the baseline, so we should train for only 5 epochs, # and restore the second model. self.assertEqual(epochs_trained, 5) self.assertEqual(early_stop.model.get_weights(), 2) def test_RemoteMonitor(self): if requests is None: self.skipTest('`requests` required to run this test') return None monitor = keras.callbacks.RemoteMonitor() # This will raise a warning since the default address in unreachable: monitor.on_epoch_end(0, logs={'loss': 0.}) def test_LearningRateScheduler(self): with self.cached_session(): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = testing_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. 
+ x))] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=0) assert ( float(keras.backend.get_value( model.optimizer.lr)) - 0.2) < keras.backend.epsilon() cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)] model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) assert ( float(keras.backend.get_value( model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon() cbks = [ keras.callbacks.LearningRateScheduler( lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2) (epoch)) ] model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2))) decayed_learning_rate = 0.01 * cosine_decay_np assert (float(keras.backend.get_value(model.optimizer.lr)) - decayed_learning_rate) < keras.backend.epsilon() def test_ReduceLROnPlateau(self): with self.cached_session(): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): tf.compat.v1.set_random_seed(1234) np.random.seed(1337) model = testing_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.SGD(lr=0.1)) return model # TODO(psv): Make sure the callback works correctly when min_delta is # set as 0. Test fails when the order of this callback and assertion is # interchanged. 
model = make_model() cbks = [ keras.callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) self.assertAllClose( float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4) model = make_model() # This should reduce the LR after the first epoch (due to high epsilon). cbks = [ keras.callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=2) self.assertAllClose( float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4) def test_ReduceLROnPlateau_patience(self): class DummyOptimizer: def __init__(self): self.lr = keras.backend.variable(1.0) class DummyModel: def __init__(self): self.optimizer = DummyOptimizer() reduce_on_plateau = keras.callbacks.ReduceLROnPlateau( monitor='val_loss', patience=2) reduce_on_plateau.model = DummyModel() losses = [0.0860, 0.1096, 0.1040] lrs = [] for epoch in range(len(losses)): reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr)) # The learning rates should be 1.0 except the last one for lr in lrs[:-1]: self.assertEqual(lr, 1.0) self.assertLess(lrs[-1], 1.0) def test_ReduceLROnPlateau_backwards_compatibility(self): with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13) self.assertRegex( str(mock_log.call_args), '`epsilon` argument is deprecated') self.assertFalse(hasattr(reduce_on_plateau, 'epsilon')) self.assertTrue(hasattr(reduce_on_plateau, 'min_delta')) self.assertEqual(reduce_on_plateau.min_delta, 1e-13) def test_CSVLogger(self): with self.cached_session(): np.random.seed(1337) temp_dir = self.get_temp_dir() 
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) filepath = os.path.join(temp_dir, 'log.tsv') sep = '\t' (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): np.random.seed(1337) model = testing_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.SGD(lr=0.1), metrics=['accuracy']) return model # case 1, create new file with defined separator model = make_model() cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) with open(filepath) as csvfile: dialect = csv.Sniffer().sniff(csvfile.read()) assert dialect.delimiter == sep del model del cbks # case 2, append data to existing file, skip header model = make_model() cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) # case 3, reuse of CSVLogger object model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) with open(filepath) as csvfile: list_lines = csvfile.readlines() for line in list_lines: assert line.count(sep) == 4 assert len(list_lines) == 5 output = ' '.join(list_lines) assert len(re.findall('epoch', output)) == 1 os.remove(filepath) def test_stop_training_csv(self): # Test that using the CSVLogger callback with the TerminateOnNaN callback # does not result in invalid CSVs. 
np.random.seed(1337) tmpdir = self.get_temp_dir() self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True) with self.cached_session(): fp = os.path.join(tmpdir, 'test.csv') (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)] model = keras.models.Sequential() for _ in range(5): model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu')) model.add(keras.layers.Dense(NUM_CLASSES, activation='linear')) model.compile(loss='mean_squared_error', optimizer='rmsprop') def data_generator(): i = 0 max_batch_index = len(x_train) // BATCH_SIZE tot = 0 while 1: if tot > 3 * len(x_train): yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan, np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan) else: yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE], y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]) i += 1 tot += 1 i %= max_batch_index history = model.fit_generator(data_generator(), len(x_train) // BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) > 1 assert loss[-1] == np.inf or np.isnan(loss[-1]) values = [] with open(fp) as f: for x in csv.reader(f): # In windows, due to \r\n line ends we may end up reading empty lines # after each line. Skip empty lines. if x: values.append(x) assert 'nan' in values[-1], 'The last epoch was not logged.' 
@keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_TerminateOnNaN(self): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [keras.callbacks.TerminateOnNaN()] model = keras.models.Sequential() initializer = keras.initializers.Constant(value=1e5) for _ in range(5): model.add( keras.layers.Dense( 2, input_dim=INPUT_DIM, activation='relu', kernel_initializer=initializer)) model.add(keras.layers.Dense(NUM_CLASSES)) model.compile(loss='mean_squared_error', optimizer='rmsprop') history = model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] self.assertEqual(len(loss), 1) self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0])) @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') def test_LambdaCallback(self): with self.cached_session(): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = keras.models.Sequential() model.add( keras.layers.Dense( NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu')) model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # Start an arbitrary process that should run during model # training and be terminated after training has completed. 
e = threading.Event() def target(): e.wait() t = threading.Thread(target=target) t.start() cleanup_callback = keras.callbacks.LambdaCallback( on_train_end=lambda logs: e.set()) cbks = [cleanup_callback] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=0) t.join() assert not t.is_alive() def test_RemoteMonitor_np_array(self): if requests is None: self.skipTest('`requests` required to run this test') with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post: monitor = keras.callbacks.RemoteMonitor(send_as_json=True) a = np.arange(1) # a 1 by 1 array logs = {'loss': 0., 'val': a} monitor.on_epoch_end(0, logs=logs) send = {'loss': 0., 'epoch': 0, 'val': 0} requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers) def test_RemoteMonitor_np_float32(self): if requests is None: self.skipTest('`requests` required to run this test') with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post: monitor = keras.callbacks.RemoteMonitor(send_as_json=True) a = np.float32(1.0) # a float32 generic type logs = {'loss': 0., 'val': a} monitor.on_epoch_end(0, logs=logs) send = {'loss': 0., 'epoch': 0, 'val': 1.0} requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers) def test_RemoteMonitorWithJsonPayload(self): if requests is None: self.skipTest('`requests` required to run this test') return None with self.cached_session(): (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = keras.utils.np_utils.to_categorical(y_test) y_train = keras.utils.np_utils.to_categorical(y_train) model = keras.models.Sequential() model.add( keras.layers.Dense( NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu')) model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax')) 
model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)] with tf.compat.v1.test.mock.patch.object(requests, 'post'): model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1) def test_progbar_infers_steps(self): x, y = np.ones((10, 1)), np.ones((10, 1)) data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2) data = data.filter(lambda x, y: True) # Unknown cardinality. progbar = keras.callbacks.ProgbarLogger('steps') model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') self.assertIsNone(progbar.target) model.fit(data, epochs=2, callbacks=[progbar]) self.assertEqual(progbar.target, 5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_callback_passed_floats(self): class MyCallback(keras.callbacks.Callback): def on_batch_end(self, batch, logs=None): assert isinstance(batch, int) assert isinstance(logs['loss'], float) self.on_batch_end_called = True def on_epoch_end(self, batch, logs=None): assert isinstance(batch, int) assert isinstance(logs['loss'], float) self.on_epoch_end_called = True x, y = np.ones((10, 1)), np.ones((10, 1)) model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) callback = MyCallback() model.fit(x, y, epochs=2, callbacks=[callback]) self.assertTrue(callback.on_batch_end_called) self.assertTrue(callback.on_batch_end_called) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_implements_batch_hooks(self): class MyCallbackWithBatchHooks(keras.callbacks.Callback): def __init__(self): self.train_batches = 0 self.test_batches = 0 self.predict_batches = 0 def on_train_batch_end(self, batch, logs=None): self.train_batches += 1 def on_test_batch_end(self, batch, logs=None): self.test_batches += 1 def on_predict_batch_end(self, batch, logs=None): self.predict_batches += 1 class 
MyCallbackWithTFBatchHooks(keras.callbacks.Callback): def __init__(self): super(MyCallbackWithTFBatchHooks, self).__init__() self._supports_tf_logs = True class MyCallbackWithoutBatchHooks(keras.callbacks.Callback): def __init__(self): self.epochs = 0 def on_epoch_end(self, epoch, logs=None): self.epochs += 1 x, y = np.ones((10, 1)), np.ones((10, 1)) model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') my_cb = MyCallbackWithBatchHooks() cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertTrue(cb_list._should_call_train_batch_hooks) self.assertTrue(cb_list._should_call_test_batch_hooks) self.assertTrue(cb_list._should_call_predict_batch_hooks) self.assertFalse(cb_list._batch_hooks_support_tf_logs) model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0) model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0) model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0) self.assertEqual(my_cb.train_batches, 2) self.assertEqual(my_cb.test_batches, 1) self.assertEqual(my_cb.predict_batches, 1) my_cb = MyCallbackWithTFBatchHooks() cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertTrue(cb_list._batch_hooks_support_tf_logs) my_cb = MyCallbackWithoutBatchHooks() cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertLen(cb_list.callbacks, 1) self.assertFalse(cb_list._should_call_train_batch_hooks) self.assertFalse(cb_list._should_call_test_batch_hooks) self.assertFalse(cb_list._should_call_predict_batch_hooks) model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0) model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0) model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_logs_conversion(self): assert_dict_equal = self.assertDictEqual class MutateNumpyLogs(CallAllHooks): def _run(self, *args, logs=None): logs = logs or args[-1] logs['numpy'] = 1 class 
MutateTensorFlowLogs(CallAllHooks): def __init__(self): super(MutateTensorFlowLogs, self).__init__() self._supports_tf_logs = True def _run(self, *args, logs=None): logs = logs or args[-1] logs['tf'] = 2 class AssertNumpyLogs(CallAllHooks): def _run(self, *args, logs=None): logs = logs or args[-1] assert_dict_equal(logs, {'all': 0, 'numpy': 1, 'tf': 2}) class AssertTensorFlowLogs(AssertNumpyLogs): def __init__(self): super(AssertTensorFlowLogs, self).__init__() self._supports_tf_logs = True cb_list = keras.callbacks.CallbackList([ MutateNumpyLogs(), MutateTensorFlowLogs(), AssertNumpyLogs(), AssertTensorFlowLogs() ]) assert len(cb_list.callbacks) == 4 cb_list.on_epoch_begin(0, logs={'all': 0}) cb_list.on_epoch_end(0, logs={'all': 0}) cb_list.on_predict_batch_begin(0, logs={'all': 0}) cb_list.on_predict_batch_end(0, logs={'all': 0}) cb_list.on_predict_begin(logs={'all': 0}) cb_list.on_predict_end(logs={'all': 0}) cb_list.on_test_batch_begin(0, logs={'all': 0}) cb_list.on_test_batch_end(0, logs={'all': 0}) cb_list.on_test_begin(logs={'all': 0}) cb_list.on_test_end(logs={'all': 0}) cb_list.on_train_batch_begin(0, logs={'all': 0}) cb_list.on_train_batch_end(0, logs={'all': 0}) cb_list.on_train_begin(logs={'all': 0}) cb_list.on_train_end(logs={'all': 0}) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_implements_batch_hooks_override(self): class MyCallback(keras.callbacks.Callback): def __init__(self, should_run=True): self.should_run = should_run self.train_batches = 0 self.test_batches = 0 self.predict_batches = 0 def on_train_batch_end(self, batch, logs=None): self.train_batches += 1 def on_test_batch_end(self, batch, logs=None): self.test_batches += 1 def on_predict_batch_end(self, batch, logs=None): self.predict_batches += 1 def _implements_train_batch_hooks(self): return self.should_run def _implements_test_batch_hooks(self): return self.should_run def _implements_predict_batch_hooks(self): return self.should_run x, y = np.ones((10, 1)), 
np.ones((10, 1)) model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') my_cb = MyCallback(should_run=True) cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertTrue(cb_list._should_call_train_batch_hooks) self.assertTrue(cb_list._should_call_test_batch_hooks) self.assertTrue(cb_list._should_call_predict_batch_hooks) model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0) model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0) model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0) self.assertEqual(my_cb.train_batches, 2) self.assertEqual(my_cb.test_batches, 1) self.assertEqual(my_cb.predict_batches, 1) my_cb = MyCallback(should_run=False) cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertFalse(cb_list._should_call_train_batch_hooks) self.assertFalse(cb_list._should_call_test_batch_hooks) self.assertFalse(cb_list._should_call_predict_batch_hooks) model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0) model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0) model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0) self.assertEqual(my_cb.train_batches, 0) self.assertEqual(my_cb.test_batches, 0) self.assertEqual(my_cb.predict_batches, 0) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_default_callbacks_do_not_call_batch_hooks(self): model = keras.Sequential([keras.layers.Dense(1)]) log_dir = self.get_temp_dir() cb_list = keras.callbacks.CallbackList([ keras.callbacks.TensorBoard(log_dir, profile_batch=0), keras.callbacks.ModelCheckpoint(log_dir), ], add_progbar=True, model=model, verbose=2, epochs=3) self.assertLen(cb_list.callbacks, 3) self.assertFalse(cb_list._should_call_train_batch_hooks) self.assertFalse(cb_list._should_call_test_batch_hooks) self.assertFalse(cb_list._should_call_predict_batch_hooks) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_change_tf_functions_during_fit(self): class 
ChangeFunctions(keras.callbacks.Callback): def on_epoch_end(self, epochs, logs=None): def new_fn(iterator): raise ValueError('New function substituted successfully.') self.model.train_function = new_fn self.model.test_function = new_fn self.model.predict_function = new_fn model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') x, y = np.ones((10, 10)), np.ones((10, 1)) with self.assertRaisesRegexp(ValueError, 'New function '): model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()]) with self.assertRaisesRegexp(ValueError, 'New function '): model.evaluate(x, y, batch_size=2) with self.assertRaisesRegexp(ValueError, 'New function '): model.predict(x, batch_size=2) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_stop_training_batch_level(self): class MyCallback(keras.callbacks.Callback): def __init__(self): super(MyCallback, self).__init__() self.batch_counter = 0 def on_train_batch_end(self, batch, logs=None): self.batch_counter += 1 if batch == 2: self.model.stop_training = True model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') x, y = np.ones((10, 10)), np.ones((10, 1)) my_cb = MyCallback() # Will run 5 batches if `stop_training` doesn't work. model.fit(x, y, batch_size=2, callbacks=[my_cb]) self.assertEqual(my_cb.batch_counter, 3) # A summary that was emitted during a test. Fields: # logdir: str. The logdir of the FileWriter to which the summary was # written. # tag: str. The name of the summary. _ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag')) class _SummaryFile: """A record of summary tags and the files to which they were written. Fields `scalars`, `images`, `histograms`, and `tensors` are sets containing `_ObservedSummary` values. 
""" def __init__(self): self.scalars = set() self.images = set() self.histograms = set() self.tensors = set() self.graph_defs = [] self.convert_from_v2_summary_proto = False def list_summaries(logdir): """Read all summaries under the logdir into a `_SummaryFile`. Args: logdir: A path to a directory that contains zero or more event files, either as direct children or in transitive subdirectories. Summaries in these events must only contain old-style scalars, images, and histograms. Non-summary events, like `graph_def`s, are ignored. Returns: A `_SummaryFile` object reflecting all summaries written to any event files in the logdir or any of its descendant directories. Raises: ValueError: If an event file contains an summary of unexpected kind. """ result = _SummaryFile() for (dirpath, _, filenames) in os.walk(logdir): for filename in filenames: if not filename.startswith('events.out.'): continue path = os.path.join(dirpath, filename) for event in tf.compat.v1.train.summary_iterator(path): if event.graph_def: result.graph_defs.append(event.graph_def) if not event.summary: # (e.g., it's a `graph_def` event) continue for value in event.summary.value: tag = value.tag # Case on the `value` rather than the summary metadata because # the Keras callback uses `summary_ops_v2` to emit old-style # summaries. See b/124535134. kind = value.WhichOneof('value') container = { 'simple_value': result.scalars, 'image': result.images, 'histo': result.histograms, 'tensor': result.tensors, }.get(kind) if container is None: raise ValueError( 'Unexpected summary kind %r in event file %s:\n%r' % (kind, path, event)) elif kind == 'tensor' and tag != 'keras': # Convert the tf2 summary proto to old style for type checking. 
plugin_name = value.metadata.plugin_data.plugin_name container = { 'images': result.images, 'histograms': result.histograms, 'scalars': result.scalars, }.get(plugin_name) if container is not None: result.convert_from_v2_summary_proto = True else: container = result.tensors container.add(_ObservedSummary(logdir=dirpath, tag=tag)) return result @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class TestTensorBoardV2(keras_parameterized.TestCase): def setUp(self): super(TestTensorBoardV2, self).setUp() self.logdir = os.path.join(self.get_temp_dir(), 'tb') self.train_dir = os.path.join(self.logdir, 'train') self.validation_dir = os.path.join(self.logdir, 'validation') def _get_model(self, compile_model=True): layers = [ keras.layers.Conv2D(8, (3, 3)), keras.layers.Flatten(), keras.layers.Dense(1) ] model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1)) if compile_model: opt = gradient_descent.SGD(learning_rate=0.001) model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly()) return model def test_TensorBoard_default_logdir(self): """Regression test for cross-platform pathsep in default logdir.""" os.chdir(self.get_temp_dir()) model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard() # no logdir specified model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(logdir='.') train_dir = os.path.join('.', 'logs', 'train') validation_dir = os.path.join('.', 'logs', 'validation') self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=train_dir, tag='epoch_loss'), _ObservedSummary(logdir=validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=validation_dir, tag='evaluation_loss_vs_iterations'), }) def test_TensorBoard_basic(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = 
keras.callbacks.TensorBoard(self.logdir) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }) def test_TensorBoard_across_invocations(self): """Regression test for summary writer resource use-after-free. See: <https://github.com/tensorflow/tensorflow/issues/25707> """ model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir) for _ in (1, 2): model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }) def test_TensorBoard_no_spurious_event_files(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir) model.fit( x, y, batch_size=2, epochs=2, callbacks=[tb_cbk]) events_file_run_basenames = set() for (dirpath, _, filenames) in os.walk(self.train_dir): if any(fn.startswith('events.out.') for fn in filenames): events_file_run_basenames.add(os.path.basename(dirpath)) self.assertEqual(events_file_run_basenames, {'train'}) def test_TensorBoard_batch_metrics(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { 
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'), _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }, ) def test_TensorBoard_learning_rate_schedules(self): model = self._get_model(compile_model=False) opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1)) model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly()) x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) model.fit( x, y, batch_size=2, epochs=2, callbacks=[keras.callbacks.TensorBoard(self.logdir)]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'), }, ) def test_TensorBoard_global_step(self): model = self._get_model(compile_model=False) opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1)) model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly()) x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) model.fit( x, y, batch_size=2, epochs=2, verbose=0, callbacks=[ keras.callbacks.TensorBoard( self.logdir, update_freq=1, profile_batch=0, write_steps_per_second=True) ]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.train_dir, tag='batch_loss'), _ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'), _ObservedSummary( logdir=self.train_dir, tag='epoch_steps_per_second'), _ObservedSummary( logdir=self.train_dir, tag='batch_steps_per_second'), }, ) def test_TensorBoard_weight_histograms(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1) model_type = testing_utils.get_model_type() model.fit( x, y, 
batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }, ) self.assertEqual( self._strip_layer_names(summary_file.histograms, model_type), { _ObservedSummary(logdir=self.train_dir, tag='bias_0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0'), }, ) def test_TensorBoard_weight_images(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, write_images=True) model_type = testing_utils.get_model_type() model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }, ) self.assertEqual( self._strip_layer_names(summary_file.histograms, model_type), { _ObservedSummary(logdir=self.train_dir, tag='bias_0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0'), }, ) if summary_file.convert_from_v2_summary_proto: expected = { _ObservedSummary(logdir=self.train_dir, tag='bias_0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0'), } else: expected = { _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'), } self.assertEqual( self._strip_layer_names(summary_file.images, model_type), expected ) def test_TensorBoard_projector_callback(self): layers = [ 
keras.layers.Embedding(10, 10, name='test_embedding'), keras.layers.Dense(10, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ] model = testing_utils.get_model_from_layers(layers, input_shape=(10,)) model.compile( optimizer='adam', loss=keras.losses.BinaryCrossentropy(from_logits=True), run_eagerly=testing_utils.should_run_eagerly()) x, y = np.ones((10, 10)), np.ones((10, 10)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, embeddings_freq=1, embeddings_metadata={'test_embedding': 'metadata.tsv'}) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f: self.assertEqual(f.readlines(), [ 'embeddings {\n', (' tensor_name: ' '"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'), ' metadata_path: "metadata.tsv"\n', '}\n' ]) def test_custom_summary(self): if not tf.executing_eagerly(): self.skipTest('Custom summaries only supported in V2 code path.') def scalar_v2_mock(name, data, step=None): """A reimplementation of the scalar plugin to avoid circular deps.""" metadata = tf.compat.v1.SummaryMetadata() # Should match value in tensorboard/plugins/scalar/metadata.py. 
metadata.plugin_data.plugin_name = 'scalars' with tf.summary.experimental.summary_scope( name, 'scalar_summary', values=[data, step]) as (tag, _): return tf.summary.write( tag=tag, tensor=tf.cast(data, 'float32'), step=step, metadata=metadata) class LayerWithSummary(keras.layers.Layer): def call(self, x): scalar_v2_mock('custom_summary', tf.reduce_sum(x)) return x model = testing_utils.get_model_from_layers([LayerWithSummary()], input_shape=(5,), name='model') model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1) x, y = np.ones((10, 5)), np.ones((10, 5)) model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), _ObservedSummary(logdir=self.train_dir, tag='batch_loss'), _ObservedSummary( logdir=self.train_dir, tag='model/layer_with_summary/custom_summary'), _ObservedSummary( logdir=self.validation_dir, tag='model/layer_with_summary/custom_summary') }, ) def _strip_layer_names(self, summaries, model_type): """Deduplicate summary names modulo layer prefix. This removes the first slash-component of each tag name: for instance, "foo/bar/baz" becomes "bar/baz". Args: summaries: A `set` of `_ObservedSummary` values. model_type: The model type currently being tested. Returns: A new `set` of `_ObservedSummary` values with layer prefixes removed. 
""" result = set() for summary in summaries: if '/' not in summary.tag: raise ValueError('tag has no layer name: %r' % summary.tag) start_from = 2 if 'subclass' in model_type else 1 new_tag = '/'.join(summary.tag.split('/')[start_from:]) result.add(summary._replace(tag=new_tag)) return result def test_TensorBoard_invalid_argument(self): with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'): keras.callbacks.TensorBoard(wwrite_images=True) def test_TensorBoard_non_blocking(self): model = keras.Sequential([keras.layers.Dense(1)]) tb = keras.callbacks.TensorBoard(self.logdir) self.assertTrue(tb._supports_tf_logs) cb_list = keras.callbacks.CallbackList([tb], model=model, epochs=1, steps=100, verbose=0) tensor = tf.convert_to_tensor(1.) def mock_numpy(): raise RuntimeError( 'If this error is seen, TensorBoard is causing a blocking ' 'NumPy conversion.') with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy): logs = {'metric': tensor} cb_list.on_train_begin(logs) cb_list.on_epoch_begin(0, logs) cb_list.on_train_batch_begin(0, logs) cb_list.on_train_batch_end(0, logs) cb_list.on_epoch_end(0, logs) cb_list.on_train_end(logs) cb_list.on_test_begin(logs) cb_list.on_test_batch_begin(0, logs) cb_list.on_test_batch_end(0, logs) cb_list.on_test_end(logs) cb_list.on_predict_begin(logs) cb_list.on_predict_batch_begin(logs) cb_list.on_predict_batch_end(logs) cb_list.on_predict_end(logs) # Note that this test specifies model_type explicitly. 
@keras_parameterized.run_all_keras_modes(always_skip_v1=True) class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase): def setUp(self): super(TestTensorBoardV2NonParameterizedTest, self).setUp() self.logdir = os.path.join(self.get_temp_dir(), 'tb') self.train_dir = os.path.join(self.logdir, 'train') self.validation_dir = os.path.join(self.logdir, 'validation') def _get_seq_model(self): model = keras.models.Sequential([ keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)), keras.layers.Flatten(), keras.layers.Dense(1), ]) opt = gradient_descent.SGD(learning_rate=0.001) model.compile( opt, 'mse', run_eagerly=testing_utils.should_run_eagerly()) return model def _count_trace_file(self, logdir): profile_dir = os.path.join(logdir, 'plugins', 'profile') count = 0 for (dirpath, dirnames, filenames) in os.walk(profile_dir): del dirpath # unused del dirnames # unused for filename in filenames: if filename.endswith('.trace.json.gz'): count += 1 return count def fitModelAndAssertKerasModelWritten(self, model): x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir, write_graph=True, profile_batch=0) model.fit( x, y, batch_size=2, epochs=3, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag='keras'), }, ) if not model.run_eagerly: # There should be one train graph self.assertLen(summary_file.graph_defs, 1) for graph_def in summary_file.graph_defs: graph_def_str = str(graph_def) # All the model layers should appear in the graphs for layer in model.layers: if 'input' not in layer.name: self.assertIn(layer.name, graph_def_str) def test_TensorBoard_writeSequentialModel_noInputShape(self): model = keras.models.Sequential([ keras.layers.Conv2D(8, (3, 3)), keras.layers.Flatten(), keras.layers.Dense(1), ]) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) 
self.fitModelAndAssertKerasModelWritten(model) def test_TensorBoard_writeSequentialModel_withInputShape(self): model = keras.models.Sequential([ keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)), keras.layers.Flatten(), keras.layers.Dense(1), ]) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) self.fitModelAndAssertKerasModelWritten(model) def test_TensorBoard_writeModel(self): inputs = keras.layers.Input([10, 10, 1]) x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs) x = keras.layers.Flatten()(x) x = keras.layers.Dense(1)(x) model = keras.models.Model(inputs=inputs, outputs=[x]) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) self.fitModelAndAssertKerasModelWritten(model) def test_TensorBoard_autoTrace(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=1, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_outerProfiler(self): """Runs a profiler session that interferes with the one from the callback. The callback will not generate a profile but execution will proceed without crashing due to unhandled exceptions. 
""" tf.profiler.experimental.start(logdir='') model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=1, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) tf.profiler.experimental.stop(save=False) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'), }, ) self.assertEqual(0, self._count_trace_file(logdir=self.train_dir)) def test_TensorBoard_autoTrace_tagNameWithBatchNum(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=2, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_profileBatchRangeSingle(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False) model.fit( x, y, batch_size=3, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { # Trace will be logged once at the batch it stops profiling. 
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_profileBatchRangeTwice(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False) model.fit( x, y, batch_size=3, epochs=10, validation_data=(x, y), callbacks=[tb_cbk]) time.sleep(1) # Avoids the second profile over-writing the first. model.fit( x, y, batch_size=3, epochs=10, validation_data=(x, y), callbacks=[tb_cbk]) self.assertEqual(2, self._count_trace_file(logdir=self.logdir)) # Test case that replicates a Github issue. # https://github.com/tensorflow/tensorflow/issues/37543 def test_TensorBoard_autoTrace_profileTwiceGraphMode(self): tf.compat.v1.disable_eager_execution() inp = keras.Input((1,)) out = keras.layers.Dense(units=1)(inp) model = keras.Model(inp, out) model.compile(gradient_descent.SGD(1), 'mse') logdir = os.path.join(self.get_temp_dir(), 'tb1') model.fit( np.zeros((64, 1)), np.zeros((64, 1)), batch_size=32, callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)], ) # Verifies trace exists in the first logdir. self.assertEqual(1, self._count_trace_file(logdir=logdir)) logdir = os.path.join(self.get_temp_dir(), 'tb2') model.fit( np.zeros((64, 1)), np.zeros((64, 1)), batch_size=32, callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)], ) # Verifies trace exists in the second logdir. 
self.assertEqual(1, self._count_trace_file(logdir=logdir)) def test_TensorBoard_autoTrace_profileBatchRange(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False) model.fit( x, y, batch_size=4, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { # Trace will be logged once at the batch it stops profiling. _ObservedSummary(logdir=self.train_dir, tag=u'batch_3'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_profileInvalidBatchRange(self): with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='-1,3', write_graph=False) with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='1,None', write_graph=False) with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False) with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False) def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) # Enabled trace only on the 10000th batch, thus it should be empty. 
self.assertEmpty(summary_file.tensors) self.assertEqual(0, self._count_trace_file(logdir=self.train_dir)) class MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase): def test_get_most_recently_modified_file_matching_pattern(self): file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5'] ] for file_path in file_paths: with open(file_path, 'w') as f: # Ensure there are some intervals between file creation. time.sleep(2) f.write('foo bar') # Ensure the files have been actually written. self.assertEqual( set([ os.path.join(test_dir, file_name) for file_name in os.listdir(test_dir) ]), set(file_paths)) self.assertEqual( keras.callbacks.ModelCheckpoint(None) ._get_most_recently_modified_file_matching_pattern(path_pattern), file_paths[-1]) def test_some_file_not_matching_pattern(self): file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5'] ] for file_path in file_paths: with open(file_path, 'w') as f: # Ensure there are some intervals between file creation. 
time.sleep(2) f.write('foo bar') self.assertEqual( keras.callbacks.ModelCheckpoint(None) ._get_most_recently_modified_file_matching_pattern(path_pattern), file_paths[-2]) def test_get_same_file_if_file_name_equals_pattern(self): file_name = 'f.batch02.h5' test_dir = self.get_temp_dir() file_path = os.path.join(test_dir, file_name) with open(file_path, 'w') as f: f.write('foo bar') self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path) self.assertEqual( keras.callbacks.ModelCheckpoint( None)._get_most_recently_modified_file_matching_pattern(file_path), file_path) def test_get_none_if_file_does_not_exist(self): file_name = 'f.batch02.h5' test_dir = self.get_temp_dir() file_path = os.path.join(test_dir, file_name) self.assertLen(os.listdir(test_dir), 0) self.assertEqual( keras.callbacks.ModelCheckpoint( None)._get_most_recently_modified_file_matching_pattern(file_path), None) def test_using_checkpoint_management_latest_checkpoint(self): file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}' ckpt_file_name = 'f.batchXepochY' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) ckpt_file_path = os.path.join(test_dir, ckpt_file_name) with open(ckpt_file_path, 'w') as f: f.write('dummy ckpt') tf.__internal__.train.update_checkpoint_state( test_dir, ckpt_file_path) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02', 'f.batch02epoch02'] ] for file_path in file_paths: with open(file_path, 'w') as f: f.write('foo bar') # The result returned from checkpoint_management.latest_checkpoint takes # priority, so even if it was written earlier, we should still return that. 
self.assertEqual( keras.callbacks.ModelCheckpoint(None) ._get_most_recently_modified_file_matching_pattern(path_pattern), ckpt_file_path) class SummaryOpsTest(tf.test.TestCase): def tearDown(self): super(SummaryOpsTest, self).tearDown() tf.summary.trace_off() def keras_model(self, *args, **kwargs): logdir = self.get_temp_dir() writer = tf.summary.create_file_writer(logdir) with writer.as_default(): keras.callbacks.keras_model_summary(*args, **kwargs) writer.close() events = events_from_logdir(logdir) # The first event contains no summary values. The written content goes to # the second event. return events[1] @testing_utils.run_v2_only def testKerasModel(self): model = keras.Sequential( [Dense(10, input_shape=(100,)), Activation('relu', name='my_relu')]) event = self.keras_model(name='my_name', data=model, step=1) first_val = event.summary.value[0] self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode()) @testing_utils.run_v2_only def testKerasModel_usesDefaultStep(self): model = keras.Sequential( [Dense(10, input_shape=(100,)), Activation('relu', name='my_relu')]) try: tf.summary.experimental.set_step(42) event = self.keras_model(name='my_name', data=model) self.assertEqual(42, event.step) finally: # Reset to default state for other tests. 
tf.summary.experimental.set_step(None) @testing_utils.run_v2_only def testKerasModel_subclass(self): class SimpleSubclass(keras.Model): def __init__(self): super(SimpleSubclass, self).__init__(name='subclass') self.dense = Dense(10, input_shape=(100,)) self.activation = Activation('relu', name='my_relu') def call(self, inputs): x = self.dense(inputs) return self.activation(x) model = SimpleSubclass() with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: self.assertFalse( keras.callbacks.keras_model_summary( name='my_name', data=model, step=1)) self.assertRegex( str(mock_log.call_args), 'Model failed to serialize as JSON.') @testing_utils.run_v2_only def testKerasModel_otherExceptions(self): model = keras.Sequential() with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json: with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: mock_to_json.side_effect = Exception('oops') self.assertFalse( keras.callbacks.keras_model_summary( name='my_name', data=model, step=1)) self.assertRegex( str(mock_log.call_args), 'Model failed to serialize as JSON. Ignoring') def events_from_file(filepath): """Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file. """ result = [] raw_dataset = tf.data.TFRecordDataset([filepath]) for raw_record in raw_dataset.take(10): event = tf.compat.v1.Event() event.ParseFromString(raw_record.numpy()) result.append(event) return result def events_from_logdir(logdir): """Returns all events in the single eventfile in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file. 
""" assert tf.compat.v1.gfile.Exists(logdir) files = tf.compat.v1.gfile.ListDirectory(logdir) assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files return events_from_file(os.path.join(logdir, files[0])) if __name__ == '__main__': tf.test.main()
106,928
33.728483
80
py
keras
keras-master/keras/constraints_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras weights constraints.""" import tensorflow.compat.v2 as tf import math import numpy as np from keras import backend from keras import combinations from keras import constraints def get_test_values(): return [0.1, 0.5, 3, 8, 1e-7] def get_example_array(): np.random.seed(3537) example_array = np.random.random((100, 100)) * 100. - 50. example_array[0, 0] = 0. 
# 0 could possibly cause trouble return example_array def get_example_kernel(width): np.random.seed(3537) example_array = np.random.rand(width, width, 2, 2) return example_array @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class KerasConstraintsTest(tf.test.TestCase): def test_serialization(self): all_activations = ['max_norm', 'non_neg', 'unit_norm', 'min_max_norm'] for name in all_activations: fn = constraints.get(name) ref_fn = getattr(constraints, name)() assert fn.__class__ == ref_fn.__class__ config = constraints.serialize(fn) fn = constraints.deserialize(config) assert fn.__class__ == ref_fn.__class__ def test_max_norm(self): array = get_example_array() for m in get_test_values(): norm_instance = constraints.max_norm(m) normed = norm_instance(backend.variable(array)) assert np.all(backend.eval(normed) < m) # a more explicit example norm_instance = constraints.max_norm(2.0) x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T x_normed_target = np.array( [[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T x_normed_actual = backend.eval(norm_instance(backend.variable(x))) self.assertAllClose(x_normed_actual, x_normed_target, rtol=1e-05) def test_non_neg(self): non_neg_instance = constraints.non_neg() normed = non_neg_instance(backend.variable(get_example_array())) assert np.all(np.min(backend.eval(normed), axis=1) == 0.) def test_unit_norm(self): unit_norm_instance = constraints.unit_norm() normalized = unit_norm_instance(backend.variable(get_example_array())) norm_of_normalized = np.sqrt(np.sum(backend.eval(normalized)**2, axis=0)) # In the unit norm constraint, it should be equal to 1. difference = norm_of_normalized - 1. 
largest_difference = np.max(np.abs(difference)) assert np.abs(largest_difference) < 10e-5 def test_min_max_norm(self): array = get_example_array() for m in get_test_values(): norm_instance = constraints.min_max_norm(min_value=m, max_value=m * 2) normed = norm_instance(backend.variable(array)) value = backend.eval(normed) l2 = np.sqrt(np.sum(np.square(value), axis=0)) assert not l2[l2 < m] assert not l2[l2 > m * 2 + 1e-5] def test_conv2d_radial_constraint(self): for width in (3, 4, 5, 6): array = get_example_kernel(width) norm_instance = constraints.radial_constraint() normed = norm_instance(backend.variable(array)) value = backend.eval(normed) assert np.all(value.shape == array.shape) assert np.all(value[0:, 0, 0, 0] == value[-1:, 0, 0, 0]) assert len(set(value[..., 0, 0].flatten())) == math.ceil(float(width) / 2) if __name__ == '__main__': tf.test.main()
3,983
34.571429
80
py
keras
keras-master/keras/callbacks_v1_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras callbacks.""" import tensorflow.compat.v2 as tf import os import shutil import tempfile from absl.testing import parameterized import numpy as np from keras import callbacks from keras import callbacks_v1 from keras import combinations from keras import layers from keras import testing_utils from keras.engine import input_layer from keras.engine import sequential from keras.engine import training from keras.utils import np_utils TRAIN_SAMPLES = 10 TEST_SAMPLES = 10 NUM_CLASSES = 2 INPUT_DIM = 3 NUM_HIDDEN = 5 BATCH_SIZE = 5 class TestTensorBoardV1(tf.test.TestCase, parameterized.TestCase): def test_TensorBoard(self): np.random.seed(1337) temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(x_train) // BATCH_SIZE else: max_batch_index = len(x_test) // BATCH_SIZE i = 0 while 1: if train: yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE], y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]) else: yield (x_test[i * BATCH_SIZE:(i + 1) 
* BATCH_SIZE], y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]) i += 1 i %= max_batch_index # case: Sequential with tf.Graph().as_default(), self.cached_session(): model = sequential.Sequential() model.add( layers.Dense( NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu')) # non_trainable_weights: moving_variance, moving_mean model.add(layers.BatchNormalization()) model.add(layers.Dense(NUM_CLASSES, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) tsb = callbacks_v1.TensorBoard( log_dir=temp_dir, histogram_freq=1, write_images=True, write_grads=True, batch_size=5) cbks = [tsb] # fit with validation data model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=3, verbose=0) # fit with validation data and accuracy model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) # fit generator with validation data model.fit_generator( data_generator(True), len(x_train), epochs=2, validation_data=(x_test, y_test), callbacks=cbks, verbose=0) # fit generator without validation data # histogram_freq must be zero tsb.histogram_freq = 0 model.fit_generator( data_generator(True), len(x_train), epochs=2, callbacks=cbks, verbose=0) # fit generator with validation data and accuracy tsb.histogram_freq = 1 model.fit_generator( data_generator(True), len(x_train), epochs=2, validation_data=(x_test, y_test), callbacks=cbks, verbose=0) # fit generator without validation data and accuracy tsb.histogram_freq = 0 model.fit_generator( data_generator(True), len(x_train), epochs=2, callbacks=cbks) assert os.path.exists(temp_dir) def test_TensorBoard_multi_input_output(self): np.random.seed(1337) tmpdir = self.get_temp_dir() self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True) with tf.Graph().as_default(), self.cached_session(): filepath = os.path.join(tmpdir, 'logs') (x_train, y_train), (x_test, y_test) = 
testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(x_train) // BATCH_SIZE else: max_batch_index = len(x_test) // BATCH_SIZE i = 0 while 1: if train: # simulate multi-input/output models yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2, [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2) else: yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2, [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2) i += 1 i %= max_batch_index inp1 = input_layer.Input((INPUT_DIM,)) inp2 = input_layer.Input((INPUT_DIM,)) inp = layers.add([inp1, inp2]) hidden = layers.Dense(2, activation='relu')(inp) hidden = layers.Dropout(0.1)(hidden) output1 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden) output2 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden) model = training.Model([inp1, inp2], [output1, output2]) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq): return [ callbacks_v1.TensorBoard( log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, batch_size=5) ] # fit without validation data model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE, callbacks=callbacks_factory(histogram_freq=0), epochs=3) # fit with validation data and accuracy model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE, validation_data=([x_test] * 2, [y_test] * 2), callbacks=callbacks_factory(histogram_freq=1), epochs=2) # fit generator without validation data model.fit_generator(data_generator(True), len(x_train), epochs=2, callbacks=callbacks_factory(histogram_freq=0)) # fit generator with validation data and accuracy model.fit_generator(data_generator(True), 
len(x_train), epochs=2, validation_data=([x_test] * 2, [y_test] * 2), callbacks=callbacks_factory(histogram_freq=1)) assert os.path.isdir(filepath) def test_Tensorboard_histogram_summaries_in_test_function(self): class FileWriterStub: def __init__(self, logdir, graph=None): self.logdir = logdir self.graph = graph self.steps_seen = [] def add_summary(self, summary, global_step): summary_obj = tf.compat.v1.Summary() # ensure a valid Summary proto is being sent if isinstance(summary, bytes): summary_obj.ParseFromString(summary) else: assert isinstance(summary, tf.compat.v1.Summary) summary_obj = summary # keep track of steps seen for the merged_summary op, # which contains the histogram summaries if len(summary_obj.value) > 1: self.steps_seen.append(global_step) def flush(self): pass def close(self): pass def _init_writer(obj, _): obj.writer = FileWriterStub(obj.log_dir) np.random.seed(1337) tmpdir = self.get_temp_dir() self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) with tf.Graph().as_default(), self.cached_session(): model = sequential.Sequential() model.add( layers.Dense( NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu')) # non_trainable_weights: moving_variance, moving_mean model.add(layers.BatchNormalization()) model.add(layers.Dense(NUM_CLASSES, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) callbacks_v1.TensorBoard._init_writer = _init_writer tsb = callbacks_v1.TensorBoard( log_dir=tmpdir, histogram_freq=1, write_images=True, write_grads=True, batch_size=5) cbks = [tsb] # fit with validation data model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=3, verbose=0) 
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5]) def test_Tensorboard_histogram_summaries_with_generator(self): np.random.seed(1337) tmpdir = self.get_temp_dir() self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True) def generator(): x = np.random.randn(10, 100).astype(np.float32) y = np.random.randn(10, 10).astype(np.float32) while True: yield x, y with tf.Graph().as_default(), self.cached_session(): model = testing_utils.get_small_sequential_mlp( num_hidden=10, num_classes=10, input_dim=100) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) tsb = callbacks_v1.TensorBoard( log_dir=tmpdir, histogram_freq=1, write_images=True, write_grads=True, batch_size=5) cbks = [tsb] # fit with validation generator model.fit_generator( generator(), steps_per_epoch=2, epochs=2, validation_data=generator(), validation_steps=2, callbacks=cbks, verbose=0) with self.assertRaises(ValueError): # fit with validation generator but no # validation_steps model.fit_generator( generator(), steps_per_epoch=2, epochs=2, validation_data=generator(), callbacks=cbks, verbose=0) self.assertTrue(os.path.exists(tmpdir)) def test_TensorBoard_with_ReduceLROnPlateau(self): with self.cached_session(): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = testing_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.5, patience=4, verbose=1), callbacks_v1.TensorBoard(log_dir=temp_dir) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, 
validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) assert os.path.exists(temp_dir) def test_Tensorboard_batch_logging(self): class FileWriterStub: def __init__(self, logdir, graph=None): self.logdir = logdir self.graph = graph self.batches_logged = [] self.summary_values = [] self.summary_tags = [] def add_summary(self, summary, step): self.summary_values.append(summary.value[0].simple_value) self.summary_tags.append(summary.value[0].tag) self.batches_logged.append(step) def flush(self): pass def close(self): pass with tf.Graph().as_default(): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch') tb_cbk.writer = FileWriterStub(temp_dir) for batch in range(5): tb_cbk.on_batch_end(batch, {'acc': batch}) self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4]) self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.]) self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5) def test_Tensorboard_epoch_and_batch_logging(self): class FileWriterStub: def __init__(self, logdir, graph=None): self.logdir = logdir self.graph = graph def add_summary(self, summary, step): if 'batch_' in summary.value[0].tag: self.batch_summary = (step, summary) elif 'epoch_' in summary.value[0].tag: self.epoch_summary = (step, summary) def flush(self): pass def close(self): pass with tf.Graph().as_default(): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch') tb_cbk.writer = FileWriterStub(temp_dir) tb_cbk.on_batch_end(0, {'acc': 5.0}) tb_cbk.on_train_end() batch_step, batch_summary = tb_cbk.writer.batch_summary self.assertEqual(batch_step, 0) self.assertEqual(batch_summary.value[0].simple_value, 5.0) tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch') tb_cbk.writer = FileWriterStub(temp_dir) tb_cbk.on_epoch_end(0, {'acc': 10.0}) 
tb_cbk.on_train_end() epoch_step, epoch_summary = tb_cbk.writer.epoch_summary self.assertEqual(epoch_step, 0) self.assertEqual(epoch_summary.value[0].simple_value, 10.0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_Tensorboard_eager(self): temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = testing_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='binary_crossentropy', optimizer=tf.compat.v1.train.AdamOptimizer(0.01), metrics=['accuracy']) cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) self.assertTrue(os.path.exists(temp_dir)) def test_TensorBoard_update_freq(self): class FileWriterStub: def __init__(self, logdir, graph=None): self.logdir = logdir self.graph = graph self.batch_summaries = [] self.epoch_summaries = [] def add_summary(self, summary, step): if 'batch_' in summary.value[0].tag: self.batch_summaries.append((step, summary)) elif 'epoch_' in summary.value[0].tag: self.epoch_summaries.append((step, summary)) def flush(self): pass def close(self): pass with tf.Graph().as_default(): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) # Epoch mode tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch') tb_cbk.writer = FileWriterStub(temp_dir) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1}) self.assertEqual(tb_cbk.writer.batch_summaries, []) tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1}) self.assertLen(tb_cbk.writer.epoch_summaries, 1) tb_cbk.on_train_end() 
# Batch mode tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch') tb_cbk.writer = FileWriterStub(temp_dir) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1}) self.assertLen(tb_cbk.writer.batch_summaries, 1) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1}) self.assertLen(tb_cbk.writer.batch_summaries, 2) self.assertFalse(tb_cbk.writer.epoch_summaries) tb_cbk.on_train_end() # Integer mode tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq=20) tb_cbk.writer = FileWriterStub(temp_dir) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10}) self.assertFalse(tb_cbk.writer.batch_summaries) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10}) self.assertLen(tb_cbk.writer.batch_summaries, 1) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10}) self.assertLen(tb_cbk.writer.batch_summaries, 1) tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10}) self.assertLen(tb_cbk.writer.batch_summaries, 2) tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10}) self.assertLen(tb_cbk.writer.batch_summaries, 2) self.assertFalse(tb_cbk.writer.epoch_summaries) tb_cbk.on_train_end() if __name__ == '__main__': tf.test.main()
18,476
31.760638
80
py
keras
keras-master/keras/keras_parameterized.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for unit-testing Keras.""" import tensorflow.compat.v2 as tf import collections import functools import itertools import unittest from absl.testing import parameterized import keras from keras import testing_utils try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None class TestCase(tf.test.TestCase, parameterized.TestCase): def tearDown(self): keras.backend.clear_session() super(TestCase, self).tearDown() def run_with_all_saved_model_formats( test_or_class=None, exclude_formats=None): """Execute the decorated test with all Keras saved model formats). This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once for each Keras saved model format. The Keras saved model formats include: 1. HDF5: 'h5' 2. SavedModel: 'tf' Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. Various methods in `testing_utils` to get file path for saved models will auto-generate a string of the two saved model formats. 
This allows unittests to confirm the equivalence between the two Keras saved model formats. For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_with_all_saved_model_formats def test_foo(self): save_format = testing_utils.get_save_format() saved_model_dir = '/tmp/saved_model/' model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) keras.models.save_model(model, saved_model_dir, save_format=save_format) model = keras.models.load_model(saved_model_dir) if __name__ == "__main__": tf.test.main() ``` This test tries to save the model into the formats of 'hdf5', 'h5', 'keras', 'tensorflow', and 'tf'. We can also annotate the whole class if we want this to apply to all tests in the class: ```python @testing_utils.run_with_all_saved_model_formats class MyTests(testing_utils.KerasTestCase): def test_foo(self): save_format = testing_utils.get_save_format() saved_model_dir = '/tmp/saved_model/' model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) keras.models.save_model(model, saved_model_dir, save_format=save_format) model = tf.keras.models.load_model(saved_model_dir) if __name__ == "__main__": tf.test.main() ``` Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. exclude_formats: A collection of Keras saved model formats to not run. (May also be a single format not wrapped in a collection). Defaults to None. Returns: Returns a decorator that will run the decorated test method multiple times: once for each desired Keras saved model format. 
Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency. """ # Exclude h5 save format if H5py isn't available. if h5py is None: exclude_formats.append(['h5']) saved_model_formats = ['h5', 'tf', 'tf_no_traces'] params = [('_%s' % saved_format, saved_format) for saved_format in saved_model_formats if saved_format not in tf.nest.flatten(exclude_formats)] def single_method_decorator(f): """Decorator that constructs the test cases.""" # Use named_parameters so it can be individually run from the command line @parameterized.named_parameters(*params) @functools.wraps(f) def decorated(self, saved_format, *args, **kwargs): """A run of a single test case w/ the specified model type.""" if saved_format == 'h5': _test_h5_saved_model_format(f, self, *args, **kwargs) elif saved_format == 'tf': _test_tf_saved_model_format(f, self, *args, **kwargs) elif saved_format == 'tf_no_traces': _test_tf_saved_model_format_no_traces(f, self, *args, **kwargs) else: raise ValueError('Unknown model type: %s' % (saved_format,)) return decorated return _test_or_class_decorator(test_or_class, single_method_decorator) def _test_h5_saved_model_format(f, test_or_class, *args, **kwargs): with testing_utils.saved_model_format_scope('h5'): f(test_or_class, *args, **kwargs) def _test_tf_saved_model_format(f, test_or_class, *args, **kwargs): with testing_utils.saved_model_format_scope('tf'): f(test_or_class, *args, **kwargs) def _test_tf_saved_model_format_no_traces(f, test_or_class, *args, **kwargs): with testing_utils.saved_model_format_scope('tf', save_traces=False): f(test_or_class, *args, **kwargs) def run_with_all_weight_formats(test_or_class=None, exclude_formats=None): """Runs all tests with the supported formats for saving weights.""" exclude_formats = exclude_formats or [] exclude_formats.append('tf_no_traces') # Only applies to saving models return run_with_all_saved_model_formats(test_or_class, exclude_formats) # TODO(kaftan): Possibly enable 
'subclass_custom_build' when tests begin to pass # it. Or perhaps make 'subclass' always use a custom build method. def run_with_all_model_types( test_or_class=None, exclude_models=None): """Execute the decorated test with all Keras model types. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once for each Keras model type. The Keras model types are: ['functional', 'subclass', 'sequential'] Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. Various methods in `testing_utils` to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models. For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_with_all_model_types( exclude_models = ['sequential']) def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` This test tries building a small mlp as both a functional model and as a subclass model. 
We can also annotate the whole class if we want this to apply to all tests in the class: ```python @testing_utils.run_with_all_model_types(exclude_models = ['sequential']) class MyTests(testing_utils.KerasTestCase): def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. exclude_models: A collection of Keras model types to not run. (May also be a single model type not wrapped in a collection). Defaults to None. Returns: Returns a decorator that will run the decorated test method multiple times: once for each desired Keras model type. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency. 
""" model_types = ['functional', 'subclass', 'sequential'] params = [('_%s' % model, model) for model in model_types if model not in tf.nest.flatten(exclude_models)] def single_method_decorator(f): """Decorator that constructs the test cases.""" # Use named_parameters so it can be individually run from the command line @parameterized.named_parameters(*params) @functools.wraps(f) def decorated(self, model_type, *args, **kwargs): """A run of a single test case w/ the specified model type.""" if model_type == 'functional': _test_functional_model_type(f, self, *args, **kwargs) elif model_type == 'subclass': _test_subclass_model_type(f, self, *args, **kwargs) elif model_type == 'sequential': _test_sequential_model_type(f, self, *args, **kwargs) else: raise ValueError('Unknown model type: %s' % (model_type,)) return decorated return _test_or_class_decorator(test_or_class, single_method_decorator) def _test_functional_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('functional'): f(test_or_class, *args, **kwargs) def _test_subclass_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('subclass'): f(test_or_class, *args, **kwargs) def _test_sequential_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('sequential'): f(test_or_class, *args, **kwargs) def run_all_keras_modes(test_or_class=None, config=None, always_skip_v1=False, always_skip_eager=False, **kwargs): """Execute the decorated test with all keras execution modes. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. 
Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once executing in legacy graph mode, once running eagerly and with `should_run_eagerly` returning True, and once running eagerly with `should_run_eagerly` returning False. If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and the test will only run twice. Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_all_keras_modes def test_foo(self): model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile( optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` This test will try compiling & fitting the small functional mlp using all three Keras execution modes. Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. config: An optional config_pb2.ConfigProto to use to configure the session when executing graphs. always_skip_v1: If True, does not try running the legacy graph mode even when Tensorflow v2 behavior is not enabled. always_skip_eager: If True, does not execute the decorated test with eager execution modes. 
**kwargs: Additional kwargs for configuring tests for in-progress Keras behaviors/ refactorings that we haven't fully rolled out yet Returns: Returns a decorator that will run the decorated test method multiple times. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency. """ if kwargs: raise ValueError('Unrecognized keyword args: {}'.format(kwargs)) params = [('_v2_function', 'v2_function')] if not always_skip_eager: params.append(('_v2_eager', 'v2_eager')) if not (always_skip_v1 or tf.__internal__.tf2.enabled()): params.append(('_v1_session', 'v1_session')) def single_method_decorator(f): """Decorator that constructs the test cases.""" # Use named_parameters so it can be individually run from the command line @parameterized.named_parameters(*params) @functools.wraps(f) def decorated(self, run_mode, *args, **kwargs): """A run of a single test case w/ specified run mode.""" if run_mode == 'v1_session': _v1_session_test(f, self, config, *args, **kwargs) elif run_mode == 'v2_eager': _v2_eager_test(f, self, *args, **kwargs) elif run_mode == 'v2_function': _v2_function_test(f, self, *args, **kwargs) else: return ValueError('Unknown run mode %s' % run_mode) return decorated return _test_or_class_decorator(test_or_class, single_method_decorator) def _v1_session_test(f, test_or_class, config, *args, **kwargs): with tf.compat.v1.get_default_graph().as_default(): with testing_utils.run_eagerly_scope(False): with test_or_class.test_session(config=config): f(test_or_class, *args, **kwargs) def _v2_eager_test(f, test_or_class, *args, **kwargs): with tf.__internal__.eager_context.eager_mode(): with testing_utils.run_eagerly_scope(True): f(test_or_class, *args, **kwargs) def _v2_function_test(f, test_or_class, *args, **kwargs): with tf.__internal__.eager_context.eager_mode(): with testing_utils.run_eagerly_scope(False): f(test_or_class, *args, **kwargs) def _test_or_class_decorator(test_or_class, single_method_decorator): 
"""Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result. """ def _decorate_test_or_class(obj): if isinstance(obj, collections.abc.Iterable): return itertools.chain.from_iterable( single_method_decorator(method) for method in obj) if isinstance(obj, type): cls = obj for name, value in cls.__dict__.copy().items(): if callable(value) and name.startswith( unittest.TestLoader.testMethodPrefix): setattr(cls, name, single_method_decorator(value)) cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy()) return cls return single_method_decorator(obj) if test_or_class is not None: return _decorate_test_or_class(test_or_class) return _decorate_test_or_class
17,530
35.829832
80
py
keras
keras-master/keras/regularizers_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras regularizers.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from keras import keras_parameterized from keras import regularizers from keras import testing_utils from keras.utils import np_utils DATA_DIM = 5 NUM_CLASSES = 2 class KerasRegularizersTest(keras_parameterized.TestCase, parameterized.TestCase): def create_model(self, kernel_regularizer=None, activity_regularizer=None): model = keras.models.Sequential() model.add(keras.layers.Dense(NUM_CLASSES, kernel_regularizer=kernel_regularizer, activity_regularizer=activity_regularizer, input_shape=(DATA_DIM,))) return model def get_data(self): (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=10, test_samples=10, input_shape=(DATA_DIM,), num_classes=NUM_CLASSES) y_train = np_utils.to_categorical(y_train, NUM_CLASSES) y_test = np_utils.to_categorical(y_test, NUM_CLASSES) return (x_train, y_train), (x_test, y_test) def create_multi_input_model_from(self, layer1, layer2): input_1 = keras.layers.Input(shape=(DATA_DIM,)) input_2 = keras.layers.Input(shape=(DATA_DIM,)) out1 = layer1(input_1) out2 = layer2(input_2) out = keras.layers.Average()([out1, out2]) model = keras.models.Model([input_1, input_2], out) 
model.add_loss(keras.backend.mean(out2)) model.add_loss(tf.reduce_sum(input_1)) return model @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_kernel_regularization(self, regularizer): (x_train, y_train), _ = self.get_data() model = self.create_model(kernel_regularizer=regularizer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(len(model.losses), 1) model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ('l2_zero', keras.regularizers.l2(0.)), ]) def test_activity_regularization(self, regularizer): (x_train, y_train), _ = self.get_data() model = self.create_model(activity_regularizer=regularizer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(len(model.losses), 1 if tf.executing_eagerly() else 1) model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_zero_regularization(self): # Verifies that training with zero regularization works. 
x, y = np.ones((10, 10)), np.ones((10, 3)) model = testing_utils.get_model_from_layers( [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, batch_size=5, epochs=1) def test_custom_regularizer_saving(self): def my_regularizer(weights): return tf.reduce_sum(tf.abs(weights)) inputs = keras.Input((10,)) outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs) model = keras.Model(inputs, outputs) model2 = model.from_config( model.get_config(), custom_objects={'my_regularizer': my_regularizer}) self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_layer(self, regularizer): dense_layer = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) model = self.create_multi_input_model_from(dense_layer, dense_layer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.assertLen(model.losses, 5) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_model(self, regularizer): dense_layer = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) input_tensor = keras.layers.Input(shape=(DATA_DIM,)) dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor)) model = self.create_multi_input_model_from(dummy_model, dummy_model) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.assertLen(model.losses, 6) @keras_parameterized.run_all_keras_modes 
@parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_layer_in_different_models(self, regularizer): shared_dense = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) models = [] for _ in range(2): input_tensor = keras.layers.Input(shape=(DATA_DIM,)) unshared_dense = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer) out = unshared_dense(shared_dense(input_tensor)) models.append(keras.models.Model(input_tensor, out)) model = self.create_multi_input_model_from( layer1=models[0], layer2=models[1]) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) # We expect to see 9 losses on the model: # - 2 from the 2 add_loss calls on the outer model. # - 3 from the weight regularizers on the shared_dense layer, unshared_dense # in inner model 1, unshared_dense in inner model 2. # - 4 from activity regularizers on the shared_dense layer. self.assertLen(model.losses, 9) def test_deserialization_error(self): with self.assertRaisesRegex(ValueError, 'Could not interpret regularizer'): keras.regularizers.get(0) @parameterized.named_parameters([ ('l1', regularizers.l1(l1=None), 0.01), ('l2', regularizers.l2(l2=None), 0.01), ('l1_l2', regularizers.l1_l2(l1=None, l2=None), 0.), ]) def test_default_value_when_init_with_none(self, regularizer, expected_value): expected_value = np.asarray(expected_value) if hasattr(regularizer, 'l1'): self.assertAllClose(regularizer.l1, expected_value) if hasattr(regularizer, 'l2'): self.assertAllClose(regularizer.l2, expected_value) if __name__ == '__main__': tf.test.main()
8,009
36.083333
80
py
keras
keras-master/keras/regularizers.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in regularizers.""" import tensorflow.compat.v2 as tf # pylint: disable=invalid-name import math from keras import backend from keras.utils.generic_utils import deserialize_keras_object from keras.utils.generic_utils import serialize_keras_object from tensorflow.python.util.tf_export import keras_export def _check_penalty_number(x): """check penalty number availability, raise ValueError if failed.""" if not isinstance(x, (float, int)): raise ValueError( f'Value: {x} is not a valid regularization penalty number, ' 'expected an int or float value') if math.isinf(x) or math.isnan(x): raise ValueError( f'Value: {x} is not a valid regularization penalty number, ' 'an infinity nubmer or NaN are not valid value') def _none_to_default(inputs, default): return default if inputs is None else default @keras_export('keras.regularizers.Regularizer') class Regularizer: """Regularizer base class. Regularizers allow you to apply penalties on layer parameters or layer activity during optimization. These penalties are summed into the loss function that the network optimizes. Regularization penalties are applied on a per-layer basis. The exact API will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and `Conv3D`) have a unified API. 
  These layers expose 3 keyword arguments:

  - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
  - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
  - `activity_regularizer`: Regularizer to apply a penalty on the layer's output

  All layers (including custom layers) expose `activity_regularizer` as a
  settable property, whether or not it is in the constructor arguments.

  The value returned by the `activity_regularizer` is divided by the input
  batch size so that the relative weighting between the weight regularizers
  and the activity regularizers does not change with the batch size.

  You can access a layer's regularization penalties by calling `layer.losses`
  after calling the layer on inputs.

  ## Example

  >>> layer = tf.keras.layers.Dense(
  ...     5, input_dim=5,
  ...     kernel_initializer='ones',
  ...     kernel_regularizer=tf.keras.regularizers.L1(0.01),
  ...     activity_regularizer=tf.keras.regularizers.L2(0.01))
  >>> tensor = tf.ones(shape=(5, 5)) * 2.0
  >>> out = layer(tensor)

  >>> # The kernel regularization term is 0.25
  >>> # The activity regularization term (after dividing by the batch size) is 5
  >>> tf.math.reduce_sum(layer.losses)
  <tf.Tensor: shape=(), dtype=float32, numpy=5.25>

  ## Available penalties

  ```python
  tf.keras.regularizers.L1(0.3)  # L1 Regularization Penalty
  tf.keras.regularizers.L2(0.1)  # L2 Regularization Penalty
  tf.keras.regularizers.L1L2(l1=0.01, l2=0.01)  # L1 + L2 penalties
  ```

  ## Directly calling a regularizer

  Compute a regularization loss on a tensor by directly calling a regularizer
  as if it is a one-argument function.

  E.g.
  >>> regularizer = tf.keras.regularizers.L2(2.)
  >>> tensor = tf.ones(shape=(5, 5))
  >>> regularizer(tensor)
  <tf.Tensor: shape=(), dtype=float32, numpy=50.0>

  ## Developing new regularizers

  Any function that takes in a weight matrix and returns a scalar
  tensor can be used as a regularizer, e.g.:

  >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
  ... def l1_reg(weight_matrix):
  ...    return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
  ...
  >>> layer = tf.keras.layers.Dense(5, input_dim=5,
  ...     kernel_initializer='ones', kernel_regularizer=l1_reg)
  >>> tensor = tf.ones(shape=(5, 5))
  >>> out = layer(tensor)
  >>> layer.losses
  [<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]

  Alternatively, you can write your custom regularizers in an
  object-oriented way by extending this regularizer base class, e.g.:

  >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
  ... class L2Regularizer(tf.keras.regularizers.Regularizer):
  ...   def __init__(self, l2=0.):  # pylint: disable=redefined-outer-name
  ...     self.l2 = l2
  ...
  ...   def __call__(self, x):
  ...     return self.l2 * tf.math.reduce_sum(tf.math.square(x))
  ...
  ...   def get_config(self):
  ...     return {'l2': float(self.l2)}
  ...
  >>> layer = tf.keras.layers.Dense(
  ...   5, input_dim=5, kernel_initializer='ones',
  ...   kernel_regularizer=L2Regularizer(l2=0.5))

  >>> tensor = tf.ones(shape=(5, 5))
  >>> out = layer(tensor)
  >>> layer.losses
  [<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]

  ### A note on serialization and deserialization:

  Registering the regularizers as serializable is optional if you are just
  training and executing models, exporting to and from SavedModels, or saving
  and loading weight checkpoints.

  Registration is required for Keras `model_to_estimator`, saving and
  loading models to HDF5 formats, Keras model cloning, some visualization
  utilities, and exporting models to and from JSON. If using this
  functionality, you must make sure any python process running your model has
  also defined and registered your custom regularizer.

  `tf.keras.utils.register_keras_serializable` is only available in TF 2.1 and
  beyond. In earlier versions of TensorFlow you must pass your custom
  regularizer to the `custom_objects` argument of methods that expect custom
  regularizers to be registered as serializable.
  """

  def __call__(self, x):
    """Compute a regularization penalty from an input tensor."""
    # Base class is a no-op penalty; subclasses override this.
    return 0.

  @classmethod
  def from_config(cls, config):
    """Creates a regularizer from its config.

    This method is the reverse of `get_config`,
    capable of instantiating the same regularizer from the config
    dictionary.

    This method is used by Keras `model_to_estimator`, saving and
    loading models to HDF5 formats, Keras model cloning, some visualization
    utilities, and exporting models to and from JSON.

    Args:
        config: A Python dictionary, typically the output of get_config.

    Returns:
        A regularizer instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the config of the regularizer.

    An regularizer config is a Python dictionary (serializable)
    containing all configuration parameters of the regularizer.
    The same regularizer can be reinstantiated later
    (without any saved state) from this configuration.

    This method is optional if you are just training and executing models,
    exporting to and from SavedModels, or using weight checkpoints.

    This method is required for Keras `model_to_estimator`, saving and
    loading models to HDF5 formats, Keras model cloning, some visualization
    utilities, and exporting models to and from JSON.

    Returns:
        Python dictionary.
    """
    raise NotImplementedError(f'{self} does not implement get_config()')


@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
  """A regularizer that applies both L1 and L2 regularization penalties.

  The L1 regularization penalty is computed as:
  `loss = l1 * reduce_sum(abs(x))`

  The L2 regularization penalty is computed as
  `loss = l2 * reduce_sum(square(x))`

  L1L2 may be passed to a layer as a string identifier:

  >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2')

  In this case, the default values used are `l1=0.01` and `l2=0.01`.

  Attributes:
      l1: Float; L1 regularization factor.
      l2: Float; L2 regularization factor.
  """

  def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
    # The default value for l1 and l2 are different from the value in l1_l2
    # for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2
    # and no l1 penalty.
    l1 = 0. if l1 is None else l1
    l2 = 0. if l2 is None else l2
    _check_penalty_number(l1)
    _check_penalty_number(l2)

    self.l1 = backend.cast_to_floatx(l1)
    self.l2 = backend.cast_to_floatx(l2)

  def __call__(self, x):
    # Accumulate each enabled penalty; a zero factor skips the term entirely.
    regularization = backend.constant(0., dtype=x.dtype)
    if self.l1:
      regularization += self.l1 * tf.reduce_sum(tf.abs(x))
    if self.l2:
      regularization += self.l2 * tf.reduce_sum(tf.square(x))
    return regularization

  def get_config(self):
    return {'l1': float(self.l1), 'l2': float(self.l2)}


@keras_export('keras.regularizers.L1', 'keras.regularizers.l1')
class L1(Regularizer):
  """A regularizer that applies a L1 regularization penalty.

  The L1 regularization penalty is computed as:
  `loss = l1 * reduce_sum(abs(x))`

  L1 may be passed to a layer as a string identifier:

  >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1')

  In this case, the default value used is `l1=0.01`.

  Attributes:
      l1: Float; L1 regularization factor.
  """

  def __init__(self, l1=0.01, **kwargs):  # pylint: disable=redefined-outer-name
    l1 = kwargs.pop('l', l1)  # Backwards compatibility
    if kwargs:
      raise TypeError(f'Argument(s) not recognized: {kwargs}')

    l1 = 0.01 if l1 is None else l1
    _check_penalty_number(l1)

    self.l1 = backend.cast_to_floatx(l1)

  def __call__(self, x):
    return self.l1 * tf.reduce_sum(tf.abs(x))

  def get_config(self):
    return {'l1': float(self.l1)}


@keras_export('keras.regularizers.L2', 'keras.regularizers.l2')
class L2(Regularizer):
  """A regularizer that applies a L2 regularization penalty.

  The L2 regularization penalty is computed as:
  `loss = l2 * reduce_sum(square(x))`

  L2 may be passed to a layer as a string identifier:

  >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2')

  In this case, the default value used is `l2=0.01`.

  Attributes:
      l2: Float; L2 regularization factor.
  """

  def __init__(self, l2=0.01, **kwargs):  # pylint: disable=redefined-outer-name
    l2 = kwargs.pop('l', l2)  # Backwards compatibility
    if kwargs:
      raise TypeError(f'Argument(s) not recognized: {kwargs}')

    l2 = 0.01 if l2 is None else l2
    _check_penalty_number(l2)

    self.l2 = backend.cast_to_floatx(l2)

  def __call__(self, x):
    return self.l2 * tf.reduce_sum(tf.square(x))

  def get_config(self):
    return {'l2': float(self.l2)}


@keras_export('keras.regularizers.l1_l2')
def l1_l2(l1=0.01, l2=0.01):  # pylint: disable=redefined-outer-name
  r"""Create a regularizer that applies both L1 and L2 penalties.

  The L1 regularization penalty is computed as:
  `loss = l1 * reduce_sum(abs(x))`

  The L2 regularization penalty is computed as:
  `loss = l2 * reduce_sum(square(x))`

  Args:
      l1: Float; L1 regularization factor.
      l2: Float; L2 regularization factor.

  Returns:
    An L1L2 Regularizer with the given regularization factors.
  """
  return L1L2(l1=l1, l2=l2)


# Deserialization aliases.
l1 = L1
l2 = L2


@keras_export('keras.regularizers.serialize')
def serialize(regularizer):
  """Returns a JSON-serializable config for `regularizer`."""
  return serialize_keras_object(regularizer)


@keras_export('keras.regularizers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiates a regularizer from a serialized config or string name."""
  if config == 'l1_l2':
    # Special case necessary since the defaults used for "l1_l2" (string)
    # differ from those of the L1L2 class.
    return L1L2(l1=0.01, l2=0.01)
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='regularizer')


@keras_export('keras.regularizers.get')
def get(identifier):
  """Retrieve a regularizer instance from a config or identifier."""
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, str):
    return deserialize(str(identifier))
  elif callable(identifier):
    return identifier
  else:
    raise ValueError(
        f'Could not interpret regularizer identifier: {identifier}')
12,535
31.989474
80
py
keras
keras-master/keras/constraints.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-classes-have-attributes
"""Constraints: functions that impose constraints on weight values."""

import tensorflow.compat.v2 as tf

from keras import backend
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


@keras_export('keras.constraints.Constraint')
class Constraint:
  """Base class for weight constraints.

  A `Constraint` instance works like a stateless function.
  Users who subclass this
  class should override the `__call__` method, which takes a single
  weight parameter and return a projected version of that parameter
  (e.g. normalized or clipped). Constraints can be used with various Keras
  layers via the `kernel_constraint` or `bias_constraint` arguments.

  Here's a simple example of a non-negative weight constraint:

  >>> class NonNegative(tf.keras.constraints.Constraint):
  ...
  ...  def __call__(self, w):
  ...    return w * tf.cast(tf.math.greater_equal(w, 0.), w.dtype)

  >>> weight = tf.constant((-1.0, 1.0))
  >>> NonNegative()(weight)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.],
  dtype=float32)>

  >>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
  """

  def __call__(self, w):
    """Applies the constraint to the input weight variable.

    By default, the inputs weight variable is not modified.
    Users should override this method to implement their own projection
    function.

    Args:
      w: Input weight variable.

    Returns:
      Projected variable (by default, returns unmodified inputs).
    """
    return w

  def get_config(self):
    """Returns a Python dict of the object config.

    A constraint config is a Python dictionary (JSON-serializable) that can
    be used to reinstantiate the same object.

    Returns:
      Python dict containing the configuration of the constraint object.
    """
    return {}


@keras_export('keras.constraints.MaxNorm', 'keras.constraints.max_norm')
class MaxNorm(Constraint):
  """MaxNorm weight constraint.

  Constrains the weights incident to each hidden unit
  to have a norm less than or equal to a desired value.

  Also available via the shortcut function `tf.keras.constraints.max_norm`.

  Args:
    max_value: the maximum norm value for the incoming weights.
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix
      has shape `(input_dim, output_dim)`,
      set `axis` to `0` to constrain each weight vector
      of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`,
      the weight tensor has shape
      `(rows, cols, input_depth, output_depth)`,
      set `axis` to `[0, 1, 2]`
      to constrain the weights of each filter tensor of size
      `(rows, cols, input_depth)`.
  """

  def __init__(self, max_value=2, axis=0):
    self.max_value = max_value
    self.axis = axis

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    # Clip the per-unit norm to [0, max_value] and rescale the weights
    # accordingly; epsilon guards against division by a zero norm.
    norms = backend.sqrt(
        tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True))
    desired = backend.clip(norms, 0, self.max_value)
    return w * (desired / (backend.epsilon() + norms))

  @doc_controls.do_not_generate_docs
  def get_config(self):
    return {'max_value': self.max_value, 'axis': self.axis}


@keras_export('keras.constraints.NonNeg', 'keras.constraints.non_neg')
class NonNeg(Constraint):
  """Constrains the weights to be non-negative.

  Also available via the shortcut function `tf.keras.constraints.non_neg`.
  """

  def __call__(self, w):
    # Zero out any negative entries; non-negative entries pass through.
    return w * tf.cast(tf.greater_equal(w, 0.), backend.floatx())


@keras_export('keras.constraints.UnitNorm', 'keras.constraints.unit_norm')
class UnitNorm(Constraint):
  """Constrains the weights incident to each hidden unit to have unit norm.

  Also available via the shortcut function `tf.keras.constraints.unit_norm`.

  Args:
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix
      has shape `(input_dim, output_dim)`,
      set `axis` to `0` to constrain each weight vector
      of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`,
      the weight tensor has shape
      `(rows, cols, input_depth, output_depth)`,
      set `axis` to `[0, 1, 2]`
      to constrain the weights of each filter tensor of size
      `(rows, cols, input_depth)`.
  """

  def __init__(self, axis=0):
    self.axis = axis

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    # Divide by the norm along `axis`; epsilon avoids division by zero.
    return w / (
        backend.epsilon() + backend.sqrt(
            tf.reduce_sum(
                tf.square(w), axis=self.axis, keepdims=True)))

  @doc_controls.do_not_generate_docs
  def get_config(self):
    return {'axis': self.axis}


@keras_export('keras.constraints.MinMaxNorm', 'keras.constraints.min_max_norm')
class MinMaxNorm(Constraint):
  """MinMaxNorm weight constraint.

  Constrains the weights incident to each hidden unit
  to have the norm between a lower bound and an upper bound.

  Also available via the shortcut function
  `tf.keras.constraints.min_max_norm`.

  Args:
    min_value: the minimum norm for the incoming weights.
    max_value: the maximum norm for the incoming weights.
    rate: rate for enforcing the constraint: weights will be
      rescaled to yield
      `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
      Effectively, this means that rate=1.0 stands for strict
      enforcement of the constraint, while rate<1.0 means that
      weights will be rescaled at each step to slowly move
      towards a value inside the desired interval.
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix
      has shape `(input_dim, output_dim)`,
      set `axis` to `0` to constrain each weight vector
      of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`,
      the weight tensor has shape
      `(rows, cols, input_depth, output_depth)`,
      set `axis` to `[0, 1, 2]`
      to constrain the weights of each filter tensor of size
      `(rows, cols, input_depth)`.
  """

  def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
    self.min_value = min_value
    self.max_value = max_value
    self.rate = rate
    self.axis = axis

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    norms = backend.sqrt(
        tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True))
    # Interpolate between the raw norm and the clipped norm by `rate`.
    desired = (
        self.rate * backend.clip(norms, self.min_value, self.max_value) +
        (1 - self.rate) * norms)
    return w * (desired / (backend.epsilon() + norms))

  @doc_controls.do_not_generate_docs
  def get_config(self):
    return {
        'min_value': self.min_value,
        'max_value': self.max_value,
        'rate': self.rate,
        'axis': self.axis
    }


@keras_export('keras.constraints.RadialConstraint',
              'keras.constraints.radial_constraint')
class RadialConstraint(Constraint):
  """Constrains `Conv2D` kernel weights to be the same for each radius.

  Also available via the shortcut function
  `tf.keras.constraints.radial_constraint`.

  For example, the desired output for the following 4-by-4 kernel:

  ```
      kernel = [[v_00, v_01, v_02, v_03],
                [v_10, v_11, v_12, v_13],
                [v_20, v_21, v_22, v_23],
                [v_30, v_31, v_32, v_33]]
  ```

  is this::

  ```
      kernel = [[v_11, v_11, v_11, v_11],
                [v_11, v_33, v_33, v_11],
                [v_11, v_33, v_33, v_11],
                [v_11, v_11, v_11, v_11]]
  ```

  This constraint can be applied to any `Conv2D` layer version, including
  `Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"`
  or `"channels_first"` data format. The method assumes the weight tensor is
  of shape `(rows, cols, input_depth, output_depth)`.
  """

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    w_shape = w.shape
    if w_shape.rank is None or w_shape.rank != 4:
      raise ValueError(
          'The weight tensor must have rank 4. '
          f'Received weight tensor with shape: {w_shape}')

    height, width, channels, kernels = w_shape
    # Collapse the channel/kernel axes so each 2-D kernel slice can be
    # constrained independently, then restore the original shape.
    w = backend.reshape(w, (height, width, channels * kernels))
    # TODO(cpeter): Switch map_fn for a faster tf.vectorized_map once
    # backend.switch is supported.
    w = backend.map_fn(
        self._kernel_constraint,
        backend.stack(tf.unstack(w, axis=-1), axis=0))
    return backend.reshape(backend.stack(tf.unstack(w, axis=0), axis=-1),
                           (height, width, channels, kernels))

  def _kernel_constraint(self, kernel):
    """Radially constraints a kernel with shape (height, width, channels)."""
    padding = backend.constant([[1, 1], [1, 1]], dtype='int32')

    kernel_shape = backend.shape(kernel)[0]
    start = backend.cast(kernel_shape / 2, 'int32')

    # Seed with the center element (odd kernels) or center 2x2 (even kernels),
    # then grow outward ring by ring via tf.pad in the while_loop below.
    kernel_new = backend.switch(
        backend.cast(tf.math.floormod(kernel_shape, 2), 'bool'),
        lambda: kernel[start - 1:start, start - 1:start],
        lambda: kernel[start - 1:start, start - 1:start] + backend.zeros(  # pylint: disable=g-long-lambda
            (2, 2), dtype=kernel.dtype))
    index = backend.switch(
        backend.cast(tf.math.floormod(kernel_shape, 2), 'bool'),
        lambda: backend.constant(0, dtype='int32'),
        lambda: backend.constant(1, dtype='int32'))
    while_condition = lambda index, *args: backend.less(index, start)

    def body_fn(i, array):
      return i + 1, tf.pad(
          array, padding, constant_values=kernel[start + i, start + i])

    _, kernel_new = tf.compat.v1.while_loop(
        while_condition,
        body_fn, [index, kernel_new],
        shape_invariants=[index.get_shape(),
                          tf.TensorShape([None, None])])
    return kernel_new


# Aliases.

max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
radial_constraint = RadialConstraint

# Legacy aliases.
maxnorm = max_norm nonneg = non_neg unitnorm = unit_norm @keras_export('keras.constraints.serialize') def serialize(constraint): return serialize_keras_object(constraint) @keras_export('keras.constraints.deserialize') def deserialize(config, custom_objects=None): return deserialize_keras_object( config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='constraint') @keras_export('keras.constraints.get') def get(identifier): """Retrieves a Keras constraint function.""" if identifier is None: return None if isinstance(identifier, dict): return deserialize(identifier) elif isinstance(identifier, str): config = {'class_name': str(identifier), 'config': {}} return deserialize(config) elif callable(identifier): return identifier else: raise ValueError( f'Could not interpret constraint function identifier: {identifier}')
11,814
32.853868
106
py
keras
keras-master/keras/backend.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access # pylint: disable=redefined-outer-name # pylint: disable=redefined-builtin # pylint: disable=g-classes-have-attributes """Keras backend API.""" import tensorflow.compat.v2 as tf import collections import itertools import json import os import sys import threading import warnings import weakref import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python.eager import context from tensorflow.python.eager.context import get_config from tensorflow.python.framework import config from keras import backend_config from keras.distribute import distribute_coordinator_utils as dc from keras.engine import keras_tensor from keras.utils import control_flow_util from keras.utils import object_identity from keras.utils import tf_contextlib from keras.utils import tf_inspect from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls py_all = all py_sum = sum py_any = any # INTERNAL UTILS # The internal graph maintained by Keras and used by the symbolic Keras APIs # while executing eagerly (such as the functional API for model-building). 
# This is thread-local to allow building separate models in different threads # concurrently, but comes at the cost of not being able to build one model # across threads. _GRAPH = threading.local() # A graph which is used for constructing functions in eager mode. _CURRENT_SCRATCH_GRAPH = threading.local() # This is a thread local object that will hold the default internal TF session # used by Keras. It can be set manually via `set_session(sess)`. class SessionLocal(threading.local): def __init__(self): super().__init__() self.session = None _SESSION = SessionLocal() # A global dictionary mapping graph objects to an index of counters used # for various layer/optimizer names in each graph. # Allows to give unique autogenerated names to layers, in a graph-specific way. PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary() # A global set tracking what object names have been seen so far. # Optionally used as an avoid-list when generating names OBSERVED_NAMES = set() # _DUMMY_EAGER_GRAPH.key is used as a key in _GRAPH_LEARNING_PHASES. # We keep a separate reference to it to make sure it does not get removed from # _GRAPH_LEARNING_PHASES. # _DummyEagerGraph inherits from threading.local to make its `key` attribute # thread local. This is needed to make set_learning_phase affect only the # current thread during eager execution (see b/123096885 for more details). class _DummyEagerGraph(threading.local): """_DummyEagerGraph provides a thread local `key` attribute. We can't use threading.local directly, i.e. without subclassing, because gevent monkey patches threading.local and its version does not support weak references. """ class _WeakReferencableClass: """This dummy class is needed for two reasons. - We need something that supports weak references. Basic types like string and ints don't. - We need something whose hash and equality are based on object identity to make sure they are treated as different keys to _GRAPH_LEARNING_PHASES. 
An empty Python class satisfies both of these requirements. """ pass def __init__(self): # Constructors for classes subclassing threading.local run once # per thread accessing something in the class. Thus, each thread will # get a different key. super(_DummyEagerGraph, self).__init__() self.key = _DummyEagerGraph._WeakReferencableClass() self.learning_phase_is_set = False _DUMMY_EAGER_GRAPH = _DummyEagerGraph() # This boolean flag can be set to True to leave variable initialization # up to the user. # Change its value via `manual_variable_initialization(value)`. _MANUAL_VAR_INIT = False # This list holds the available devices. # It is populated when `_get_available_gpus()` is called for the first time. # We assume our devices don't change henceforth. _LOCAL_DEVICES = None # The below functions are kept accessible from backend for compatibility. epsilon = backend_config.epsilon floatx = backend_config.floatx image_data_format = backend_config.image_data_format set_epsilon = backend_config.set_epsilon set_floatx = backend_config.set_floatx set_image_data_format = backend_config.set_image_data_format @keras_export('keras.backend.backend') @doc_controls.do_not_generate_docs def backend(): """Publicly accessible method for determining the current backend. Only exists for API compatibility with multi-backend Keras. Returns: The string "tensorflow". """ return 'tensorflow' @keras_export('keras.backend.cast_to_floatx') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def cast_to_floatx(x): """Cast a Numpy array to the default Keras float type. Args: x: Numpy array or TensorFlow tensor. Returns: The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor if `x` was a tensor), cast to its new type. 
Example: >>> tf.keras.backend.floatx() 'float32' >>> arr = np.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = cast_to_floatx(arr) >>> new_arr array([1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32') """ if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)): return tf.cast(x, dtype=floatx()) return np.asarray(x, dtype=floatx()) @keras_export('keras.backend.get_uid') def get_uid(prefix=''): """Associates a string prefix with an integer counter in a TensorFlow graph. Args: prefix: String prefix to index. Returns: Unique integer ID. Example: >>> get_uid('dense') 1 >>> get_uid('dense') 2 """ graph = get_graph() if graph not in PER_GRAPH_OBJECT_NAME_UIDS: PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int) layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph] layer_name_uids[prefix] += 1 return layer_name_uids[prefix] @keras_export('keras.backend.reset_uids') def reset_uids(): """Resets graph identifiers. """ PER_GRAPH_OBJECT_NAME_UIDS.clear() OBSERVED_NAMES.clear() @keras_export('keras.backend.clear_session') def clear_session(): """Resets all state generated by Keras. Keras manages a global state, which it uses to implement the Functional model-building API and to uniquify autogenerated layer names. If you are creating many models in a loop, this global state will consume an increasing amount of memory over time, and you may want to clear it. Calling `clear_session()` releases the global state: this helps avoid clutter from old models and layers, especially when memory is limited. 
Example 1: calling `clear_session()` when creating models in a loop ```python for _ in range(100): # Without `clear_session()`, each iteration of this loop will # slightly increase the size of the global state managed by Keras model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)]) for _ in range(100): # With `clear_session()` called at the beginning, # Keras starts with a blank state at each iteration # and memory consumption is constant over time. tf.keras.backend.clear_session() model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)]) ``` Example 2: resetting the layer name generation counter >>> import tensorflow as tf >>> layers = [tf.keras.layers.Dense(10) for _ in range(10)] >>> new_layer = tf.keras.layers.Dense(10) >>> print(new_layer.name) dense_10 >>> tf.keras.backend.set_learning_phase(1) >>> print(tf.keras.backend.learning_phase()) 1 >>> tf.keras.backend.clear_session() >>> new_layer = tf.keras.layers.Dense(10) >>> print(new_layer.name) dense """ global _SESSION global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned global _GRAPH _GRAPH.graph = None tf.compat.v1.reset_default_graph() reset_uids() if _SESSION.session is not None: _SESSION.session.close() _SESSION.session = None graph = get_graph() with graph.as_default(): _DUMMY_EAGER_GRAPH.learning_phase_is_set = False _GRAPH_LEARNING_PHASES.clear() # Create the learning phase placeholder in graph using the default factory. _GRAPH_LEARNING_PHASES.setdefault(graph) _GRAPH_VARIABLES.pop(graph, None) _GRAPH_TF_OPTIMIZERS.pop(graph, None) if tf.executing_eagerly(): # Clear pending nodes in eager executors, kernel caches and step_containers. context.context().clear_kernel_cache() # Inject the clear_session function to keras_deps to remove the dependency # from TFLite to Keras. 
tf.__internal__.register_clear_session_function(clear_session) @keras_export('keras.backend.manual_variable_initialization') @doc_controls.do_not_generate_docs def manual_variable_initialization(value): """Sets the manual variable initialization flag. This boolean flag determines whether variables should be initialized as they are instantiated (default), or if the user should handle the initialization (e.g. via `tf.compat.v1.initialize_all_variables()`). Args: value: Python boolean. """ global _MANUAL_VAR_INIT _MANUAL_VAR_INIT = value @keras_export('keras.backend.learning_phase') @doc_controls.do_not_generate_docs def learning_phase(): """Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. Returns: Learning phase (scalar integer tensor or Python integer). """ graph = tf.compat.v1.get_default_graph() if graph is getattr(_GRAPH, 'graph', None): # Don't enter an init_scope for the learning phase if eager execution # is enabled but we're inside the Keras workspace graph. learning_phase = symbolic_learning_phase() else: with tf.init_scope(): # We always check & set the learning phase inside the init_scope, # otherwise the wrong default_graph will be used to look up the learning # phase inside of functions & defuns. # # This is because functions & defuns (both in graph & in eager mode) # will always execute non-eagerly using a function-specific default # subgraph. learning_phase = _GRAPH_LEARNING_PHASES[None] _mark_func_graph_as_unsaveable(graph, learning_phase) return learning_phase def global_learning_phase_is_set(): return _DUMMY_EAGER_GRAPH.learning_phase_is_set def _mark_func_graph_as_unsaveable(graph, learning_phase): """Mark func graph as unsaveable due to use of symbolic keras learning phase. Functions that capture the symbolic learning phase cannot be exported to SavedModel. 
Mark the funcgraph as unsaveable, so that an error will be raised if it is exported. Args: graph: Graph or FuncGraph object. learning_phase: Learning phase placeholder or int defined in the graph. """ if graph.building_function and is_placeholder(learning_phase): graph.mark_as_unsaveable( 'The keras learning phase placeholder was used inside a function. ' 'Exporting placeholders is not supported when saving out a SavedModel. ' 'Please call `tf.keras.backend.set_learning_phase(0)` in the function ' 'to set the learning phase to a constant value.') def symbolic_learning_phase(): graph = get_graph() with graph.as_default(): return _GRAPH_LEARNING_PHASES[graph] def _default_learning_phase(): if tf.executing_eagerly(): return 0 else: with name_scope(''): return tf.compat.v1.placeholder_with_default( False, shape=(), name='keras_learning_phase') @keras_export('keras.backend.set_learning_phase') @doc_controls.do_not_generate_docs def set_learning_phase(value): """Sets the learning phase to a fixed value. The backend learning phase affects any code that calls `backend.learning_phase()` In particular, all Keras built-in layers use the learning phase as the default for the `training` arg to `Layer.__call__`. User-written layers and models can achieve the same behavior with code that looks like: ```python def call(self, inputs, training=None): if training is None: training = backend.learning_phase() ``` Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Raises: ValueError: if `value` is neither `0` nor `1`. """ warnings.warn('`tf.keras.backend.set_learning_phase` is deprecated and ' 'will be removed after 2020-10-11. To update it, simply ' 'pass a True/False value to the `training` argument of the ' '`__call__` method of your layer or model.') deprecated_internal_set_learning_phase(value) def deprecated_internal_set_learning_phase(value): """A deprecated internal implementation of set_learning_phase. 
This method is an internal-only version of `set_learning_phase` that does not raise a deprecation error. It is required because saved_model needs to keep working with user code that uses the deprecated learning phase methods until those APIs are fully removed from the public API. Specifically SavedModel saving needs to make sure the learning phase is 0 during tracing even if users overwrote it to a different value. But, we don't want to raise deprecation warnings for users when savedmodel sets learning phase just for compatibility with code that relied on explicitly setting the learning phase for other values. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Raises: ValueError: if `value` is neither `0` nor `1`. """ global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned if value not in {0, 1}: raise ValueError('Expected learning phase to be 0 or 1.') with tf.init_scope(): if tf.executing_eagerly(): # In an eager context, the learning phase values applies to both the eager # context and the internal Keras graph. _DUMMY_EAGER_GRAPH.learning_phase_is_set = True _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value _GRAPH_LEARNING_PHASES[get_graph()] = value @keras_export('keras.backend.learning_phase_scope') @tf_contextlib.contextmanager @doc_controls.do_not_generate_docs def learning_phase_scope(value): """Provides a scope within which the learning phase is equal to `value`. The learning phase gets restored to its original value upon exiting the scope. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if `value` is neither `0` nor `1`. """ warnings.warn('`tf.keras.backend.learning_phase_scope` is deprecated and ' 'will be removed after 2020-10-11. 
To update it, simply ' 'pass a True/False value to the `training` argument of the ' '`__call__` method of your layer or model.') with deprecated_internal_learning_phase_scope(value): try: yield finally: pass @tf_contextlib.contextmanager def deprecated_internal_learning_phase_scope(value): """An internal-only version of `learning_phase_scope`. Unlike the public method, this method does not raise a deprecation warning. This is needed because saved model saving needs to set learning phase to maintain compatibility with code that sets/gets the learning phase, but saved model saving itself shouldn't raise a deprecation warning. We can get rid of this method and its usages when the public API is removed. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if `value` is neither `0` nor `1`. """ global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned if value not in {0, 1}: raise ValueError('Expected learning phase to be 0 or 1.') with tf.init_scope(): if tf.executing_eagerly(): previous_eager_value = _GRAPH_LEARNING_PHASES.get( _DUMMY_EAGER_GRAPH.key, None) previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None) learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set try: deprecated_internal_set_learning_phase(value) yield finally: # Restore learning phase to initial value. 
    # The enclosing scope was the first one to set a learning phase: clear
    # the "is set" flag so global_learning_phase_is_set() reports unset again.
    if not learning_phase_previously_set:
      _DUMMY_EAGER_GRAPH.learning_phase_is_set = False
    with tf.init_scope():
      if tf.executing_eagerly():
        # Restore (or remove) the eager-context entry.
        if previous_eager_value is not None:
          _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value
        elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:
          del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]

      # Restore (or remove) the entry keyed by the internal Keras graph.
      graph = get_graph()
      if previous_graph_value is not None:
        _GRAPH_LEARNING_PHASES[graph] = previous_graph_value
      elif graph in _GRAPH_LEARNING_PHASES:
        del _GRAPH_LEARNING_PHASES[graph]


@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
  """Internal scope that sets the learning phase in eager / tf.function only.

  Args:
    value: Learning phase value, either 0 or 1 (integers).
           0 = test, 1 = train

  Yields:
    None.

  Raises:
    ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  assert value in {0, 1}
  assert tf.compat.v1.executing_eagerly_outside_functions()
  global_learning_phase_was_set = global_learning_phase_is_set()
  # `previous_value` is only bound when a phase was already set globally; the
  # `finally` block below only reads it under that same condition.
  if global_learning_phase_was_set:
    previous_value = learning_phase()
  try:
    _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value
    yield
  finally:
    # Restore learning phase to initial value or unset.
    if global_learning_phase_was_set:
      _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_value
    else:
      del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]


def _as_graph_element(obj):
  """Convert `obj` to a graph element if possible, otherwise return `None`.

  Args:
    obj: Object to convert.

  Returns:
    The result of `obj._as_graph_element()` if that method is available;
    otherwise `None`.
  """
  # Duck-typed: any object exposing a callable `_as_graph_element` qualifies.
  conv_fn = getattr(obj, '_as_graph_element', None)
  if conv_fn and callable(conv_fn):
    return conv_fn()
  return None


def _assert_same_graph(original_item, item):
  """Fail if the 2 items are from different graphs.

  Args:
    original_item: Original item to check against.
    item: Item to check.

  Raises:
    ValueError: if graphs do not match.
""" original_graph = getattr(original_item, 'graph', None) graph = getattr(item, 'graph', None) if original_graph and graph and original_graph is not graph: raise ValueError( '%s must be from the same graph as %s (graphs are %s and %s).' % (item, original_item, graph, original_graph)) def _current_graph(op_input_list, graph=None): """Returns the appropriate graph to use for the given inputs. This library method provides a consistent algorithm for choosing the graph in which an Operation should be constructed: 1. If the default graph is being used to construct a function, we use the default graph. 2. If the "graph" is specified explicitly, we validate that all of the inputs in "op_input_list" are compatible with that graph. 3. Otherwise, we attempt to select a graph from the first Operation- or Tensor-valued input in "op_input_list", and validate that all other such inputs are in the same graph. 4. If the graph was not specified and it could not be inferred from "op_input_list", we attempt to use the default graph. Args: op_input_list: A list of inputs to an operation, which may include `Tensor`, `Operation`, and other objects that may be converted to a graph element. graph: (Optional) The explicit graph to use. Raises: TypeError: If op_input_list is not a list or tuple, or if graph is not a Graph. ValueError: If a graph is explicitly passed and not all inputs are from it, or if the inputs are from multiple graphs, or we could not find a graph and there was no default graph. Returns: The appropriate graph to use for the given inputs. """ current_default_graph = tf.compat.v1.get_default_graph() if current_default_graph.building_function: return current_default_graph op_input_list = tuple(op_input_list) # Handle generators correctly if graph and not isinstance(graph, tf.Graph): raise TypeError('Input graph needs to be a Graph: %s' % (graph,)) # 1. We validate that all of the inputs are from the same graph. 
This is # either the supplied graph parameter, or the first one selected from one # the graph-element-valued inputs. In the latter case, we hold onto # that input in original_graph_element so we can provide a more # informative error if a mismatch is found. original_graph_element = None for op_input in op_input_list: # Determine if this is a valid graph_element. # TODO(joshl): Note that we exclude subclasses of Tensor. Need to clean this # up. if (isinstance(op_input, ( tf.Operation, tf.Tensor, tf.__internal__.CompositeTensor)) and ((not isinstance(op_input, tf.Tensor)) or type(op_input) == tf.Tensor)): # pylint: disable=unidiomatic-typecheck graph_element = op_input else: graph_element = _as_graph_element(op_input) if graph_element is not None: if not graph: original_graph_element = graph_element graph = getattr(graph_element, 'graph', None) elif original_graph_element is not None: _assert_same_graph(original_graph_element, graph_element) elif graph_element.graph is not graph: raise ValueError('%s is not from the passed-in graph.' % graph_element) # 2. If all else fails, we use the default graph, which is always there. return graph or current_default_graph def _get_session(op_input_list=()): """Returns the session object for the current thread.""" global _SESSION default_session = tf.compat.v1.get_default_session() if default_session is not None: session = default_session else: if tf.inside_function(): raise RuntimeError('Cannot get session inside Tensorflow graph function.') # If we don't have a session, or that session does not match the current # graph, create and cache a new session. if (getattr(_SESSION, 'session', None) is None or _SESSION.session.graph is not _current_graph(op_input_list)): # If we are creating the Session inside a tf.distribute.Strategy scope, # we ask the strategy for the right session options to use. 
if tf.distribute.has_strategy(): configure_and_create_distributed_session( tf.distribute.get_strategy()) else: _SESSION.session = tf.compat.v1.Session( config=get_default_session_config()) session = _SESSION.session return session @keras_export(v1=['keras.backend.get_session']) def get_session(op_input_list=()): """Returns the TF session to be used by the backend. If a default TensorFlow session is available, we will return it. Else, we will return the global Keras session assuming it matches the current graph. If no global Keras session exists at this point: we will create a new global session. Note that you can manually set the global session via `K.set_session(sess)`. Args: op_input_list: An option sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used. Returns: A TensorFlow session. """ session = _get_session(op_input_list) if not _MANUAL_VAR_INIT: with session.graph.as_default(): _initialize_variables(session) return session # Inject the get_session function to keras_deps to remove the dependency # from TFLite to Keras. tf.__internal__.register_get_session_function(get_session) # Inject the get_session function to tracking_util to avoid the backward # dependency from TF to Keras. tf.__internal__.tracking.register_session_provider(get_session) def get_graph(): if tf.executing_eagerly(): global _GRAPH if not getattr(_GRAPH, 'graph', None): _GRAPH.graph = tf.__internal__.FuncGraph('keras_graph') return _GRAPH.graph else: return tf.compat.v1.get_default_graph() @tf_contextlib.contextmanager def _scratch_graph(graph=None): """Retrieve a shared and temporary func graph. The eager execution path lifts a subgraph from the keras global graph into a scratch graph in order to create a function. DistributionStrategies, in turn, constructs multiple functions as well as a final combined function. In order for that logic to work correctly, all of the functions need to be created on the same scratch FuncGraph. 
Args: graph: A graph to be used as the current scratch graph. If not set then a scratch graph will either be retrieved or created: Yields: The current scratch graph. """ global _CURRENT_SCRATCH_GRAPH scratch_graph = getattr(_CURRENT_SCRATCH_GRAPH, 'graph', None) # If scratch graph and `graph` are both configured, they must match. if (scratch_graph is not None and graph is not None and scratch_graph is not graph): raise ValueError('Multiple scratch graphs specified.') if scratch_graph: yield scratch_graph return graph = graph or tf.__internal__.FuncGraph('keras_scratch_graph') try: _CURRENT_SCRATCH_GRAPH.graph = graph yield graph finally: _CURRENT_SCRATCH_GRAPH.graph = None @keras_export(v1=['keras.backend.set_session']) def set_session(session): """Sets the global TensorFlow session. Args: session: A TF Session. """ global _SESSION _SESSION.session = session def get_default_session_config(): if os.environ.get('OMP_NUM_THREADS'): logging.warning( 'OMP_NUM_THREADS is no longer used by the default Keras config. ' 'To configure the number of threads, use tf.config.threading APIs.') config = get_config() config.allow_soft_placement = True return config def get_default_graph_uid_map(): graph = tf.compat.v1.get_default_graph() name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None) if name_uid_map is None: name_uid_map = collections.defaultdict(int) PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map return name_uid_map # DEVICE MANIPULATION class _TfDeviceCaptureOp: """Class for capturing the TF device scope.""" def __init__(self): self.device = None def _set_device(self, device): """This method captures TF's explicit device scope setting.""" if isinstance(device, tf.DeviceSpec): device = device.to_string() self.device = device def _set_device_from_string(self, device_str): self.device = device_str def _get_current_tf_device(): """Return explicit device of current context, otherwise returns `None`. 
  Returns:
    If the current device scope is explicitly set, it returns a string with
    the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
    return `None`.
  """
  graph = get_graph()
  # NOTE(review): `_apply_device_functions` is a private Graph API — it runs
  # the graph's active device functions against the capture op, which records
  # the resulting device string instead of building a real op.
  op = _TfDeviceCaptureOp()
  graph._apply_device_functions(op)
  if tf.__internal__.tf2.enabled():
    return tf.DeviceSpec.from_string(op.device)
  else:
    return tf.compat.v1.DeviceSpec.from_string(op.device)


def _is_current_explicit_device(device_type):
  """Check if the current device is explicitly set on the device type specified.

  Args:
    device_type: A string containing `GPU` or `CPU` (case-insensitive).

  Returns:
    A boolean indicating if the current device scope is explicitly set on the
    device type.

  Raises:
    ValueError: If the `device_type` string indicates an unsupported device.
  """
  device_type = device_type.upper()
  if device_type not in ['CPU', 'GPU']:
    raise ValueError('`device_type` should be either "CPU" or "GPU".')
  device = _get_current_tf_device()
  return device is not None and device.device_type == device_type.upper()


def _get_available_gpus():
  """Get a list of available GPU devices (formatted as strings).

  Returns:
    A list of available GPU devices.
  """
  if tf.compat.v1.executing_eagerly_outside_functions():
    # Returns names of devices directly.
    return [d.name for d in tf.config.list_logical_devices('GPU')]

  # Graph mode: query the session once and memoize the result in the
  # module-level `_LOCAL_DEVICES`.
  global _LOCAL_DEVICES
  if _LOCAL_DEVICES is None:
    _LOCAL_DEVICES = get_session().list_devices()
  return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']


def _has_nchw_support():
  """Check whether the current scope supports NCHW ops.

  TensorFlow does not support NCHW on CPU. Therefore we check if we are not
  explicitly put on CPU, and have GPUs available. In this case there will be
  soft-placing on the GPU device.
  Returns:
    bool: if the current scope device placement would support nchw
  """
  explicitly_on_cpu = _is_current_explicit_device('CPU')
  gpus_available = bool(_get_available_gpus())
  return not explicitly_on_cpu and gpus_available


# VARIABLE MANIPULATION


def _constant_to_tensor(x, dtype):
  """Convert the input `x` to a tensor of type `dtype`.

  This is slightly faster than the _to_tensor function, at the cost of
  handling fewer cases.

  Args:
    x: An object to be converted (numpy arrays, floats, ints and lists of
      them).
    dtype: The destination type.

  Returns:
    A tensor.
  """
  return tf.constant(x, dtype=dtype)


def _to_tensor(x, dtype):
  """Convert the input `x` to a tensor of type `dtype`.

  Args:
    x: An object to be converted (numpy array, list, tensors).
    dtype: The destination type.

  Returns:
    A tensor.
  """
  return tf.convert_to_tensor(x, dtype=dtype)


@keras_export('keras.backend.is_sparse')
@doc_controls.do_not_generate_docs
def is_sparse(tensor):
  """Returns whether a tensor is a sparse tensor.

  Args:
    tensor: A tensor instance.

  Returns:
    A boolean.

  Example:

  >>> a = tf.keras.backend.placeholder((2, 2), sparse=False)
  >>> print(tf.keras.backend.is_sparse(a))
  False
  >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
  >>> print(tf.keras.backend.is_sparse(b))
  True
  """
  # Check the type spec first so objects that carry a `_type_spec` (e.g.
  # composite-tensor wrappers) are classified by their spec rather than by
  # their concrete Python class.
  spec = getattr(tensor, '_type_spec', None)
  if spec is not None:
    return isinstance(spec, tf.SparseTensorSpec)
  return isinstance(tensor, tf.SparseTensor)


@keras_export('keras.backend.to_dense')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def to_dense(tensor):
  """Converts a sparse tensor into a dense tensor and returns it.

  Args:
    tensor: A tensor instance (potentially sparse).

  Returns:
    A dense tensor.
Examples: >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True >>> c = tf.keras.backend.to_dense(b) >>> print(tf.keras.backend.is_sparse(c)) False """ if is_sparse(tensor): return tf.sparse.to_dense(tensor) else: return tensor @keras_export('keras.backend.name_scope', v1=[]) @doc_controls.do_not_generate_docs def name_scope(name): """A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called `my_op`: def my_op(a): with tf.name_scope("MyOp") as scope: a = tf.convert_to_tensor(a, name="a") # Define some computation that uses `a`. return foo_op(..., name=scope) When executed, the Tensor `a` will have the name `MyOp/a`. Args: name: The prefix to use on all names created within the name scope. Returns: Name scope context manager. """ return tf.name_scope(name) # Export V1 version. _v1_name_scope = tf.compat.v1.name_scope keras_export(v1=['keras.backend.name_scope'], allow_multiple_exports=True)(_v1_name_scope) @keras_export('keras.backend.variable') @doc_controls.do_not_generate_docs def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar) <tf.Variable 'example_var:...' 
shape=(2, 2) dtype=float64, numpy= array([[1., 2.], [3., 4.]])> """ if dtype is None: dtype = floatx() if hasattr(value, 'tocoo'): sparse_coo = value.tocoo() indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims( sparse_coo.col, 1)), 1) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, 'shape'): v._keras_shape = int_shape(value) track_variable(v) return v def track_tf_optimizer(tf_optimizer): """Tracks the given TF optimizer for initialization of its variables.""" if tf.executing_eagerly(): return optimizers = _GRAPH_TF_OPTIMIZERS[None] optimizers.add(tf_optimizer) @keras_export('keras.__internal__.backend.track_variable', v1=[]) def track_variable(v): """Tracks the given variable for initialization.""" if tf.executing_eagerly(): return graph = v.graph if hasattr(v, 'graph') else get_graph() _GRAPH_VARIABLES[graph].add(v) def observe_object_name(name): """Observe a name and make sure it won't be used by `unique_object_name`.""" OBSERVED_NAMES.add(name) def unique_object_name(name, name_uid_map=None, avoid_names=None, namespace='', zero_based=False, avoid_observed_names=False): """Makes a object name (or arbitrary string) unique within a TensorFlow graph. Args: name: String name to make unique. name_uid_map: An optional defaultdict(int) to use when creating unique names. If None (default), uses a per-Graph dictionary. avoid_names: An optional set or dict with names which should not be used. If None (default), don't avoid any names unless `avoid_observed_names` is True. namespace: Gets a name which is unique within the (graph, namespace). Layers which are not Networks use a blank namespace and so get graph-global names. zero_based: If True, name sequences start with no suffix (e.g. 
"dense", "dense_1"). If False, naming is one-based ("dense_1", "dense_2"). avoid_observed_names: If True, avoid any names that have been observed by `backend.observe_object_name`. Returns: Unique string name. Example: unique_object_name('dense') # dense_1 unique_object_name('dense') # dense_2 """ if name_uid_map is None: name_uid_map = get_default_graph_uid_map() if avoid_names is None: if avoid_observed_names: avoid_names = OBSERVED_NAMES else: avoid_names = set() proposed_name = None while proposed_name is None or proposed_name in avoid_names: name_key = (namespace, name) if zero_based: number = name_uid_map[name_key] if number: proposed_name = name + '_' + str(number) else: proposed_name = name name_uid_map[name_key] += 1 else: name_uid_map[name_key] += 1 proposed_name = name + '_' + str(name_uid_map[name_key]) return proposed_name def _get_variables(graph=None): """Returns variables corresponding to the given graph for initialization.""" assert not tf.executing_eagerly() variables = _GRAPH_VARIABLES[graph] for opt in _GRAPH_TF_OPTIMIZERS[graph]: variables.update(opt.optimizer.variables()) return variables @keras_export('keras.__internal__.backend.initialize_variables', v1=[]) def _initialize_variables(session): """Utility to initialize uninitialized variables on the fly.""" variables = _get_variables(get_graph()) candidate_vars = [] for v in variables: if not getattr(v, '_keras_initialized', False): candidate_vars.append(v) if candidate_vars: # This step is expensive, so we only run it on variables not already # marked as initialized. is_initialized = session.run( [tf.compat.v1.is_variable_initialized(v) for v in candidate_vars]) # TODO(kathywu): Some metric variables loaded from SavedModel are never # actually used, and do not have an initializer. 
should_be_initialized = [ (not is_initialized[n]) and v.initializer is not None for n, v in enumerate(candidate_vars)] uninitialized_vars = [] for flag, v in zip(should_be_initialized, candidate_vars): if flag: uninitialized_vars.append(v) v._keras_initialized = True if uninitialized_vars: session.run(tf.compat.v1.variables_initializer(uninitialized_vars)) @keras_export('keras.backend.constant') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def constant(value, dtype=None, shape=None, name=None): """Creates a constant tensor. Args: value: A constant value (or list) dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. name: Optional name for the tensor. Returns: A Constant Tensor. """ if dtype is None: dtype = floatx() return tf.constant(value, dtype=dtype, shape=shape, name=name) @keras_export('keras.backend.is_keras_tensor') def is_keras_tensor(x): """Returns whether `x` is a Keras tensor. A "Keras tensor" is a tensor that was returned by a Keras layer, (`Layer` class) or by `Input`. Args: x: A candidate tensor. Returns: A boolean: Whether the argument is a Keras tensor. Raises: ValueError: In case `x` is not a symbolic tensor. Examples: >>> np_var = np.array([1, 2]) >>> # A numpy array is not a symbolic tensor. >>> tf.keras.backend.is_keras_tensor(np_var) Traceback (most recent call last): ... ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`. Expected a symbolic tensor instance. >>> keras_var = tf.keras.backend.variable(np_var) >>> # A variable created with the keras backend is not a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_var) False >>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> # A placeholder is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_placeholder) True >>> keras_input = tf.keras.layers.Input([10]) >>> # An Input is a Keras tensor. 
>>> tf.keras.backend.is_keras_tensor(keras_input) True >>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input) >>> # Any Keras layer output is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_layer_output) True """ if not isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.RaggedTensor, keras_tensor.KerasTensor)): raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + '`. Expected a symbolic tensor instance.') if tf.compat.v1.executing_eagerly_outside_functions(): return isinstance(x, keras_tensor.KerasTensor) return hasattr(x, '_keras_history') @keras_export('keras.backend.placeholder') @doc_controls.do_not_generate_docs def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None, ragged=False): """Instantiates a placeholder tensor and returns it. Args: shape: Shape of the placeholder (integer tuple, may include `None` entries). ndim: Number of axes of the tensor. At least one of {`shape`, `ndim`} must be specified. If both are specified, `shape` is used. dtype: Placeholder type. sparse: Boolean, whether the placeholder should have a sparse type. name: Optional name string for the placeholder. ragged: Boolean, whether the placeholder should have a ragged type. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see this [guide](https://www.tensorflow.org/guide/ragged_tensors). Raises: ValueError: If called with sparse = True and ragged = True. Returns: Tensor instance (with Keras metadata included). Examples: >>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> input_ph <KerasTensor: shape=(2, 4, 5) dtype=float32 (created by layer ...)> """ if sparse and ragged: raise ValueError( 'Cannot set both sparse and ragged to True when creating a placeholder.' 
) if dtype is None: dtype = floatx() if not shape: if ndim: shape = (None,) * ndim if tf.compat.v1.executing_eagerly_outside_functions(): if sparse: spec = tf.SparseTensorSpec( shape=shape, dtype=dtype) elif ragged: ragged_rank = 0 for i in range(1, len(shape)): # Hacky because could be tensorshape or tuple maybe? # Or just tensorshape? if shape[i] is None or ( hasattr(shape[i], 'value') and shape[i].value is None): ragged_rank = i spec = tf.RaggedTensorSpec( shape=shape, dtype=dtype, ragged_rank=ragged_rank) else: spec = tf.TensorSpec( shape=shape, dtype=dtype, name=name) x = keras_tensor.keras_tensor_from_type_spec(spec, name=name) else: with get_graph().as_default(): if sparse: x = tf.compat.v1.sparse_placeholder(dtype, shape=shape, name=name) elif ragged: ragged_rank = 0 for i in range(1, len(shape)): if shape[i] is None: ragged_rank = i type_spec = tf.RaggedTensorSpec( shape=shape, dtype=dtype, ragged_rank=ragged_rank) def tensor_spec_to_placeholder(tensorspec): return tf.compat.v1.placeholder(tensorspec.dtype, tensorspec.shape) x = tf.nest.map_structure(tensor_spec_to_placeholder, type_spec, expand_composites=True) else: x = tf.compat.v1.placeholder(dtype, shape=shape, name=name) if tf.executing_eagerly(): # Add keras_history connectivity information to the placeholder # when the placeholder is built in a top-level eager context # (intended to be used with keras.backend.function) from keras.engine import input_layer # pylint: disable=g-import-not-at-top x = input_layer.Input(tensor=x) x._is_backend_placeholder = True return x def is_placeholder(x): """Returns whether `x` is a placeholder. Args: x: A candidate placeholder. Returns: Boolean. 
""" try: if tf.compat.v1.executing_eagerly_outside_functions(): return hasattr(x, '_is_backend_placeholder') from keras.utils import tf_utils # pylint: disable=g-import-not-at-top if tf_utils.is_extension_type(x): flat_components = tf.nest.flatten(x, expand_composites=True) return py_any(is_placeholder(c) for c in flat_components) else: return x.op.type == 'Placeholder' except AttributeError: return False @keras_export('keras.backend.shape') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def shape(x): """Returns the symbolic shape of a tensor or variable. Args: x: A tensor or variable. Returns: A symbolic shape (which is itself a tensor). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.shape(kvar) <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)> >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.shape(input) <KerasTensor: shape=(3,) dtype=int32 inferred_value=[2, 4, 5] ...> """ return tf.shape(x) @keras_export('keras.backend.int_shape') @doc_controls.do_not_generate_docs def int_shape(x): """Returns the shape of tensor or variable as a tuple of int or None entries. Args: x: Tensor or variable. Returns: A tuple of integers (or None entries). Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.int_shape(input) (2, 4, 5) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.int_shape(kvar) (2, 2) """ try: shape = x.shape if not isinstance(shape, tuple): shape = tuple(shape.as_list()) return shape except ValueError: return None @keras_export('keras.backend.ndim') @doc_controls.do_not_generate_docs def ndim(x): """Returns the number of axes in a tensor, as an integer. Args: x: Tensor or variable. Returns: Integer (scalar), number of axes. 
Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.ndim(input) 3 >>> tf.keras.backend.ndim(kvar) 2 """ return x.shape.rank @keras_export('keras.backend.dtype') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def dtype(x): """Returns the dtype of a Keras tensor or variable, as a string. Args: x: Tensor or variable. Returns: String, dtype of `x`. Examples: >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5))) 'float32' >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5), ... dtype='float32')) 'float32' >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5), ... dtype='float64')) 'float64' >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]])) >>> tf.keras.backend.dtype(kvar) 'float32' >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]), ... dtype='float32') >>> tf.keras.backend.dtype(kvar) 'float32' """ return x.dtype.base_dtype.name @doc_controls.do_not_generate_docs def dtype_numpy(x): """Returns the numpy dtype of a Keras tensor or variable. Args: x: Tensor or variable. Returns: numpy.dtype, dtype of `x`. """ return tf.as_dtype(x.dtype).as_numpy_dtype @keras_export('keras.backend.eval') @doc_controls.do_not_generate_docs def eval(x): """Evaluates the value of a variable. Args: x: A variable. Returns: A Numpy array. Examples: >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]), ... dtype='float32') >>> tf.keras.backend.eval(kvar) array([[1., 2.], [3., 4.]], dtype=float32) """ return get_value(to_dense(x)) @keras_export('keras.backend.zeros') @doc_controls.do_not_generate_docs def zeros(shape, dtype=None, name=None): """Instantiates an all-zeros variable and returns it. 
Args: shape: Tuple or list of integers, shape of returned Keras variable dtype: data type of returned Keras variable name: name of returned Keras variable Returns: A variable (including Keras metadata), filled with `0.0`. Note that if `shape` was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. Example: >>> kvar = tf.keras.backend.zeros((3,4)) >>> tf.keras.backend.eval(kvar) array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]], dtype=float32) >>> A = tf.constant([1,2,3]) >>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.] >>> tf.keras.backend.eval(kvar2) array([0., 0., 0.], dtype=float32) >>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32) >>> tf.keras.backend.eval(kvar3) array([0, 0, 0], dtype=int32) >>> kvar4 = tf.keras.backend.zeros([2,3]) >>> tf.keras.backend.eval(kvar4) array([[0., 0., 0.], [0., 0., 0.]], dtype=float32) """ with tf.init_scope(): if dtype is None: dtype = floatx() tf_dtype = tf.as_dtype(dtype) v = tf.zeros(shape=shape, dtype=tf_dtype, name=name) if py_all(v.shape.as_list()): return variable(v, dtype=dtype, name=name) return v @keras_export('keras.backend.ones') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def ones(shape, dtype=None, name=None): """Instantiates an all-ones variable and returns it. Args: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, filled with `1.0`. Note that if `shape` was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. 
  Example:

  >>> kvar = tf.keras.backend.ones((3,4))
  >>> tf.keras.backend.eval(kvar)
  array([[1., 1., 1., 1.],
         [1., 1., 1., 1.],
         [1., 1., 1., 1.]], dtype=float32)
  """
  with tf.init_scope():
    if dtype is None:
      dtype = floatx()
    tf_dtype = tf.as_dtype(dtype)
    v = tf.ones(shape=shape, dtype=tf_dtype, name=name)
    # Only wrap in a Keras variable when the static shape is fully defined;
    # a dynamic-shaped all-ones tensor is returned as-is.
    if py_all(v.shape.as_list()):
      return variable(v, dtype=dtype, name=name)
    return v


@keras_export('keras.backend.eye')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def eye(size, dtype=None, name=None):
  """Instantiate an identity matrix and returns it.

  Args:
      size: Integer, number of rows/columns.
      dtype: String, data type of returned Keras variable.
      name: String, name of returned Keras variable.

  Returns:
      A Keras variable, an identity matrix.

  Example:

  >>> kvar = tf.keras.backend.eye(3)
  >>> tf.keras.backend.eval(kvar)
  array([[1., 0., 0.],
         [0., 1., 0.],
         [0., 0., 1.]], dtype=float32)
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = tf.as_dtype(dtype)
  return variable(tf.eye(size, dtype=tf_dtype), dtype, name)


@keras_export('keras.backend.zeros_like')
@doc_controls.do_not_generate_docs
def zeros_like(x, dtype=None, name=None):
  """Instantiates an all-zeros variable of the same shape as another tensor.

  Args:
      x: Keras variable or Keras tensor.
      dtype: dtype of returned Keras variable.
             `None` uses the dtype of `x`.
      name: name for the variable to create.

  Returns:
      A Keras variable with the shape of `x` filled with zeros.

  Example:

  ```python
  from tensorflow.keras import backend as K
  kvar = K.variable(np.random.random((2,3)))
  kvar_zeros = K.zeros_like(kvar)
  K.eval(kvar_zeros)
  # array([[ 0.,  0.,  0.], [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  return tf.zeros_like(x, dtype=dtype, name=name)


@keras_export('keras.backend.ones_like')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones variable of the same shape as another tensor.

  Args:
      x: Keras variable or tensor.
      dtype: String, dtype of returned Keras variable.
           None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with ones.

  Example:

  >>> kvar = tf.keras.backend.variable(np.random.random((2,3)))
  >>> kvar_ones = tf.keras.backend.ones_like(kvar)
  >>> tf.keras.backend.eval(kvar_ones)
  array([[1., 1., 1.],
         [1., 1., 1.]], dtype=float32)
  """
  return tf.ones_like(x, dtype=dtype, name=name)


def identity(x, name=None):
  """Returns a tensor with the same content as the input tensor.

  Args:
      x: The input tensor.
      name: String, name for the variable to create.

  Returns:
      A tensor of the same shape, type and content.
  """
  return tf.identity(x, name=name)


@keras_export('keras.backend.random_uniform_variable')
@doc_controls.do_not_generate_docs
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from a uniform distribution.

  Args:
      shape: Tuple of integers, shape of returned Keras variable.
      low: Float, lower boundary of the output interval.
      high: Float, upper boundary of the output interval.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:

  >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),
  ... low=0.0, high=1.0)
  >>> kvar
  <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
  dtype=float32)>
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = tf.as_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = tf.compat.v1.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)


@keras_export('keras.backend.random_normal_variable')
@doc_controls.do_not_generate_docs
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Args:
      shape: Tuple of integers, shape of returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:

  >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),
  ... mean=0.0, scale=1.0)
  >>> kvar
  <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
  dtype=float32)>
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = tf.as_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = tf.compat.v1.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)


@keras_export('keras.backend.count_params')
@doc_controls.do_not_generate_docs
def count_params(x):
  """Returns the static number of elements in a variable or tensor.

  Note: this relies on the *static* shape being fully defined; it will fail
  for tensors with unknown dimensions.

  Args:
      x: Variable or tensor.

  Returns:
      Integer, the number of scalars in `x`.

  Example:

  >>> kvar = tf.keras.backend.zeros((2,3))
  >>> tf.keras.backend.count_params(kvar)
  6
  >>> tf.keras.backend.eval(kvar)
  array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)
  """
  return np.prod(x.shape.as_list())


@keras_export('keras.backend.cast')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cast(x, dtype):
  """Casts a tensor to a different dtype and returns it.

  You can cast a Keras variable but it still returns a Keras tensor.

  Args:
      x: Keras tensor (or variable).
      dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).

  Returns:
      Keras tensor with dtype `dtype`.

  Examples:
      Cast a float32 variable to a float64 tensor

  >>> input = tf.keras.backend.ones(shape=(1,3))
  >>> print(input)
  <tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
  numpy=array([[1., 1., 1.]], dtype=float32)>
  >>> cast_input = tf.keras.backend.cast(input, dtype='float64')
  >>> print(cast_input)
  tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
  """
  return tf.cast(x, dtype)


# UPDATES OPS


@keras_export('keras.backend.update')
@doc_controls.do_not_generate_docs
def update(x, new_x):
  """Update the value of `x` to `new_x` via an in-place assignment.

  Args:
      x: A Variable.
      new_x: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return tf.compat.v1.assign(x, new_x)


@keras_export('keras.backend.update_add')
@doc_controls.do_not_generate_docs
def update_add(x, increment):
  """Update the value of `x` by adding `increment`.

  Args:
      x: A Variable.
      increment: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return tf.compat.v1.assign_add(x, increment)


@keras_export('keras.backend.update_sub')
@doc_controls.do_not_generate_docs
def update_sub(x, decrement):
  """Update the value of `x` by subtracting `decrement`.

  Args:
      x: A Variable.
      decrement: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return tf.compat.v1.assign_sub(x, decrement)


@keras_export('keras.backend.moving_average_update')
@doc_controls.do_not_generate_docs
def moving_average_update(x, value, momentum):
  """Compute the exponential moving average of a value.

  The moving average 'x' is updated with 'value' following:

  ```
  x = x * momentum + value * (1 - momentum)
  ```

  For example:

  >>> x = tf.Variable(0.0)
  >>> momentum=0.9
  >>> moving_average_update(x, value = 2.0, momentum=momentum).numpy()
  >>> x.numpy()
  0.2

  The result will be biased towards the initial value of the variable.

  If the variable was initialized to zero, you can divide by
  `1 - momentum ** num_updates` to debias it (Section 3 of
  [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)):

  >>> num_updates = 1.0
  >>> x_zdb = x/(1 - momentum**num_updates)
  >>> x_zdb.numpy()
  2.0

  Args:
      x: A Variable, the moving average.
      value: A tensor with the same shape as `x`, the new value to be
        averaged in.
      momentum: The moving average momentum.

  Returns:
      The updated variable.
  """
  if tf.__internal__.tf2.enabled():
    # TF2 path: cast operands to the variable's dtype and assign in place.
    momentum = tf.cast(momentum, x.dtype)
    value = tf.cast(value, x.dtype)
    return x.assign(x * momentum + value * (1 - momentum))
  else:
    # TF1 path: delegate to the legacy moving-average helper (zero-debiased).
    return tf.__internal__.train.assign_moving_average(
        x, value, momentum, zero_debias=True)


# LINEAR ALGEBRA


@keras_export('keras.backend.dot')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def dot(x, y):
  """Multiplies 2 tensors (and/or variables) and returns a tensor.

  This operation corresponds to `numpy.dot(a, b, out=None)`.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor, dot product of `x` and `y`.

  Examples:

  If inputs `x` and `y` are 2-D arrays, then it is equivalent to `tf.matmul`.
  >>> x = tf.keras.backend.placeholder(shape=(2, 3))
  >>> y = tf.keras.backend.placeholder(shape=(3, 4))
  >>> xy = tf.keras.backend.dot(x, y)
  >>> xy
  <KerasTensor: shape=(2, 4) dtype=float32 ...>

  >>> x = tf.keras.backend.placeholder(shape=(32, 28, 3))
  >>> y = tf.keras.backend.placeholder(shape=(3, 4))
  >>> xy = tf.keras.backend.dot(x, y)
  >>> xy
  <KerasTensor: shape=(32, 28, 4) dtype=float32 ...>

  If `x` is an N-D array and `y` is an M-D array (where M>=2), it is a sum
  product over the last axis of `x` and the second-to-last axis of `y`.

  >>> x = tf.keras.backend.random_uniform_variable(
  ... shape=(2, 3), low=0., high=1.)
  >>> y = tf.keras.backend.ones((4, 3, 5))
  >>> xy = tf.keras.backend.dot(x, y)
  >>> tf.keras.backend.int_shape(xy)
  (2, 4, 5)
  """
  if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
    # N-D case: build hybrid static/dynamic shapes (static dims where known,
    # dynamic scalars otherwise) so the final reshape can recover the result
    # shape x.shape[:-1] + y.shape[:-2] + y.shape[-1:].
    x_shape = []
    for i, s in zip(int_shape(x), tf.unstack(tf.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Move y's second-to-last axis (the contraction axis) to the front.
    y_permute_dim = list(range(ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    # Flatten both operands to 2-D, matmul, then restore the full shape.
    xt = tf.reshape(x, [-1, x_shape[-1]])
    yt = tf.reshape(
        tf.compat.v1.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    return tf.reshape(
        tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  if is_sparse(x):
    out = tf.sparse.sparse_dense_matmul(x, y)
  else:
    out = tf.matmul(x, y)
  return out


@keras_export('keras.backend.batch_dot')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_dot(x, y, axes=None):
  """Batchwise dot product.

  `batch_dot` is used to compute dot product of `x` and `y` when
  `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with less dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.

  Args:
      x: Keras tensor or variable with `ndim >= 2`.
      y: Keras tensor or variable with `ndim >= 2`.
      axes: Tuple or list of integers with target dimensions, or single
        integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]`
        should be equal.

  Returns:
      A tensor with shape equal to the concatenation of `x`'s shape
      (less the dimension that was summed over) and `y`'s shape (less the
      batch dimension and the dimension that was summed over). If the final
      rank is 1, we reshape it to `(batch_size, 1)`.

  Examples:

  >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
  >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
  >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
  >>> tf.keras.backend.int_shape(xy_batch_dot)
  (32, 1, 30)

  Shape inference:
    Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
    If `axes` is (1, 2), to find the output shape of resultant tensor,
        loop through each dimension in `x`'s shape and `y`'s shape:
    * `x.shape[0]` : 100 : append to output shape
    * `x.shape[1]` : 20 : do not append to output shape,
        dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
    * `y.shape[0]` : 100 : do not append to output shape,
        always ignore first dimension of `y`
    * `y.shape[1]` : 30 : append to output shape
    * `y.shape[2]` : 20 : do not append to output shape,
        dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
    `output_shape` = `(100, 30)`
  """
  x_shape = int_shape(x)
  y_shape = int_shape(y)

  x_ndim = len(x_shape)
  y_ndim = len(y_shape)

  if x_ndim < 2 or y_ndim < 2:
    raise ValueError('Cannot do batch_dot on inputs '
                     'with rank < 2. '
                     'Received inputs with shapes ' +
                     str(x_shape) + ' and ' +
                     str(y_shape) + '.')

  x_batch_size = x_shape[0]
  y_batch_size = y_shape[0]

  if x_batch_size is not None and y_batch_size is not None:
    if x_batch_size != y_batch_size:
      raise ValueError('Cannot do batch_dot on inputs '
                       'with different batch sizes. '
                       'Received inputs with shapes ' +
                       str(x_shape) + ' and ' +
                       str(y_shape) + '.')
  if isinstance(axes, int):
    axes = [axes, axes]

  if axes is None:
    if y_ndim == 2:
      axes = [x_ndim - 1, y_ndim - 1]
    else:
      axes = [x_ndim - 1, y_ndim - 2]

  if py_any(isinstance(a, (list, tuple)) for a in axes):
    raise ValueError('Multiple target dimensions are not supported. ' +
                     'Expected: None, int, (int, int), ' +
                     'Provided: ' + str(axes))

  # if tuple, convert to list.
  axes = list(axes)

  # convert negative indices.
  if axes[0] < 0:
    axes[0] += x_ndim
  if axes[1] < 0:
    axes[1] += y_ndim

  # sanity checks
  if 0 in axes:
    raise ValueError('Cannot perform batch_dot over axis 0. '
                     'If your inputs are not batched, '
                     'add a dummy batch dimension to your '
                     'inputs using K.expand_dims(x, 0)')
  a0, a1 = axes
  d1 = x_shape[a0]
  d2 = y_shape[a1]

  if d1 is not None and d2 is not None and d1 != d2:
    raise ValueError('Cannot do batch_dot on inputs with shapes ' +
                     str(x_shape) + ' and ' + str(y_shape) +
                     ' with axes=' + str(axes) + '. x.shape[%d] != '
                     'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))

  # backup ndims. Need them later.
  orig_x_ndim = x_ndim
  orig_y_ndim = y_ndim

  # if rank is 2, expand to 3.
  if x_ndim == 2:
    x = tf.expand_dims(x, 1)
    a0 += 1
    x_ndim += 1
  if y_ndim == 2:
    y = tf.expand_dims(y, 2)
    y_ndim += 1

  # bring x's dimension to be reduced to last axis.
  if a0 != x_ndim - 1:
    pattern = list(range(x_ndim))
    for i in range(a0, x_ndim - 1):
      pattern[i] = pattern[i + 1]
    pattern[-1] = a0
    x = tf.compat.v1.transpose(x, pattern)

  # bring y's dimension to be reduced to axis 1.
  if a1 != 1:
    pattern = list(range(y_ndim))
    for i in range(a1, 1, -1):
      pattern[i] = pattern[i - 1]
    pattern[1] = a1
    y = tf.compat.v1.transpose(y, pattern)

  # normalize both inputs to rank 3.
  if x_ndim > 3:
    # squash middle dimensions of x.
    x_shape = shape(x)
    x_mid_dims = x_shape[1:-1]
    x_squashed_shape = tf.stack(
        [x_shape[0], -1, x_shape[-1]])
    x = tf.reshape(x, x_squashed_shape)
    x_squashed = True
  else:
    x_squashed = False

  if y_ndim > 3:
    # squash trailing dimensions of y.
    y_shape = shape(y)
    y_trail_dims = y_shape[2:]
    y_squashed_shape = tf.stack(
        [y_shape[0], y_shape[1], -1])
    y = tf.reshape(y, y_squashed_shape)
    y_squashed = True
  else:
    y_squashed = False

  result = tf.matmul(x, y)

  # if inputs were squashed, we have to reshape the matmul output.
  output_shape = tf.shape(result)
  do_reshape = False

  if x_squashed:
    output_shape = tf.concat(
        [output_shape[:1], x_mid_dims, output_shape[-1:]], 0)
    do_reshape = True

  if y_squashed:
    output_shape = tf.concat([output_shape[:-1], y_trail_dims], 0)
    do_reshape = True

  if do_reshape:
    result = tf.reshape(result, output_shape)

  # if the inputs were originally rank 2, we remove the added 1 dim.
  if orig_x_ndim == 2:
    result = tf.squeeze(result, 1)
  elif orig_y_ndim == 2:
    result = tf.squeeze(result, -1)

  return result


@keras_export('keras.backend.transpose')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def transpose(x):
  """Transposes a tensor and returns it.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  Examples:

  >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
  >>> tf.keras.backend.eval(var)
  array([[1., 2., 3.],
         [4., 5., 6.]], dtype=float32)
  >>> var_transposed = tf.keras.backend.transpose(var)
  >>> tf.keras.backend.eval(var_transposed)
  array([[1., 4.],
         [2., 5.],
         [3., 6.]], dtype=float32)
  >>> input = tf.keras.backend.placeholder((2, 3))
  >>> input
  <KerasTensor: shape=(2, 3) dtype=float32 ...>
  >>> input_transposed = tf.keras.backend.transpose(input)
  >>> input_transposed
  <KerasTensor: shape=(3, 2) dtype=float32 ...>
  """
  return tf.compat.v1.transpose(x)


@keras_export('keras.backend.gather')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def gather(reference, indices):
  """Retrieves the elements of indices `indices` in the tensor `reference`.

  Args:
      reference: A tensor.
      indices: An integer tensor of indices.

  Returns:
      A tensor of same type as `reference`.

  Examples:

  >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
  >>> tf.keras.backend.eval(var)
  array([[1., 2., 3.],
         [4., 5., 6.]], dtype=float32)
  >>> var_gathered = tf.keras.backend.gather(var, [0])
  >>> tf.keras.backend.eval(var_gathered)
  array([[1., 2., 3.]], dtype=float32)
  >>> var_gathered = tf.keras.backend.gather(var, [1])
  >>> tf.keras.backend.eval(var_gathered)
  array([[4., 5., 6.]], dtype=float32)
  >>> var_gathered = tf.keras.backend.gather(var, [0,1,0])
  >>> tf.keras.backend.eval(var_gathered)
  array([[1., 2., 3.],
         [4., 5., 6.],
         [1., 2., 3.]], dtype=float32)
  """
  return tf.compat.v1.gather(reference, indices)


# ELEMENT-WISE OPERATIONS


@keras_export('keras.backend.max')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to find maximum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with maximum values of `x`.
  """
  return tf.reduce_max(x, axis, keepdims)


@keras_export('keras.backend.min')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to find minimum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with minimum values of `x`.
  """
  return tf.reduce_min(x, axis, keepdims)


@keras_export('keras.backend.sum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to sum over.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with sum of `x`.
  """
  return tf.reduce_sum(x, axis, keepdims)


@keras_export('keras.backend.prod')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def prod(x, axis=None, keepdims=False):
  """Multiplies the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the product.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the product of elements of `x`.
  """
  return tf.reduce_prod(x, axis, keepdims)


@keras_export('keras.backend.cumsum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the sum.

  Returns:
      A tensor of the cumulative sum of values of `x` along `axis`.
  """
  return tf.cumsum(x, axis=axis)


@keras_export('keras.backend.cumprod')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cumprod(x, axis=0):
  """Cumulative product of the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the product.

  Returns:
      A tensor of the cumulative product of values of `x` along `axis`.
  """
  return tf.math.cumprod(x, axis=axis)


@keras_export('keras.backend.var')
@doc_controls.do_not_generate_docs
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  # Boolean inputs are promoted to the Keras float dtype before reducing.
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, floatx())
  return tf.math.reduce_variance(x, axis=axis, keepdims=keepdims)


@keras_export('keras.backend.std')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def std(x, axis=None, keepdims=False):
  """Standard deviation of a tensor, alongside the specified axis.

  It is an alias to `tf.math.reduce_std`.

  Args:
      x: A tensor or variable. It should have numerical dtypes. Boolean type
        inputs will be converted to float.
      axis: An integer, the axis to compute the standard deviation. If `None`
        (the default), reduces all dimensions. Must be in the range
        `[-rank(x), rank(x))`.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`, the reduced dimension is retained with
          length 1.

  Returns:
      A tensor with the standard deviation of elements of `x` with same dtype.
      Boolean type input will be converted to float.
  """
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, floatx())
  return tf.math.reduce_std(x, axis=axis, keepdims=keepdims)


@keras_export('keras.backend.mean')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: A list of integer. Axes to compute the mean.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1 for each entry in `axis`. If `keepdims` is `True`,
          the reduced dimensions are retained with length 1.

  Returns:
      A tensor with the mean of elements of `x`.
  """
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, floatx())
  return tf.reduce_mean(x, axis, keepdims)


@keras_export('keras.backend.any')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A uint8 tensor (0s and 1s).
  """
  x = tf.cast(x, tf.bool)
  return tf.reduce_any(x, axis, keepdims)


@keras_export('keras.backend.all')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def all(x, axis=None, keepdims=False):
  """Bitwise reduction (logical AND).

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A uint8 tensor (0s and 1s).
  """
  x = tf.cast(x, tf.bool)
  return tf.reduce_all(x, axis, keepdims)


@keras_export('keras.backend.argmax')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def argmax(x, axis=-1):
  """Returns the index of the maximum value along an axis.

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor.
  """
  return tf.argmax(x, axis)


@keras_export('keras.backend.argmin')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def argmin(x, axis=-1):
  """Returns the index of the minimum value along an axis.

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor.
  """
  return tf.argmin(x, axis)


@keras_export('keras.backend.square')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def square(x):
  """Element-wise square.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.square(x)


@keras_export('keras.backend.abs')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def abs(x):
  """Element-wise absolute value.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.abs(x)


@keras_export('keras.backend.sqrt')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sqrt(x):
  """Element-wise square root.

  This function clips negative tensor values to 0 before computing the
  square root.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  zero = _constant_to_tensor(0., x.dtype.base_dtype)
  x = tf.maximum(x, zero)
  return tf.sqrt(x)


@keras_export('keras.backend.exp')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def exp(x):
  """Element-wise exponential.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.exp(x)


@keras_export('keras.backend.log')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def log(x):
  """Element-wise log.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.math.log(x)


def logsumexp(x, axis=None, keepdims=False):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  This function is more numerically stable than log(sum(exp(x))).
  It avoids overflows caused by taking the exp of large inputs and
  underflows caused by taking the log of small inputs.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to reduce over.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`, the reduced dimension is
          retained with length 1.

  Returns:
      The reduced tensor.
  """
  return tf.reduce_logsumexp(x, axis, keepdims)


@keras_export('keras.backend.round')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def round(x):
  """Element-wise rounding to the closest integer.

  In case of tie, the rounding mode used is "half to even".

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.round(x)


@keras_export('keras.backend.sign')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sign(x):
  """Element-wise sign.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.sign(x)


@keras_export('keras.backend.pow')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def pow(x, a):
  """Element-wise exponentiation.

  Args:
      x: Tensor or variable.
      a: Python integer.

  Returns:
      A tensor.
  """
  return tf.pow(x, a)


@keras_export('keras.backend.clip')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Args:
      x: Tensor or variable.
      min_value: Python float, integer, or tensor.
      max_value: Python float, integer, or tensor.

  Returns:
      A tensor.
  """
  # When both bounds are Python scalars and inverted (max < min), collapse
  # the interval to the lower bound rather than erroring.
  if (isinstance(min_value, (int, float)) and
      isinstance(max_value, (int, float))):
    if max_value < min_value:
      max_value = min_value
  # `None` bounds mean "unbounded" on that side.
  if min_value is None:
    min_value = -np.inf
  if max_value is None:
    max_value = np.inf
  return tf.clip_by_value(x, min_value, max_value)


@keras_export('keras.backend.equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def equal(x, y):
  """Element-wise equality between two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.equal(x, y)


@keras_export('keras.backend.not_equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def not_equal(x, y):
  """Element-wise inequality between two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.not_equal(x, y)


@keras_export('keras.backend.greater')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def greater(x, y):
  """Element-wise truth value of (x > y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.greater(x, y)


@keras_export('keras.backend.greater_equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def greater_equal(x, y):
  """Element-wise truth value of (x >= y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.greater_equal(x, y)


@keras_export('keras.backend.less')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def less(x, y):
  """Element-wise truth value of (x < y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.less(x, y)


@keras_export('keras.backend.less_equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.less_equal(x, y)


@keras_export('keras.backend.maximum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def maximum(x, y):
  """Element-wise maximum of two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor with the element wise maximum value(s) of `x` and `y`.

  Examples:

  >>> x = tf.Variable([[1, 2], [3, 4]])
  >>> y = tf.Variable([[2, 1], [0, -1]])
  >>> m = tf.keras.backend.maximum(x, y)
  >>> m
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[2, 2],
         [3, 4]], dtype=int32)>
  """
  return tf.maximum(x, y)


@keras_export('keras.backend.minimum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def minimum(x, y):
  """Element-wise minimum of two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.minimum(x, y)


@keras_export('keras.backend.sin')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sin(x):
  """Computes sin of x element-wise.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.sin(x)


@keras_export('keras.backend.cos')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cos(x):
  """Computes cos of x element-wise.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.cos(x)


def _regular_normalize_batch_in_training(x,
                                         gamma,
                                         beta,
                                         reduction_axes,
                                         epsilon=1e-3):
  """Non-fused version of `normalize_batch_in_training`.

  Args:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = tf.compat.v1.nn.moments(x, reduction_axes, None, None, False)
  normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  return normed, mean, var


def _broadcast_normalize_batch_in_training(x,
                                           gamma,
                                           beta,
                                           reduction_axes,
                                           epsilon=1e-3):
  """Non-fused, broadcast version of `normalize_batch_in_training`.

  Used when the reduction axes are not contiguous from axis 0, so mean/var
  must be reshaped to broadcast against `x`.

  Args:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = tf.compat.v1.nn.moments(x, reduction_axes, None, None, False)
  # Build a broadcast shape: 1 on reduced axes, dynamic size elsewhere.
  target_shape = []
  for axis in range(ndim(x)):
    if axis in reduction_axes:
      target_shape.append(1)
    else:
      target_shape.append(tf.shape(x)[axis])
  target_shape = tf.stack(target_shape)

  broadcast_mean = tf.reshape(mean, target_shape)
  broadcast_var = tf.reshape(var, target_shape)
  if gamma is None:
    broadcast_gamma = None
  else:
    broadcast_gamma = tf.reshape(gamma, target_shape)
  if beta is None:
    broadcast_beta = None
  else:
    broadcast_beta = tf.reshape(beta, target_shape)

  normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                     broadcast_beta, broadcast_gamma,
                                     epsilon)
  return normed, mean, var


def _fused_normalize_batch_in_training(x,
                                       gamma,
                                       beta,
                                       reduction_axes,
                                       epsilon=1e-3):
  """Fused version of `normalize_batch_in_training`.

  Args:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
""" if list(reduction_axes) == [0, 1, 2]: normalization_axis = 3 tf_data_format = 'NHWC' else: normalization_axis = 1 tf_data_format = 'NCHW' if gamma is None: gamma = tf.constant( 1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]]) if beta is None: beta = tf.constant( 0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]]) return tf.compat.v1.nn.fused_batch_norm( x, gamma, beta, epsilon=epsilon, data_format=tf_data_format) @keras_export('keras.backend.normalize_batch_in_training') @doc_controls.do_not_generate_docs def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): """Computes mean and std for batch then apply batch_normalization on batch. Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, `(normalized_tensor, mean, variance)`. """ if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]: if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) return _fused_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) else: if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: return _regular_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) else: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) @keras_export('keras.backend.batch_normalization') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3): """Applies batch normalization on x given mean, var, beta and gamma. I.e. returns: `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` Args: x: Input tensor or variable. mean: Mean of batch. var: Variance of batch. 
beta: Tensor with which to center the input. gamma: Tensor by which to scale the input. axis: Integer, the axis that should be normalized. (typically the features axis). epsilon: Fuzz factor. Returns: A tensor. """ if ndim(x) == 4: # The CPU implementation of `fused_batch_norm` only supports NHWC if axis == 1 or axis == -3: tf_data_format = 'NCHW' elif axis == 3 or axis == -1: tf_data_format = 'NHWC' else: tf_data_format = None if (tf_data_format == 'NHWC' or tf_data_format == 'NCHW' and _has_nchw_support()): # The mean / var / beta / gamma tensors may be broadcasted # so they may have extra axes of size 1, which should be squeezed. if ndim(mean) > 1: mean = tf.reshape(mean, [-1]) if ndim(var) > 1: var = tf.reshape(var, [-1]) if beta is None: beta = zeros_like(mean) elif ndim(beta) > 1: beta = tf.reshape(beta, [-1]) if gamma is None: gamma = ones_like(mean) elif ndim(gamma) > 1: gamma = tf.reshape(gamma, [-1]) y, _, _ = tf.compat.v1.nn.fused_batch_norm( x, gamma, beta, epsilon=epsilon, mean=mean, variance=var, data_format=tf_data_format, is_training=False ) return y return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) # SHAPE OPERATIONS @keras_export('keras.backend.concatenate') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def concatenate(tensors, axis=-1): """Concatenates a list of tensors alongside the specified axis. Args: tensors: list of tensors to concatenate. axis: concatenation axis. Returns: A tensor. 
  Example:

      >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
      >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
      >>> tf.keras.backend.concatenate((a, b), axis=-1)
      <tf.Tensor: shape=(3, 6), dtype=int32, numpy=
      array([[ 1,  2,  3, 10, 20, 30],
             [ 4,  5,  6, 40, 50, 60],
             [ 7,  8,  9, 70, 80, 90]], dtype=int32)>

  """
  if axis < 0:
    # Resolve a negative axis against the rank of the first tensor; tensors
    # with rank 0 (or unknown rank) fall back to axis 0.
    rank = ndim(tensors[0])
    if rank:
      axis %= rank
    else:
      axis = 0

  if py_all(is_sparse(x) for x in tensors):
    # All-sparse inputs stay sparse.
    return tf.compat.v1.sparse_concat(axis, tensors)
  elif py_all(isinstance(x, tf.RaggedTensor) for x in tensors):
    # All-ragged inputs concatenate natively.
    return tf.concat(tensors, axis)
  else:
    # Mixed or dense inputs: densify everything before concatenating.
    return tf.concat([to_dense(x) for x in tensors], axis)


@keras_export('keras.backend.reshape')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def reshape(x, shape):
  """Reshapes a tensor to the specified shape.

  Args:
      x: Tensor or variable.
      shape: Target shape tuple.

  Returns:
      A tensor.

  Example:

    >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
    >>> a
    <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 7,  8,  9],
           [10, 11, 12]], dtype=int32)>
    >>> tf.keras.backend.reshape(a, shape=(2, 6))
    <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
    array([[ 1,  2,  3,  4,  5,  6],
           [ 7,  8,  9, 10, 11, 12]], dtype=int32)>

  """
  return tf.reshape(x, shape)


@keras_export('keras.backend.permute_dimensions')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def permute_dimensions(x, pattern):
  """Permutes axes in a tensor.

  Args:
      x: Tensor or variable.
      pattern: A tuple of
          dimension indices, e.g. `(0, 2, 1)`.

  Returns:
      A tensor.
  Example:

    >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
    >>> a
    <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 7,  8,  9],
           [10, 11, 12]], dtype=int32)>
    >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))
    <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
    array([[ 1,  4,  7, 10],
           [ 2,  5,  8, 11],
           [ 3,  6,  9, 12]], dtype=int32)>

  """
  return tf.compat.v1.transpose(x, perm=pattern)


@keras_export('keras.backend.resize_images')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def resize_images(x, height_factor, width_factor, data_format,
                  interpolation='nearest'):
  """Resizes the images contained in a 4D tensor.

  Args:
      x: Tensor or variable to resize.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.
      interpolation: A string, one of `nearest` or `bilinear`.

  Returns:
      A tensor.

  Raises:
      ValueError: in case of incorrect value for
        `data_format` or `interpolation`.
  """
  # Pick the indices of the (height, width) axes for the given layout.
  if data_format == 'channels_first':
    rows, cols = 2, 3
  elif data_format == 'channels_last':
    rows, cols = 1, 2
  else:
    raise ValueError('Invalid `data_format` argument: %s' % (data_format,))

  new_shape = x.shape[rows:cols + 1]
  if new_shape.is_fully_defined():
    new_shape = tf.constant(new_shape.as_list(), dtype='int32')
  else:
    # Spatial dims are dynamic: read them from the runtime shape instead.
    new_shape = tf.shape(x)[rows:cols + 1]
  new_shape *= tf.constant(
      np.array([height_factor, width_factor], dtype='int32'))

  if data_format == 'channels_first':
    # tf.image.resize expects channels-last input; transpose to NHWC here
    # and transpose back after resizing.
    x = permute_dimensions(x, [0, 2, 3, 1])
  if interpolation == 'nearest':
    x = tf.image.resize(x, new_shape,
                        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
  elif interpolation == 'bilinear':
    x = tf.image.resize(x, new_shape,
                        method=tf.image.ResizeMethod.BILINEAR)
  else:
    raise ValueError('interpolation should be one '
                     'of "nearest" or "bilinear".')
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 3, 1, 2])

  return x


@keras_export('keras.backend.resize_volumes')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
  """Resizes the volume contained in a 5D tensor.

  Args:
      x: Tensor or variable to resize.
      depth_factor: Positive integer.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.

  Returns:
      A tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  # Upsampling is implemented as element repetition along each spatial axis;
  # the axis indices differ between the two layouts.
  if data_format == 'channels_first':
    output = repeat_elements(x, depth_factor, axis=2)
    output = repeat_elements(output, height_factor, axis=3)
    output = repeat_elements(output, width_factor, axis=4)
    return output
  elif data_format == 'channels_last':
    output = repeat_elements(x, depth_factor, axis=1)
    output = repeat_elements(output, height_factor, axis=2)
    output = repeat_elements(output, width_factor, axis=3)
    return output
  else:
    raise ValueError('Invalid data_format: ' + str(data_format))


@keras_export('keras.backend.repeat_elements')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def repeat_elements(x, rep, axis):
  """Repeats the elements of a tensor along an axis, like `np.repeat`.

  If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
  will have shape `(s1, s2 * rep, s3)`.

  Args:
      x: Tensor or variable.
      rep: Python integer, number of times to repeat.
      axis: Axis along which to repeat.

  Returns:
      A tensor.

  Example:

      >>> b = tf.constant([1, 2, 3])
      >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)
      <tf.Tensor: shape=(6,), dtype=int32,
          numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>

  """
  x_shape = x.shape.as_list()
  # For static axis
  if x_shape[axis] is not None:
    # slices along the repeat axis
    splits = tf.split(value=x,
                      num_or_size_splits=x_shape[axis],
                      axis=axis)
    # repeat each slice the given number of reps
    x_rep = [s for s in splits for _ in range(rep)]
    return concatenate(x_rep, axis)

  # Here we use tf.tile to mimic behavior of np.repeat so that
  # we can handle dynamic shapes (that include None).
  # To do that, we need an auxiliary axis to repeat elements along
  # it and then merge them along the desired axis.
  # Repeating
  auxiliary_axis = axis + 1
  x_shape = tf.shape(x)
  x_rep = tf.expand_dims(x, axis=auxiliary_axis)
  reps = np.ones(len(x.shape) + 1)
  reps[auxiliary_axis] = rep
  x_rep = tf.tile(x_rep, reps)

  # Merging
  reps = np.delete(reps, auxiliary_axis)
  reps[axis] = rep
  reps = tf.constant(reps, dtype='int32')
  x_shape *= reps
  x_rep = tf.reshape(x_rep, x_shape)

  # Fix shape representation
  x_shape = x.shape.as_list()
  x_rep.set_shape(x_shape)
  x_rep._keras_shape = tuple(x_shape)
  return x_rep


@keras_export('keras.backend.repeat')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def repeat(x, n):
  """Repeats a 2D tensor.

  if `x` has shape (samples, dim) and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.

  Args:
      x: Tensor or variable.
      n: Python integer, number of times to repeat.

  Returns:
      A tensor.

  Example:

      >>> b = tf.constant([[1, 2], [3, 4]])
      >>> b
      <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
      array([[1, 2],
             [3, 4]], dtype=int32)>
      >>> tf.keras.backend.repeat(b, n=2)
      <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
      array([[[1, 2],
              [1, 2]],
             [[3, 4],
              [3, 4]]], dtype=int32)>

  """
  # Insert a time axis and tile the input `n` times along it.
  assert ndim(x) == 2
  x = tf.expand_dims(x, 1)
  pattern = tf.stack([1, n, 1])
  return tf.tile(x, pattern)


@keras_export('keras.backend.arange')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def arange(start, stop=None, step=1, dtype='int32'):
  """Creates a 1D tensor containing a sequence of integers.

  The function arguments use the same convention as
  Theano's arange: if only one argument is provided,
  it is in fact the "stop" argument and "start" is 0.

  The default type of the returned tensor is `'int32'` to
  match TensorFlow's default.

  Args:
      start: Start value.
      stop: Stop value.
      step: Difference between two successive values.
      dtype: Integer dtype to use.

  Returns:
      An integer tensor.

  Example:

      >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)
      <tf.Tensor: shape=(7,), dtype=float32,
          numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>

  """
  # Match the behavior of numpy and Theano by returning an empty sequence.
  if stop is None and start < 0:
    start = 0
  result = tf.range(start, limit=stop, delta=step, name='arange')
  if dtype != 'int32':
    result = cast(result, dtype)
  return result


@keras_export('keras.backend.tile')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def tile(x, n):
  """Creates a tensor by tiling `x` by `n`.

  Args:
      x: A tensor or variable
      n: A list of integer. The length must be the same as the number of
          dimensions in `x`.

  Returns:
      A tiled tensor.
  """
  # Allow a bare integer for 1-D tiling.
  if isinstance(n, int):
    n = [n]
  return tf.tile(x, n)


@keras_export('keras.backend.flatten')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def flatten(x):
  """Flatten a tensor.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor, reshaped into 1-D

  Example:

      >>> b = tf.constant([[1, 2], [3, 4]])
      >>> b
      <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
      array([[1, 2],
             [3, 4]], dtype=int32)>
      >>> tf.keras.backend.flatten(b)
      <tf.Tensor: shape=(4,), dtype=int32,
          numpy=array([1, 2, 3, 4], dtype=int32)>

  """
  return tf.reshape(x, [-1])


@keras_export('keras.backend.batch_flatten')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_flatten(x):
  """Turn a nD tensor into a 2D tensor with same 0th dimension.

  In other words, it flattens each data samples of a batch.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.

  Examples:
    Flattening a 3D tensor to 2D by collapsing the last dimension.

  >>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))
  >>> x_batch_flatten = batch_flatten(x_batch)
  >>> tf.keras.backend.int_shape(x_batch_flatten)
  (2, 60)

  """
  # Keep the batch dimension (-1) and fold all remaining dims into one.
  x = tf.reshape(x, tf.stack([-1, prod(shape(x)[1:])]))
  return x


@keras_export('keras.backend.expand_dims')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def expand_dims(x, axis=-1):
  """Adds a 1-sized dimension at index "axis".

  Args:
      x: A tensor or variable.
      axis: Position where to add a new axis.

  Returns:
      A tensor with expanded dimensions.
  """
  return tf.expand_dims(x, axis)


@keras_export('keras.backend.squeeze')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def squeeze(x, axis):
  """Removes a 1-dimension from the tensor at index "axis".

  Args:
      x: A tensor or variable.
      axis: Axis to drop.

  Returns:
      A tensor with the same data as `x` but reduced dimensions.
  """
  return tf.squeeze(x, [axis])


@keras_export('keras.backend.temporal_padding')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle dimension of a 3D tensor.

  Args:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to
          add at the start and end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  # Only the time axis (dim 1) is padded; batch and feature dims are left
  # untouched.
  pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
  return tf.compat.v1.pad(x, pattern)


@keras_export('keras.backend.spatial_2d_padding')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the 2nd and 3rd dimensions of a 4D tensor.

  Args:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    # Fall back to the globally-configured image data format.
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  # Spatial axes are (2, 3) for channels_first and (1, 2) for channels_last.
  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
  else:
    pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
  return tf.compat.v1.pad(x, pattern)


@keras_export('keras.backend.spatial_3d_padding')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively
  "padding[0]", "padding[1]" and "padding[2]" zeros left and right.

  For 'channels_last' data_format,
  the 2nd, 3rd and 4th dimension will be padded.
  For 'channels_first' data_format,
  the 3rd, 4th and 5th dimension will be padded.

  Args:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    # Fall back to the globally-configured image data format.
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  # Spatial axes are (2, 3, 4) for channels_first and (1, 2, 3) for
  # channels_last; batch and channel axes get zero padding.
  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
  else:
    pattern = [[0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0],
                                                padding[2][1]], [0, 0]]
  return tf.compat.v1.pad(x, pattern)


@keras_export('keras.backend.stack')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def stack(x, axis=0):
  """Stacks a list of rank `R` tensors into a rank `R+1` tensor.

  Args:
      x: List of tensors.
      axis: Axis along which to perform stacking.

  Returns:
      A tensor.

  Example:

      >>> a = tf.constant([[1, 2],[3, 4]])
      >>> b = tf.constant([[10, 20],[30, 40]])
      >>> tf.keras.backend.stack((a, b))
      <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
      array([[[ 1,  2],
              [ 3,  4]],
             [[10, 20],
              [30, 40]]], dtype=int32)>

  """
  return tf.stack(x, axis=axis)


@keras_export('keras.backend.one_hot')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def one_hot(indices, num_classes):
  """Computes the one-hot representation of an integer tensor.

  Args:
      indices: nD integer tensor of shape
          `(batch_size, dim1, dim2, ... dim(n-1))`
      num_classes: Integer, number of classes to consider.

  Returns:
      The (n + 1)D one hot representation of the input
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`
  """
  return tf.one_hot(indices, depth=num_classes, axis=-1)


@keras_export('keras.backend.reverse')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def reverse(x, axes):
  """Reverse a tensor along the specified axes.

  Args:
      x: Tensor to reverse.
      axes: Integer or iterable of integers.
          Axes to reverse.

  Returns:
      A tensor.
  """
  # tf.reverse takes a list of axes; normalize a single int accordingly.
  if isinstance(axes, int):
    axes = [axes]
  return tf.reverse(x, axes)


# VALUE MANIPULATION
_VALUE_SET_CODE_STRING = """
  >>> K = tf.keras.backend  # Common keras convention
  >>> v = K.variable(1.)

  >>> # reassign
  >>> K.set_value(v, 2.)
  >>> print(K.get_value(v))
  2.0

  >>> # increment
  >>> K.set_value(v, K.get_value(v) + 1)
  >>> print(K.get_value(v))
  3.0

  Variable semantics in TensorFlow 2 are eager execution friendly. The above
  code is roughly equivalent to:

  >>> v = tf.Variable(1.)

  >>> v.assign(2.)
  >>> print(v.numpy())
  2.0

  >>> v.assign_add(1.)
  >>> print(v.numpy())
  3.0"""[3:]  # Prune first newline and indent to match the docstring template.


@keras_export('keras.backend.get_value')
@doc_controls.do_not_generate_docs
def get_value(x):
  """Returns the value of a variable.

  `backend.get_value` is the complement of `backend.set_value`, and provides
  a generic interface for reading from variables while abstracting away the
  differences between TensorFlow 1.x and 2.x semantics.

  {snippet}

  Args:
      x: input variable.

  Returns:
      A Numpy array.
  """
  if not tf.is_tensor(x):
    # Non-tensor inputs (e.g. Python scalars) pass through unchanged.
    return x
  if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor):
    return x.numpy()
  if not getattr(x, '_in_graph_mode', True):
    # This is a variable which was created in an eager context, but is being
    # evaluated from a Graph.
    with tf.__internal__.eager_context.eager_mode():
      return x.numpy()

  if tf.compat.v1.executing_eagerly_outside_functions():
    # This method of evaluating works inside the Keras FuncGraph.
    with tf.init_scope():
      return x.numpy()

  # Pure TF1 graph mode: evaluate through a session.
  with x.graph.as_default():
    return x.eval(session=get_session((x,)))


@keras_export('keras.backend.batch_get_value')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_get_value(tensors):
  """Returns the value of more than one tensor variable.

  Args:
      tensors: list of ops to run.

  Returns:
      A list of Numpy arrays.

  Raises:
      RuntimeError: If this method is called inside defun.
  """
  if tf.executing_eagerly():
    return [x.numpy() for x in tensors]
  elif tf.inside_function():  # pylint: disable=protected-access
    raise RuntimeError('Cannot get value inside Tensorflow graph function.')
  if tensors:
    # One session.run fetches all values at once.
    return get_session(tensors).run(tensors)
  else:
    return []


@keras_export('keras.backend.set_value')
@doc_controls.do_not_generate_docs
def set_value(x, value):
  """Sets the value of a variable, from a Numpy array.

  `backend.set_value` is the complement of `backend.get_value`, and provides
  a generic interface for assigning to variables while abstracting away the
  differences between TensorFlow 1.x and 2.x semantics.

  {snippet}

  Args:
      x: Variable to set to a new value.
      value: Value to set the tensor to, as a Numpy array
          (of the same shape).
  """
  value = np.asarray(value, dtype=dtype_numpy(x))
  if tf.compat.v1.executing_eagerly_outside_functions():
    x.assign(value)
  else:
    with get_graph().as_default():
      tf_dtype = tf.as_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        # Reuse the placeholder/op cached on the variable from a prior call.
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        # In order to support assigning weights to resizable variables in
        # Keras, we make a placeholder with the correct number of dimensions
        # but with None in each dimension. This way, we can assign weights
        # of any size (as long as they have the correct dimensionality).
        placeholder_shape = tf.TensorShape([None] * value.ndim)
        assign_placeholder = tf.compat.v1.placeholder(
            tf_dtype, shape=placeholder_shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      get_session().run(assign_op, feed_dict={assign_placeholder: value})


@keras_export('keras.backend.batch_set_value')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Args:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if tf.compat.v1.executing_eagerly_outside_functions():
    # Eager / TF2 path: assign each variable directly.
    for x, value in tuples:
      x.assign(np.asarray(value, dtype=dtype_numpy(x)))
  else:
    # Graph / TF1 path: build (or reuse) one assign op per variable and run
    # them all in a single session call.
    with get_graph().as_default():
      if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
          value = np.asarray(value, dtype=dtype_numpy(x))
          tf_dtype = tf.as_dtype(x.dtype.name.split('_')[0])
          if hasattr(x, '_assign_placeholder'):
            assign_placeholder = x._assign_placeholder
            assign_op = x._assign_op
          else:
            # In order to support assigning weights to resizable variables in
            # Keras, we make a placeholder with the correct number of
            # dimensions but with None in each dimension. This way, we can
            # assign weights of any size (as long as they have the correct
            # dimensionality).
            placeholder_shape = tf.TensorShape([None] * value.ndim)
            assign_placeholder = tf.compat.v1.placeholder(
                tf_dtype, shape=placeholder_shape)
            assign_op = x.assign(assign_placeholder)
            x._assign_placeholder = assign_placeholder
            x._assign_op = assign_op
          assign_ops.append(assign_op)
          feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)


# Splice the shared get/set snippet into both docstrings.
get_value.__doc__ = get_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
set_value.__doc__ = set_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)


@keras_export('keras.backend.print_tensor')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def print_tensor(x, message='', summarize=3):
  """Prints `message` and the tensor value when evaluated.

  Note that `print_tensor` returns a new tensor identical to `x`
  which should be used in the following code. Otherwise the
  print operation is not taken into account during evaluation.

  Example:

  >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  >>> tf.keras.backend.print_tensor(x)
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
    array([[1., 2.],
           [3., 4.]], dtype=float32)>

  Args:
      x: Tensor to print.
      message: Message to print jointly with the tensor.
      summarize: The first and last `summarize` elements within each dimension
          are recursively printed per Tensor. If None, then the first 3 and
          last 3 elements of each dimension are printed for each tensor. If
          set to -1, it will print all elements of every tensor.

  Returns:
      The same tensor `x`, unchanged.
  """
  if isinstance(x, tf.Tensor) and hasattr(x, 'graph'):
    # Graph-mode tensor: attach the print op as a control dependency so it
    # fires whenever the returned (identity) tensor is evaluated.
    with get_graph().as_default():
      op = tf.print(
          message, x, output_stream=sys.stdout, summarize=summarize)
      with tf.control_dependencies([op]):
        return tf.identity(x)
  else:
    # Eager tensor: print immediately and return the input as-is.
    tf.print(
        message, x, output_stream=sys.stdout, summarize=summarize)
    return x

# GRAPH MANIPULATION


class GraphExecutionFunction:
  """Runs a computation graph.

  It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular additional operations via `fetches` argument and additional
  tensor substitutions via `feed_dict` arguments. Note that given
  substitutions are merged with substitutions from `inputs`. Even though
  `feed_dict` is passed once in the constructor (called in `model.compile()`)
  we can modify the values in the dictionary. Through this feed_dict we can
  provide additional substitutions besides Keras inputs.

  Args:
      inputs: Feed placeholders to the computation graph.
      outputs: Output tensors to fetch.
      updates: Additional update ops to be run at function call.
      name: A name to help users identify what this function does.
      session_kwargs: Arguments to `tf.Session.run()`:
          `fetches`, `feed_dict`, `options`, `run_metadata`.
  """

  def __init__(self, inputs, outputs, updates=None, name=None,
               **session_kwargs):
    updates = updates or []
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a Keras backend function '
                      'should be a list or tuple.')

    # Keep the original (possibly nested) structures so outputs can be
    # re-packed in __call__; work internally with flat lists.
    self._inputs_structure = inputs
    self.inputs = tf.nest.flatten(inputs, expand_composites=True)
    self._outputs_structure = outputs
    self.outputs = cast_variables_to_tensor(
        tf.nest.flatten(outputs, expand_composites=True))
    # TODO(b/127668432): Consider using autograph to generate these
    # dependencies in call.
    # Index 0 = total loss or model output for `predict`.
    with tf.control_dependencies([self.outputs[0]]):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          # (variable, new_value) pairs become assign ops.
          p, new_p = update
          updates_ops.append(tf.compat.v1.assign(p, new_p))
        else:
          # assumed already an op
          updates_ops.append(update)
      self.updates_op = tf.group(*updates_ops)
    self.name = name
    # additional tensor substitutions
    self.feed_dict = session_kwargs.pop('feed_dict', None)
    # additional operations
    self.fetches = session_kwargs.pop('fetches', [])
    if not isinstance(self.fetches, list):
      self.fetches = [self.fetches]
    self.run_options = session_kwargs.pop('options', None)
    self.run_metadata = session_kwargs.pop('run_metadata', None)
    # The main use case of `fetches` being passed to a model is the ability
    # to run custom updates
    # This requires us to wrap fetches in `identity` ops.
    self.fetches = [tf.identity(x) for x in self.fetches]
    self.session_kwargs = session_kwargs
    # This mapping keeps track of the function that should receive the
    # output from a fetch in `fetches`: { fetch: function(fetch_output) }
    # A Callback can use this to register a function with access to the
    # output values for a fetch it added.
    self.fetch_callbacks = {}

    if session_kwargs:
      # Anything left in session_kwargs after the pops above is unsupported.
      raise ValueError('Some keys in session_kwargs are not supported at this '
                       'time: %s' % (session_kwargs.keys(),))

    self._callable_fn = None
    self._feed_arrays = None
    self._feed_symbols = None
    self._symbol_vals = None
    self._fetches = None
    self._session = None

  def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
    """Generates a callable that runs the graph.

    Args:
      feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
      feed_symbols: List of input tensors to be fed symbolic tensors at
        runtime.
      symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
      session: Session to use to generate the callable.

    Returns:
      Function that runs the graph according to the above options.
    """
    # Prepare callable options.
    callable_opts = config_pb2.CallableOptions()
    # Handle external-data feed.
    for x in feed_arrays:
      callable_opts.feed.append(x.name)
    if self.feed_dict:
      for key in sorted(self.feed_dict.keys()):
        callable_opts.feed.append(key.name)
    # Handle symbolic feed.
    for x, y in zip(feed_symbols, symbol_vals):
      connection = callable_opts.tensor_connection.add()
      if x.dtype != y.dtype:
        y = tf.cast(y, dtype=x.dtype)
      from_tensor = _as_graph_element(y)
      if from_tensor is None:
        from_tensor = y
      connection.from_tensor = from_tensor.name  # Data tensor
      connection.to_tensor = x.name  # Placeholder
    # Handle fetches.
    for x in self.outputs + self.fetches:
      callable_opts.fetch.append(x.name)
    # Handle updates.
    callable_opts.target.append(self.updates_op.name)
    # Handle run_options.
    if self.run_options:
      callable_opts.run_options.CopyFrom(self.run_options)
    # Create callable.
    callable_fn = session._make_callable_from_options(callable_opts)
    # Cache parameters corresponding to the generated callable, so that
    # we can detect future mismatches and refresh the callable.
    self._callable_fn = callable_fn
    self._feed_arrays = feed_arrays
    self._feed_symbols = feed_symbols
    self._symbol_vals = symbol_vals
    self._fetches = list(self.fetches)
    self._session = session

  def _call_fetch_callbacks(self, fetches_output):
    # Dispatch each extra fetch's output to its registered callback, if any.
    for fetch, output in zip(self._fetches, fetches_output):
      if fetch in self.fetch_callbacks:
        self.fetch_callbacks[fetch](output)

  def _eval_if_composite(self, tensor):
    """Helper method which evaluates any CompositeTensors passed to it."""
    # We need to evaluate any composite tensor objects that have been
    # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
    # actual CompositeTensor objects instead of the value(s) contained in the
    # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
    # this ensures that we return its value as a SparseTensorValue rather than
    # a SparseTensor.
    from keras.utils import tf_utils  # pylint: disable=g-import-not-at-top
    if tf_utils.is_extension_type(tensor):
      return self._session.run(tensor)
    else:
      return tensor

  def __call__(self, inputs):
    inputs = tf.nest.flatten(inputs, expand_composites=True)

    session = get_session(inputs)
    feed_arrays = []
    array_vals = []
    feed_symbols = []
    symbol_vals = []
    # Split inputs into symbolic feeds (tensor-to-tensor connections) and
    # concrete Numpy feeds; `None` values are skipped entirely.
    for tensor, value in zip(self.inputs, inputs):
      if value is None:
        continue

      if tf.is_tensor(value):
        # Case: feeding symbolic tensor.
        feed_symbols.append(tensor)
        symbol_vals.append(value)
      else:
        # Case: feeding Numpy array.
        feed_arrays.append(tensor)
        # We need to do array conversion and type casting at this level, since
        # `callable_fn` only supports exact matches.
        tensor_type = tf.as_dtype(tensor.dtype)
        array_vals.append(np.asarray(value,
                                     dtype=tensor_type.as_numpy_dtype))

    if self.feed_dict:
      for key in sorted(self.feed_dict.keys()):
        array_vals.append(
            np.asarray(self.feed_dict[key], dtype=key.dtype.as_numpy_dtype))

    # Refresh callable if anything has changed.
    if (self._callable_fn is None or feed_arrays != self._feed_arrays or
        symbol_vals != self._symbol_vals or
        feed_symbols != self._feed_symbols or self.fetches != self._fetches or
        session != self._session):
      self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)

    fetched = self._callable_fn(*array_vals,
                                run_metadata=self.run_metadata)
    # Extra fetches are appended after the real outputs; hand them to their
    # registered callbacks before re-packing the outputs.
    self._call_fetch_callbacks(fetched[-len(self._fetches):])
    output_structure = tf.nest.pack_sequence_as(
        self._outputs_structure,
        fetched[:len(self.outputs)],
        expand_composites=True)
    # We need to evaluate any composite tensor objects that have been
    # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
    # actual CompositeTensor objects instead of the value(s) contained in the
    # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
    # this ensures that we return its value as a SparseTensorValue rather than
    # a SparseTensor.
    return tf.nest.map_structure(self._eval_if_composite, output_structure)


@keras_export('keras.backend.function')
@doc_controls.do_not_generate_docs
def function(inputs, outputs, updates=None, name=None, **kwargs):
  """Instantiates a Keras function.

  Args:
      inputs: List of placeholder tensors.
      outputs: List of output tensors.
      updates: List of update ops.
      name: String, name of function.
      **kwargs: Passed to `tf.Session.run`.

  Returns:
      Output values as Numpy arrays.

  Raises:
      ValueError: if invalid kwargs are passed in or if in eager execution.
  """
  if tf.compat.v1.executing_eagerly_outside_functions():
    # TF2 / eager path: wrap a functional Model instead of a session callable.
    if kwargs:
      raise ValueError('Session keyword arguments are not supported during '
                       'eager execution. You passed: %s' % (kwargs,))
    if updates:
      raise ValueError('`updates` argument is not supported during '
                       'eager execution. You passed: %s' % (updates,))
    from keras import models  # pylint: disable=g-import-not-at-top
    from keras.utils import tf_utils  # pylint: disable=g-import-not-at-top
    model = models.Model(inputs=inputs, outputs=outputs)

    wrap_outputs = isinstance(outputs, list) and len(outputs) == 1
    def func(model_inputs):
      outs = model(model_inputs)
      if wrap_outputs:
        outs = [outs]
      return tf_utils.sync_to_numpy_or_python_type(outs)
    return func

  if kwargs:
    # Validate kwargs against what tf.Session.run actually accepts.
    for key in kwargs:
      if (key not in tf_inspect.getfullargspec(tf.compat.v1.Session.run)[0]
          and key not in ['inputs', 'outputs', 'updates', 'name']):
        msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
               'backend') % key
        raise ValueError(msg)
  return GraphExecutionFunction(
      inputs, outputs, updates=updates, name=name, **kwargs)


@keras_export('keras.backend.gradients')
@doc_controls.do_not_generate_docs
def gradients(loss, variables):
  """Returns the gradients of `loss` w.r.t. `variables`.

  Args:
      loss: Scalar tensor to minimize.
      variables: List of variables.

  Returns:
      A gradients tensor.
  """
  return tf.compat.v1.gradients(
      loss, variables, colocate_gradients_with_ops=True)


@keras_export('keras.backend.stop_gradient')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def stop_gradient(variables):
  """Returns `variables` but with zero gradient w.r.t. every other variable.

  Args:
      variables: Tensor or list of tensors to consider constant with respect
        to any other variable.


  Returns:
      A single tensor or a list of tensors (depending on the passed argument)
      that has no gradient with respect to any other variable.
""" if isinstance(variables, (list, tuple)): return map(tf.stop_gradient, variables) return tf.stop_gradient(variables) # CONTROL FLOW @keras_export('keras.backend.rnn') @tf.__internal__.dispatch.add_dispatch_support def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False): """Iterates over the time dimension of a tensor. Args: step_function: RNN step function. Args; input; Tensor with shape `(samples, ...)` (no time dimension), representing input for the batch of samples at a certain time step. states; List of tensors. Returns; output; Tensor with shape `(samples, output_dim)` (no time dimension). new_states; List of tensors, same length and shapes as 'states'. The first state in the list must be the output tensor at the previous timestep. inputs: Tensor of temporal data of shape `(samples, time, ...)` (at least 3D), or nested tensors, and each of which has shape `(samples, time, ...)`. initial_states: Tensor with shape `(samples, state_size)` (no time dimension), containing the initial values for the states used in the step function. In the case that state_size is in a nested shape, the shape of initial_states will also follow the nested structure. go_backwards: Boolean. If True, do the iteration over the time dimension in reverse order and return the reversed sequence. mask: Binary tensor with shape `(samples, time, 1)`, with a zero for every element that is masked. constants: List of constant values passed at each step. unroll: Whether to unroll the RNN or to use a symbolic `while_loop`. input_length: An integer or a 1-D Tensor, depending on whether the time dimension is fixed-length or not. In case of variable length input, it is used for masking in case there's no mask specified. time_major: Boolean. If true, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. 
          Using `time_major = True` is a bit more efficient because it avoids
          transposes at the beginning and end of the RNN calculation. However,
          most TensorFlow data is batch-major, so by default this function
          accepts input and emits output in batch-major form.
      zero_output_for_mask: Boolean. If True, the output for masked timestep
          will be zeros, whereas in the False case, output from previous
          timestep is returned.

  Returns:
      A tuple, `(last_output, outputs, new_states)`.
          last_output: the latest output of the rnn, of shape `(samples, ...)`
          outputs: tensor with shape `(samples, time, ...)` where each
              entry `outputs[s, t]` is the output of the step function
              at time `t` for sample `s`.
          new_states: list of tensors, latest states returned by
              the step function, of shape `(samples, ...)`.

  Raises:
      ValueError: if input dimension is less than 3.
      ValueError: if `unroll` is `True` but input timestep is not a fixed
      number.
      ValueError: if `mask` is provided (not `None`) but states is not provided
          (`len(states)` == 0).
  """

  def swap_batch_timestep(input_t):
    # Swap the batch and timestep dim for the incoming tensor.
    axes = list(range(len(input_t.shape)))
    axes[0], axes[1] = 1, 0
    return tf.compat.v1.transpose(input_t, axes)

  # Internally the loop always runs time-major; convert on the way in
  # (and back again on the way out, below).
  if not time_major:
    inputs = tf.nest.map_structure(swap_batch_timestep, inputs)

  flatted_inputs = tf.nest.flatten(inputs)
  time_steps = flatted_inputs[0].shape[0]  # static; may be None
  batch = flatted_inputs[0].shape[1]
  time_steps_t = tf.shape(flatted_inputs[0])[0]  # dynamic timestep count

  for input_ in flatted_inputs:
    input_.shape.with_rank_at_least(3)

  if mask is not None:
    if mask.dtype != tf.bool:
      mask = tf.cast(mask, tf.bool)
    if len(mask.shape) == 2:
      mask = expand_dims(mask)
    if not time_major:
      mask = swap_batch_timestep(mask)

  if constants is None:
    constants = []

  # tf.where needs its condition tensor to be the same shape as its two
  # result tensors, but in our case the condition (mask) tensor is
  # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
  # So we need to broadcast the mask to match the shape of inputs.
  # That's what the tile call does, it just repeats the mask along its
  # second dimension n times.
  def _expand_mask(mask_t, input_t, fixed_dim=1):
    if tf.nest.is_nested(mask_t):
      raise ValueError('mask_t is expected to be tensor, but got %s' % mask_t)
    if tf.nest.is_nested(input_t):
      raise ValueError('input_t is expected to be tensor, but got %s' % input_t)
    rank_diff = len(input_t.shape) - len(mask_t.shape)
    for _ in range(rank_diff):
      mask_t = tf.expand_dims(mask_t, -1)
    multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
    return tf.tile(mask_t, multiples)

  # Static unrolling: one step_function call is emitted per timestep in
  # Python, so the timestep count must be statically known.
  if unroll:
    if not time_steps:
      raise ValueError('Unrolling requires a fixed number of timesteps.')
    states = tuple(initial_states)
    successive_states = []
    successive_outputs = []

    # Process the input tensors. The input tensor need to be split on the
    # time_step dim, and reverse if go_backwards is True. In the case of nested
    # input, the input is flattened and then transformed individually.
    # The result of this will be a tuple of lists, each of the item in tuple is
    # list of the tensor with shape (batch, feature)
    def _process_single_input_t(input_t):
      input_t = tf.unstack(input_t)  # unstack for time_step dim
      if go_backwards:
        input_t.reverse()
      return input_t

    if tf.nest.is_nested(inputs):
      processed_input = tf.nest.map_structure(_process_single_input_t, inputs)
    else:
      processed_input = (_process_single_input_t(inputs),)

    def _get_input_tensor(time):
      inp = [t_[time] for t_ in processed_input]
      return tf.nest.pack_sequence_as(inputs, inp)

    if mask is not None:
      mask_list = tf.unstack(mask)
      if go_backwards:
        mask_list.reverse()

      for i in range(time_steps):
        inp = _get_input_tensor(i)
        mask_t = mask_list[i]
        output, new_states = step_function(inp,
                                           tuple(states) + tuple(constants))
        tiled_mask_t = _expand_mask(mask_t, output)

        if not successive_outputs:
          prev_output = zeros_like(output)
        else:
          prev_output = successive_outputs[-1]

        # Masked timesteps carry the previous output forward.
        output = tf.where(tiled_mask_t, output, prev_output)

        flat_states = tf.nest.flatten(states)
        flat_new_states = tf.nest.flatten(new_states)
        tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_states)
        flat_final_states = tuple(
            tf.where(m, s, ps)
            for m, s, ps in zip(tiled_mask_t, flat_new_states, flat_states))
        states = tf.nest.pack_sequence_as(states, flat_final_states)

        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = tf.stack(successive_outputs)

      if zero_output_for_mask:
        last_output = tf.where(
            _expand_mask(mask_list[-1], last_output), last_output,
            zeros_like(last_output))
        outputs = tf.where(
            _expand_mask(mask, outputs, fixed_dim=2), outputs,
            zeros_like(outputs))

    else:  # mask is None
      for i in range(time_steps):
        inp = _get_input_tensor(i)
        output, states = step_function(inp, tuple(states) + tuple(constants))
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = tf.stack(successive_outputs)

  else:  # Unroll == False
    states = tuple(initial_states)

    # Create input tensor array, if the inputs is nested tensors, then it will
    # be flattened first, and tensor array will be created one per flattened
    # tensor.
    input_ta = tuple(
        tf.TensorArray(
            dtype=inp.dtype,
            size=time_steps_t,
            tensor_array_name='input_ta_%s' % i)
        for i, inp in enumerate(flatted_inputs))
    input_ta = tuple(
        ta.unstack(input_) if not go_backwards else ta
        .unstack(reverse(input_, 0))
        for ta, input_ in zip(input_ta, flatted_inputs))

    # Get the time(0) input and compute the output for that, the output will be
    # used to determine the dtype of output tensor array. Don't read from
    # input_ta due to TensorArray clear_after_read default to True.
    input_time_zero = tf.nest.pack_sequence_as(inputs,
                                               [inp[0] for inp in flatted_inputs])
    # output_time_zero is used to determine the cell output shape and its dtype.
    # the value is discarded.
    output_time_zero, _ = step_function(
        input_time_zero, tuple(initial_states) + tuple(constants))
    output_ta = tuple(
        tf.TensorArray(
            dtype=out.dtype,
            size=time_steps_t,
            element_shape=out.shape,
            tensor_array_name='output_ta_%s' % i)
        for i, out in enumerate(tf.nest.flatten(output_time_zero)))

    time = tf.constant(0, dtype='int32', name='time')

    # We only specify the 'maximum_iterations' when building for XLA since that
    # causes slowdowns on GPU in TF.
    if (not tf.executing_eagerly() and
        control_flow_util.GraphOrParentsInXlaContext(
            tf.compat.v1.get_default_graph())):
      max_iterations = tf.reduce_max(input_length)
    else:
      max_iterations = None

    while_loop_kwargs = {
        'cond': lambda time, *_: time < time_steps_t,
        'maximum_iterations': max_iterations,
        'parallel_iterations': 32,
        'swap_memory': True,
    }
    if mask is not None:
      if go_backwards:
        mask = reverse(mask, 0)

      mask_ta = tf.TensorArray(
          dtype=tf.bool, size=time_steps_t, tensor_array_name='mask_ta')
      mask_ta = mask_ta.unstack(mask)

      def masking_fn(time):
        return mask_ta.read(time)

      def compute_masked_output(mask_t, flat_out, flat_mask):
        tiled_mask_t = tuple(
            _expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))
            for o in flat_out)
        return tuple(
            tf.where(m, o, fm)
            for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask))
    elif isinstance(input_length, tf.Tensor):
      # No explicit mask, but per-sample lengths: derive the mask from
      # the timestep index vs. each sample's length.
      if go_backwards:
        max_len = tf.reduce_max(input_length, axis=0)
        rev_input_length = tf.subtract(max_len - 1, input_length)

        def masking_fn(time):
          return tf.less(rev_input_length, time)
      else:

        def masking_fn(time):
          return tf.greater(input_length, time)

      def compute_masked_output(mask_t, flat_out, flat_mask):
        return tuple(
            tf.compat.v1.where(mask_t, o, zo)
            for (o, zo) in zip(flat_out, flat_mask))
    else:
      masking_fn = None

    if masking_fn is not None:
      # Mask for the T output will be base on the output of T - 1. In the case
      # T = 0, a zero filled tensor will be used.
      flat_zero_output = tuple(tf.zeros_like(o)
                               for o in tf.nest.flatten(output_time_zero))

      def _step(time, output_ta_t, prev_output, *states):
        """RNN step function.

        Args:
            time: Current timestep value.
            output_ta_t: TensorArray.
            prev_output: tuple of outputs from time - 1.
            *states: List of states.

        Returns:
            Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
        """
        current_input = tuple(ta.read(time) for ta in input_ta)
        # maybe set shape.
        current_input = tf.nest.pack_sequence_as(inputs, current_input)
        mask_t = masking_fn(time)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        # mask output
        flat_output = tf.nest.flatten(output)
        flat_mask_output = (flat_zero_output if zero_output_for_mask
                            else tf.nest.flatten(prev_output))
        flat_new_output = compute_masked_output(mask_t, flat_output,
                                                flat_mask_output)

        # mask states
        flat_state = tf.nest.flatten(states)
        flat_new_state = tf.nest.flatten(new_states)
        for state, new_state in zip(flat_state, flat_new_state):
          if isinstance(new_state, tf.Tensor):
            new_state.set_shape(state.shape)
        flat_final_state = compute_masked_output(mask_t, flat_new_state,
                                                 flat_state)
        new_states = tf.nest.pack_sequence_as(new_states, flat_final_state)

        output_ta_t = tuple(
            ta.write(time, out)
            for ta, out in zip(output_ta_t, flat_new_output))
        return (time + 1, output_ta_t,
                tuple(flat_new_output)) + tuple(new_states)

      final_outputs = tf.compat.v1.while_loop(
          body=_step,
          loop_vars=(time, output_ta, flat_zero_output) + states,
          **while_loop_kwargs)
      # Skip final_outputs[2] which is the output for final timestep.
      new_states = final_outputs[3:]
    else:

      def _step(time, output_ta_t, *states):
        """RNN step function.

        Args:
            time: Current timestep value.
            output_ta_t: TensorArray.
            *states: List of states.

        Returns:
            Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
        """
        current_input = tuple(ta.read(time) for ta in input_ta)
        current_input = tf.nest.pack_sequence_as(inputs, current_input)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        flat_state = tf.nest.flatten(states)
        flat_new_state = tf.nest.flatten(new_states)
        for state, new_state in zip(flat_state, flat_new_state):
          if isinstance(new_state, tf.Tensor):
            new_state.set_shape(state.shape)

        flat_output = tf.nest.flatten(output)
        output_ta_t = tuple(
            ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
        new_states = tf.nest.pack_sequence_as(initial_states, flat_new_state)
        return (time + 1, output_ta_t) + tuple(new_states)

      final_outputs = tf.compat.v1.while_loop(
          body=_step,
          loop_vars=(time, output_ta) + states,
          **while_loop_kwargs)
      new_states = final_outputs[2:]

    output_ta = final_outputs[1]

    outputs = tuple(o.stack() for o in output_ta)
    last_output = tuple(o[-1] for o in outputs)

    outputs = tf.nest.pack_sequence_as(output_time_zero, outputs)
    last_output = tf.nest.pack_sequence_as(output_time_zero, last_output)

  # static shape inference
  def set_shape(output_):
    if isinstance(output_, tf.Tensor):
      shape = output_.shape.as_list()
      shape[0] = time_steps
      shape[1] = batch
      output_.set_shape(shape)
    return output_

  outputs = tf.nest.map_structure(set_shape, outputs)

  # Convert back to batch-major if the caller provided batch-major input.
  if not time_major:
    outputs = tf.nest.map_structure(swap_batch_timestep, outputs)

  return last_output, outputs, new_states


@keras_export('keras.backend.switch')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar value.

  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.

  Args:
      condition: tensor (`int` or `bool`).
      then_expression: either a tensor, or a callable that returns a tensor.
      else_expression: either a tensor, or a callable that returns a tensor.

  Returns:
      The selected tensor.

  Raises:
      ValueError: If rank of `condition` is greater than rank of expressions.
  """
  if condition.dtype != tf.bool:
    condition = tf.cast(condition, 'bool')
  cond_ndim = ndim(condition)
  if not cond_ndim:
    # Scalar condition: use tf.cond, which requires callables for branches.
    if not callable(then_expression):

      def then_expression_fn():
        return then_expression
    else:
      then_expression_fn = then_expression
    if not callable(else_expression):

      def else_expression_fn():
        return else_expression
    else:
      else_expression_fn = else_expression
    x = tf.compat.v1.cond(condition, then_expression_fn, else_expression_fn)
  else:
    # tf.where needs its condition tensor
    # to be the same shape as its two
    # result tensors
    if callable(then_expression):
      then_expression = then_expression()
    if callable(else_expression):
      else_expression = else_expression()
    expr_ndim = ndim(then_expression)
    if cond_ndim > expr_ndim:
      raise ValueError('Rank of `condition` should be less than or'
                       ' equal to rank of `then_expression` and '
                       '`else_expression`. ndim(condition)=' + str(cond_ndim) +
                       ', ndim(then_expression)'
                       '=' + str(expr_ndim))
    if cond_ndim > 1:
      # Reshape + tile the condition so it broadcasts against the
      # expression tensors element-wise.
      ndim_diff = expr_ndim - cond_ndim
      cond_shape = tf.concat(
          [tf.shape(condition), [1] * ndim_diff], axis=0)
      condition = tf.reshape(condition, cond_shape)
      expr_shape = tf.shape(then_expression)
      shape_diff = expr_shape - cond_shape
      tile_shape = tf.where(shape_diff > 0, expr_shape,
                            tf.ones_like(expr_shape))
      condition = tf.tile(condition, tile_shape)
    x = tf.where(condition, then_expression, else_expression)
  return x


@keras_export('keras.backend.in_train_phase')
@doc_controls.do_not_generate_docs
def in_train_phase(x, alt, training=None):
  """Selects `x` in train phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Args:
      x: What to return in train phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on the `training` flag.
      the `training` flag defaults to `K.learning_phase()`.
  """
  from keras.engine import base_layer_utils  # pylint: disable=g-import-not-at-top
  # Resolution order for the learning phase: explicit argument, then the
  # active call context, then the global learning phase.
  if training is None:
    training = base_layer_utils.call_context().training

  if training is None:
    training = learning_phase()

  # TODO(b/138862903): Handle the case when training is tensor.
  if not tf.is_tensor(training):
    if training == 1 or training is True:
      if callable(x):
        return x()
      else:
        return x

    elif training == 0 or training is False:
      if callable(alt):
        return alt()
      else:
        return alt

  # else: assume learning phase is a placeholder tensor.
  x = switch(training, x, alt)
  return x


@keras_export('keras.backend.in_test_phase')
@doc_controls.do_not_generate_docs
def in_test_phase(x, alt, training=None):
  """Selects `x` in test phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Args:
      x: What to return in test phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on `K.learning_phase`.
  """
  # Test phase is simply the train phase with the branches swapped.
  return in_train_phase(alt, x, training=training)


# NN OPERATIONS


@keras_export('keras.backend.relu')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def relu(x, alpha=0., max_value=None, threshold=0.):
  """Rectified linear unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Args:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold.
      threshold: float. Threshold value for thresholded activation.
  Returns:
      A tensor.
  """
  # While x can be a tensor or variable, we also see cases where
  # numpy arrays, lists, tuples are passed as well.
  # lists, tuples do not have 'dtype' attribute.
  dtype = getattr(x, 'dtype', floatx())
  if alpha != 0.:
    if max_value is None and threshold == 0:
      # Plain leaky ReLU fast path.
      return tf.nn.leaky_relu(x, alpha=alpha)

    if threshold != 0:
      negative_part = tf.nn.relu(-x + threshold)
    else:
      negative_part = tf.nn.relu(-x)

  clip_max = max_value is not None

  if threshold != 0:
    # computes x for x > threshold else 0
    x = x * tf.cast(tf.greater(x, threshold), dtype=dtype)
  elif max_value == 6:
    # if no threshold, then can use nn.relu6 native TF op for performance
    x = tf.nn.relu6(x)
    clip_max = False
  else:
    x = tf.nn.relu(x)

  if clip_max:
    max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
    zero = _constant_to_tensor(0, x.dtype.base_dtype)
    x = tf.clip_by_value(x, zero, max_value)

  if alpha != 0.:
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x


@keras_export('keras.backend.elu')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def elu(x, alpha=1.):
  """Exponential linear unit.

  Args:
      x: A tensor or variable to compute the activation function for.
      alpha: A scalar, slope of negative section.

  Returns:
      A tensor.
  """
  res = tf.nn.elu(x)
  if alpha == 1:
    return res
  else:
    # Scale only the negative section; tf.nn.elu already equals x for x > 0.
    return tf.where(x > 0, res, alpha * res)


@keras_export('keras.backend.softmax')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softmax(x, axis=-1):
  """Softmax of a tensor.

  Args:
      x: A tensor or variable.
      axis: The dimension softmax would be performed on.
          The default is -1 which indicates the last dimension.

  Returns:
      A tensor.
  """
  return tf.nn.softmax(x, axis=axis)


@keras_export('keras.backend.softplus')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softplus(x):
  """Softplus of a tensor.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.math.softplus(x)


@keras_export('keras.backend.softsign')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softsign(x):
  """Softsign of a tensor.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.math.softsign(x)


@keras_export('keras.backend.categorical_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy between an output tensor and a target tensor.

  Args:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.

  Example:

  >>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
  >>> print(a)
  tf.Tensor(
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]], shape=(3, 3), dtype=float32)
  >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3,3])
  >>> print(b)
  tf.Tensor(
    [[0.9  0.05 0.05]
     [0.05 0.89 0.06]
     [0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
  >>> loss = tf.keras.backend.categorical_crossentropy(a, b)
  >>> print(np.around(loss, 5))
  [0.10536 0.11653 0.06188]
  >>> loss = tf.keras.backend.categorical_crossentropy(a, a)
  >>> print(np.around(loss, 5))
  [0. 0. 0.]

  """
  target = tf.convert_to_tensor(target)
  output = tf.convert_to_tensor(output)
  target.shape.assert_is_compatible_with(output.shape)

  # Use logits whenever they are available. `softmax` and `sigmoid`
  # activations cache logits on the `output` Tensor.
  if hasattr(output, '_keras_logits'):
    output = output._keras_logits  # pylint: disable=protected-access
    if from_logits:
      warnings.warn(
          '"`categorical_crossentropy` received `from_logits=True`, but '
          'the `output` argument was produced by a sigmoid or softmax '
          'activation and thus does not represent logits. Was this intended?"')
    from_logits = True

  if from_logits:
    return tf.nn.softmax_cross_entropy_with_logits(
        labels=target, logits=output, axis=axis)

  if (not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and
      output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
    # When softmax activation function is used for output operation, we
    # use logits from the softmax function directly to compute loss in order
    # to prevent collapsing zero when training.
    # See b/117284466
    assert len(output.op.inputs) == 1
    output = output.op.inputs[0]
    return tf.nn.softmax_cross_entropy_with_logits(
        labels=target, logits=output, axis=axis)

  # scale preds so that the class probas of each sample sum to 1
  output = output / tf.reduce_sum(output, axis, True)

  # Compute cross entropy from probabilities.
  # Clipping avoids log(0) when a predicted probability is exactly 0 or 1.
  epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
  output = tf.clip_by_value(output, epsilon_, 1. - epsilon_)
  return -tf.reduce_sum(target * tf.math.log(output), axis)


@keras_export('keras.backend.sparse_categorical_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy with integer targets.

  Args:
      target: An integer tensor.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
          `channels_first`.
  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
  """
  target = tf.convert_to_tensor(target)
  output = tf.convert_to_tensor(output)

  # Use logits whenever they are available. `softmax` and `sigmoid`
  # activations cache logits on the `output` Tensor.
  if hasattr(output, '_keras_logits'):
    output = output._keras_logits  # pylint: disable=protected-access
    if from_logits:
      warnings.warn(
          '"`sparse_categorical_crossentropy` received `from_logits=True`, but '
          'the `output` argument was produced by a sigmoid or softmax '
          'activation and thus does not represent logits. Was this intended?"')
    from_logits = True
  elif (not from_logits and
        not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and
        output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
    # When softmax activation function is used for output operation, we
    # use logits from the softmax function directly to compute loss in order
    # to prevent collapsing zero when training.
    # See b/117284466
    assert len(output.op.inputs) == 1
    output = output.op.inputs[0]
    from_logits = True
  elif not from_logits:
    # Probabilities: clip to avoid log(0), then convert to log-probs so the
    # logits-based op below computes the correct loss.
    epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = tf.math.log(output)

  if isinstance(output.shape, (tuple, list)):
    output_rank = len(output.shape)
  else:
    output_rank = output.shape.ndims
  if output_rank is not None:
    axis %= output_rank
    if axis != output_rank - 1:
      # The sparse crossentropy op requires channels last; move `axis` there.
      permutation = list(
          itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
      output = tf.compat.v1.transpose(output, perm=permutation)
  elif axis != -1:
    raise ValueError(
        'Cannot compute sparse categorical crossentropy with `axis={}` on an '
        'output tensor with unknown rank'.format(axis))

  target = cast(target, 'int64')

  # Try to adjust the shape so that rank of labels = rank of logits - 1.
  output_shape = tf.shape(output)
  target_rank = target.shape.ndims

  update_shape = (
      target_rank is not None and output_rank is not None and
      target_rank != output_rank - 1)
  if update_shape:
    target = flatten(target)
    output = tf.reshape(output, [-1, output_shape[-1]])

  if py_any(_is_symbolic_tensor(v) for v in [target, output]):
    with get_graph().as_default():
      res = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=target, logits=output)
  else:
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target, logits=output)

  if update_shape and output_rank >= 3:
    # If our output includes timesteps or spatial dimensions we need to reshape
    return tf.reshape(res, output_shape[:-1])
  else:
    return res


@keras_export('keras.backend.binary_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.

  Args:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.

  Returns:
      A tensor.
  """
  target = tf.convert_to_tensor(target)
  output = tf.convert_to_tensor(output)

  # Use logits whenever they are available. `softmax` and `sigmoid`
  # activations cache logits on the `output` Tensor.
  if hasattr(output, '_keras_logits'):
    output = output._keras_logits  # pylint: disable=protected-access
    if from_logits:
      warnings.warn(
          '"`binary_crossentropy` received `from_logits=True`, but the `output`'
          ' argument was produced by a sigmoid or softmax activation and thus '
          'does not represent logits. Was this intended?"')
    from_logits = True

  if from_logits:
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)

  if (not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and
      output.op.type == 'Sigmoid') and not hasattr(output, '_keras_history'):
    # When sigmoid activation function is used for output operation, we
    # use logits from the sigmoid function directly to compute loss in order
    # to prevent collapsing zero when training.
    assert len(output.op.inputs) == 1
    output = output.op.inputs[0]
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)

  # Clipping avoids log(0) when a predicted probability is exactly 0 or 1.
  epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
  output = tf.clip_by_value(output, epsilon_, 1. - epsilon_)

  # Compute cross entropy from probabilities.
  bce = target * tf.math.log(output + epsilon())
  bce += (1 - target) * tf.math.log(1 - output + epsilon())
  return -bce


@keras_export('keras.backend.sigmoid')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sigmoid(x):
  """Element-wise sigmoid.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.sigmoid(x)


@keras_export('keras.backend.hard_sigmoid')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.

  Faster than sigmoid.
  Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
  In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
  point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
  x = tf.multiply(x, point_two)
  x = tf.add(x, point_five)
  x = tf.clip_by_value(x, 0., 1.)
  return x


@keras_export('keras.backend.tanh')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def tanh(x):
  """Element-wise tanh.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
""" return tf.tanh(x) @keras_export('keras.backend.dropout') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def dropout(x, level, noise_shape=None, seed=None): """Sets entries in `x` to zero at random, while scaling the entire tensor. Args: x: tensor level: fraction of the entries in the tensor that will be set to 0. noise_shape: shape for randomly generated keep/drop flags, must be broadcastable to the shape of `x` seed: random seed to ensure determinism. Returns: A tensor. """ if seed is None: seed = np.random.randint(10e6) return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed) @keras_export('keras.backend.l2_normalize') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def l2_normalize(x, axis=None): """Normalizes a tensor wrt the L2 norm alongside the specified axis. Args: x: Tensor or variable. axis: axis along which to perform normalization. Returns: A tensor. """ return tf.linalg.l2_normalize(x, axis=axis) @keras_export('keras.backend.in_top_k') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def in_top_k(predictions, targets, k): """Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`. """ return tf.compat.v1.math.in_top_k(predictions, targets, k) # CONVOLUTIONS def _preprocess_conv1d_input(x, data_format): """Transpose and cast the input before the conv1d. Args: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. Returns: A tensor. 
  """
  tf_data_format = 'NWC'  # to pass TF Conv2dNative operations
  if data_format == 'channels_first':
    if not _has_nchw_support():
      x = tf.compat.v1.transpose(x, (0, 2, 1))  # NCW -> NWC
    else:
      tf_data_format = 'NCW'
  return x, tf_data_format


def _preprocess_conv2d_input(x, data_format, force_transpose=False):
  """Transpose and cast the input before the conv2d.

  Args:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.
      force_transpose: Boolean. If True, the input will always be transposed
          from NCHW to NHWC if `data_format` is `"channels_first"`.
          If False, the transposition only occurs on CPU (GPU ops are
          assumed to support NCHW).

  Returns:
      A tensor.
  """
  tf_data_format = 'NHWC'
  if data_format == 'channels_first':
    # Keep NCHW only when the runtime supports it and the caller did not
    # explicitly request NHWC (e.g. for ops that only accept NHWC).
    if not _has_nchw_support() or force_transpose:
      x = tf.compat.v1.transpose(x, (0, 2, 3, 1))  # NCHW -> NHWC
    else:
      tf_data_format = 'NCHW'
  return x, tf_data_format


def _preprocess_conv3d_input(x, data_format):
  """Transpose and cast the input before the conv3d.

  Args:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tensor.
  """
  tf_data_format = 'NDHWC'
  if data_format == 'channels_first':
    if not _has_nchw_support():
      x = tf.compat.v1.transpose(x, (0, 2, 3, 4, 1))  # NCDHW -> NDHWC
    else:
      tf_data_format = 'NCDHW'
  return x, tf_data_format


def _preprocess_padding(padding):
  """Convert keras' padding to TensorFlow's padding.

  Args:
      padding: string, one of 'same' , 'valid'

  Returns:
      a string, one of 'SAME', 'VALID'.

  Raises:
      ValueError: if invalid `padding'`
  """
  if padding == 'same':
    padding = 'SAME'
  elif padding == 'valid':
    padding = 'VALID'
  else:
    raise ValueError('Invalid padding: ' + str(padding))
  return padding


@keras_export('keras.backend.conv1d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv1d(x,
           kernel,
           strides=1,
           padding='valid',
           data_format=None,
           dilation_rate=1):
  """1D convolution.

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: stride integer.
      padding: string, `"same"`, `"causal"` or `"valid"`.
      data_format: string, one of "channels_last", "channels_first".
      dilation_rate: integer dilate rate.

  Returns:
      A tensor, result of 1D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  kernel_shape = kernel.shape.as_list()
  if padding == 'causal':
    # causal (dilated) convolution: left-pad so that the output at step t
    # only depends on inputs at steps <= t, then run a 'valid' convolution.
    left_pad = dilation_rate * (kernel_shape[0] - 1)
    x = temporal_padding(x, (left_pad, 0))
    padding = 'valid'
  padding = _preprocess_padding(padding)

  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  x = tf.compat.v1.nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)

  # If the input was transposed to NWC for the TF op, transpose back so the
  # caller gets the layout it asked for.
  if data_format == 'channels_first' and tf_data_format == 'NWC':
    x = tf.compat.v1.transpose(x, (0, 2, 1))  # NWC -> NCW
  return x


@keras_export('keras.backend.conv2d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 2 integers.

  Returns:
      A tensor, result of 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = tf.compat.v1.nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  # Undo the NCHW -> NHWC transpose applied by _preprocess_conv2d_input.
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x


@keras_export('keras.backend.conv2d_transpose')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv2d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D deconvolution (i.e. transposed convolution).

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: Tuple of 2 integers.

  Returns:
      A tensor, result of transposed 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
  if data_format == 'channels_first' and dilation_rate != (1, 1):
    force_transpose = True
  else:
    force_transpose = False

  x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)

  # `output_shape` must match the data format actually used by the TF op.
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[1])
  if output_shape[0] is None:
    # Unknown batch size: take it dynamically from the input tensor.
    output_shape = (shape(x)[0],) + tuple(output_shape[1:])

  if isinstance(output_shape, (tuple, list)):
    output_shape = tf.stack(list(output_shape))

  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  if dilation_rate == (1, 1):
    x = tf.compat.v1.nn.conv2d_transpose(x, kernel, output_shape, strides,
                                         padding=padding,
                                         data_format=tf_data_format)
  else:
    # atrous_conv2d_transpose only accepts a single (isotropic) rate.
    assert dilation_rate[0] == dilation_rate[1]
    x = tf.nn.atrous_conv2d_transpose(
        x, kernel, output_shape, rate=dilation_rate[0], padding=padding)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x


def separable_conv1d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=1,
                     padding='valid',
                     data_format=None,
                     dilation_rate=1):
  """1D convolution with separable filters.

  Args:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: stride integer.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: integer dilation rate.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  if isinstance(strides, int):
    strides = (strides,)
  if isinstance(dilation_rate, int):
    dilation_rate = (dilation_rate,)

  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if not isinstance(strides, tuple):
    strides = tuple(strides)
  # The 1D separable conv is implemented via the 2D op: a dummy spatial
  # dimension is inserted, so the single stride is duplicated for both
  # spatial axes.
  if tf_data_format == 'NWC':
    spatial_start_dim = 1
    strides = (1,) + strides * 2 + (1,)
  else:
    spatial_start_dim = 2
    strides = (1, 1) + strides * 2
  x = tf.expand_dims(x, spatial_start_dim)
  depthwise_kernel = tf.expand_dims(depthwise_kernel, 0)
  pointwise_kernel = tf.expand_dims(pointwise_kernel, 0)
  dilation_rate = (1,) + dilation_rate

  x = tf.compat.v1.nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)

  # Remove the dummy spatial dimension added above.
  x = tf.squeeze(x, [spatial_start_dim])

  if data_format == 'channels_first' and tf_data_format == 'NWC':
    x = tf.compat.v1.transpose(x, (0, 2, 1))  # NWC -> NCW

  return x


@keras_export('keras.backend.separable_conv2d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def separable_conv2d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.

  Args:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
      ValueError: if `strides` is not a tuple of 2 integers.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if len(strides) != 2:
    raise ValueError('`strides` must be a tuple of 2 integers.')

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if not isinstance(strides, tuple):
    strides = tuple(strides)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = tf.compat.v1.nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x


@keras_export('keras.backend.depthwise_conv2d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def depthwise_conv2d(x,
                     depthwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.

  Args:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = tf.compat.v1.nn.depthwise_conv2d(
      x,
      depthwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  # Undo the NCHW -> NHWC transpose applied by _preprocess_conv2d_input.
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x


@keras_export('keras.backend.conv3d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv3d(x,
           kernel,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1, 1)):
  """3D convolution.

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 3 integers.

  Returns:
      A tensor, result of 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = tf.compat.v1.nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  # Undo the NCDHW -> NDHWC transpose applied by _preprocess_conv3d_input.
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = tf.compat.v1.transpose(x, (0, 4, 1, 2, 3))
  return x


def conv3d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1, 1),
                     padding='valid',
                     data_format=None):
  """3D deconvolution (i.e. transposed convolution).

  Args:
      x: input tensor.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, "same" or "valid".
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tensor, result of transposed 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = tf.stack(output_shape)

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)

  # `output_shape` must match the data format actually used by the TF op.
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[4], output_shape[1])
  if output_shape[0] is None:
    # Unknown batch size: take it dynamically from the input tensor.
    output_shape = (tf.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = tf.stack(list(output_shape))

  padding = _preprocess_padding(padding)
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = tf.compat.v1.nn.conv3d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = tf.compat.v1.transpose(x, (0, 4, 1, 2, 3))
  return x


@keras_export('keras.backend.pool2d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def pool2d(x,
           pool_size,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """2D Pooling.

  Args:
      x: Tensor or variable.
      pool_size: tuple of 2 integers.
      strides: tuple of 2 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.

  Returns:
      A tensor, result of 2D pooling.

  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
      `"channels_first"`.
      ValueError: if `pool_size` is not a tuple of 2 integers.
      ValueError: if `strides` is not a tuple of 2 integers.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if len(pool_size) != 2:
    raise ValueError('`pool_size` must be a tuple of 2 integers.')
  if len(strides) != 2:
    raise ValueError('`strides` must be a tuple of 2 integers.')

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # TF pooling ops take 4D strides/windows including batch and channel dims.
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size

  if pool_mode == 'max':
    x = tf.compat.v1.nn.max_pool(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'avg':
    x = tf.compat.v1.nn.avg_pool(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode: ' + str(pool_mode))

  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x


@keras_export('keras.backend.pool3d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def pool3d(x,
           pool_size,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """3D Pooling.

  Args:
      x: Tensor or variable.
      pool_size: tuple of 3 integers.
      strides: tuple of 3 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.

  Returns:
      A tensor, result of 3D pooling.

  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
      `"channels_first"`.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # TF pooling ops take 5D strides/windows including batch and channel dims.
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size

  if pool_mode == 'max':
    x = tf.nn.max_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'avg':
    x = tf.nn.avg_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode: ' + str(pool_mode))

  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = tf.compat.v1.transpose(x, (0, 4, 1, 2, 3))
  return x


def local_conv(inputs,
               kernel,
               kernel_size,
               strides,
               output_shape,
               data_format=None):
  """Apply N-D convolution with un-shared weights.

  Args:
      inputs: (N+2)-D tensor with shape
          (batch_size, channels_in, d_in1, ..., d_inN)
          if data_format='channels_first', or
          (batch_size, d_in1, ..., d_inN, channels_in)
          if data_format='channels_last'.
      kernel: the unshared weight for N-D convolution,
          with shape (output_items, feature_dim, channels_out), where
          feature_dim = np.prod(kernel_size) * channels_in,
          output_items = np.prod(output_shape).
      kernel_size: a tuple of N integers, specifying the
          spatial dimensions of the N-D convolution window.
      strides: a tuple of N integers, specifying the strides
          of the convolution along the spatial dimensions.
      output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
          dimensionality of the output.
      data_format: string, "channels_first" or "channels_last".

  Returns:
      An (N+2)-D tensor with shape:
      (batch_size, channels_out) + output_shape
      if data_format='channels_first', or:
      (batch_size,) + output_shape + (channels_out,)
      if data_format='channels_last'.
  Raises:
      ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  kernel_shape = int_shape(kernel)
  feature_dim = kernel_shape[1]
  channels_out = kernel_shape[-1]
  ndims = len(output_shape)
  spatial_dimensions = list(range(ndims))

  # Gather one flattened input patch per output position, then compute all
  # positions at once with a single batched matmul against the unshared
  # kernel.
  xs = []
  output_axes_ticks = [range(axis_max) for axis_max in output_shape]
  for position in itertools.product(*output_axes_ticks):
    slices = [slice(None)]

    if data_format == 'channels_first':
      slices.append(slice(None))

    slices.extend(
        slice(position[d] * strides[d], position[d] * strides[d] +
              kernel_size[d]) for d in spatial_dimensions)

    if data_format == 'channels_last':
      slices.append(slice(None))

    xs.append(reshape(inputs[slices], (1, -1, feature_dim)))

  x_aggregate = concatenate(xs, axis=0)
  output = batch_dot(x_aggregate, kernel)
  output = reshape(output, output_shape + (-1, channels_out))

  # Move batch and channel axes back to the positions the data format
  # expects.
  if data_format == 'channels_first':
    permutation = [ndims, ndims + 1] + spatial_dimensions
  else:
    permutation = [ndims] + spatial_dimensions + [ndims + 1]

  return permute_dimensions(output, permutation)


@keras_export('keras.backend.local_conv1d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
  """Apply 1D conv with un-shared weights.

  Args:
      inputs: 3D tensor with shape:
          (batch_size, steps, input_dim)
          if data_format is "channels_last" or
          (batch_size, input_dim, steps)
          if data_format is "channels_first".
      kernel: the unshared weight for convolution,
          with shape (output_length, feature_dim, filters).
      kernel_size: a tuple of a single integer,
          specifying the length of the 1D convolution window.
      strides: a tuple of a single integer,
          specifying the stride length of the convolution.
      data_format: the data format, channels_first or channels_last.

  Returns:
      A 3d tensor with shape:
      (batch_size, filters, output_length)
      if data_format='channels_first'
      or 3D tensor with shape:
      (batch_size, output_length, filters)
      if data_format='channels_last'.
  """
  # output_length is determined by the kernel's first dimension (one weight
  # slice per output step).
  output_shape = (kernel.shape[0],)
  return local_conv(inputs,
                    kernel,
                    kernel_size,
                    strides,
                    output_shape,
                    data_format)


@keras_export('keras.backend.local_conv2d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def local_conv2d(inputs,
                 kernel,
                 kernel_size,
                 strides,
                 output_shape,
                 data_format=None):
  """Apply 2D conv with un-shared weights.

  Args:
      inputs: 4D tensor with shape:
          (batch_size, filters, new_rows, new_cols)
          if data_format='channels_first'
          or 4D tensor with shape:
          (batch_size, new_rows, new_cols, filters)
          if data_format='channels_last'.
      kernel: the unshared weight for convolution,
          with shape (output_items, feature_dim, filters).
      kernel_size: a tuple of 2 integers, specifying the
          width and height of the 2D convolution window.
      strides: a tuple of 2 integers, specifying the strides
          of the convolution along the width and height.
      output_shape: a tuple with (output_row, output_col).
      data_format: the data format, channels_first or channels_last.

  Returns:
      A 4D tensor with shape:
      (batch_size, filters, new_rows, new_cols)
      if data_format='channels_first'
      or 4D tensor with shape:
      (batch_size, new_rows, new_cols, filters)
      if data_format='channels_last'.
  """
  return local_conv(inputs,
                    kernel,
                    kernel_size,
                    strides,
                    output_shape,
                    data_format)


@keras_export('keras.backend.bias_add')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.

  Args:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      Output tensor.

  Raises:
      ValueError: In one of the two cases below:
          1. invalid `data_format` argument.
          2. invalid bias shape.
             the bias should be either a vector or
             a tensor with ndim(x) - 1 dimension
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  bias_shape = int_shape(bias)
  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
    raise ValueError(
        'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))

  if len(bias_shape) == 1:
    # Vector bias: delegate directly to the fused TF op.
    if data_format == 'channels_first':
      return tf.nn.bias_add(x, bias, data_format='NCHW')
    return tf.nn.bias_add(x, bias, data_format='NHWC')
  if ndim(x) in (3, 4, 5):
    # Multi-dimensional bias: broadcast-add after reshaping the bias to
    # align with x's layout.
    if data_format == 'channels_first':
      bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
      return x + reshape(bias, bias_reshape_axis)
    return x + reshape(bias, (1,) + bias_shape)
  return tf.nn.bias_add(x, bias)


# RANDOMNESS


@keras_export('keras.backend.random_normal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with normal distribution of values.

  It is an alias to `tf.random.normal`.

  Args:
      shape: A tuple of integers, the shape of tensor to create.
      mean: A float, the mean value of the normal distribution to draw
        samples. Default to 0.0.
      stddev: A float, the standard deviation of the normal distribution
        to draw samples. Default to 1.0.
      dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras
        backend dtype which is float32.
      seed: Integer, random seed. Will use a random numpy integer when not
        specified.

  Returns:
      A tensor with normal distribution of values.

  Example:

  >>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3),
  ... mean=0.0, stddev=1.0)
  >>> random_normal_tensor
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
  dtype=float32)>
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    # NOTE(review): 10e6 is a float (1e7); np.random.randint truncates it.
    seed = np.random.randint(10e6)
  return tf.random.normal(
      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)


@keras_export('keras.backend.random_uniform')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
  """Returns a tensor with uniform distribution of values.

  Args:
      shape: A tuple of integers, the shape of tensor to create.
      minval: A float, lower boundary of the uniform distribution
          to draw samples.
      maxval: A float, upper boundary of the uniform distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.

  Example:

  >>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),
  ... minval=0.0, maxval=1.0)
  >>> random_uniform_tensor
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
  dtype=float32)>
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return tf.random.uniform(
      shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)


@keras_export('keras.backend.random_binomial')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.

  DEPRECATED, use `tf.keras.backend.random_bernoulli` instead.

  The binomial distribution with parameters `n` and `p` is the probability
  distribution of the number of successful Bernoulli process. Only supports
  `n` = 1 for now.

  Args:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of binomial distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
Example: >>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3), ... p=0.5) >>> random_binomial_tensor <tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)> """ warnings.warn('`tf.keras.backend.random_binomial` is deprecated, ' 'and will be removed in a future version.' 'Please use `tf.keras.backend.random_bernoulli` instead.') return random_bernoulli(shape, p, dtype, seed) @keras_export('keras.backend.random_bernoulli') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def random_bernoulli(shape, p=0.0, dtype=None, seed=None): """Returns a tensor with random bernoulli distribution of values. Args: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of bernoulli distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return tf.where( tf.random.uniform(shape, dtype=dtype, seed=seed) <= p, tf.ones(shape, dtype=dtype), tf.zeros(shape, dtype=dtype)) @keras_export('keras.backend.truncated_normal') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Returns a tensor with truncated random normal distribution of values. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked. Args: shape: A tuple of integers, the shape of tensor to create. mean: Mean of the values. stddev: Standard deviation of the values. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. 
""" if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return tf.random.truncated_normal( shape, mean, stddev, dtype=dtype, seed=seed) # CTC # TensorFlow has a native implementation, but it uses sparse tensors # and therefore requires a wrapper for Keras. The functions below convert # dense to sparse tensors and also wraps up the beam search code that is # in TensorFlow's CTC implementation @keras_export('keras.backend.ctc_label_dense_to_sparse') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def ctc_label_dense_to_sparse(labels, label_lengths): """Converts CTC labels from dense to sparse. Args: labels: dense CTC labels. label_lengths: length of the labels. Returns: A sparse tensor representation of the labels. """ label_shape = tf.shape(labels) num_batches_tns = tf.stack([label_shape[0]]) max_num_labels_tns = tf.stack([label_shape[1]]) def range_less_than(old_input, current_input): return tf.expand_dims( tf.range(tf.shape(old_input)[1]), 0) < tf.fill( max_num_labels_tns, current_input) init = tf.cast( tf.fill([1, label_shape[1]], 0), tf.bool) dense_mask = tf.compat.v1.scan( range_less_than, label_lengths, initializer=init, parallel_iterations=1) dense_mask = dense_mask[:, 0, :] label_array = tf.reshape( tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape) label_ind = tf.compat.v1.boolean_mask(label_array, dense_mask) batch_array = tf.compat.v1.transpose( tf.reshape( tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), reverse(label_shape, 0))) batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask) indices = tf.compat.v1.transpose( tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])) vals_sparse = tf.compat.v1.gather_nd(labels, indices) return tf.SparseTensor( tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64)) @keras_export('keras.backend.ctc_batch_cost') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs 
def ctc_batch_cost(y_true, y_pred, input_length, label_length): """Runs CTC loss algorithm on each batch element. Args: y_true: tensor `(samples, max_string_length)` containing the truth labels. y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_pred`. label_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_true`. Returns: Tensor with shape (samples,1) containing the CTC loss of each element. """ label_length = tf.cast( tf.squeeze(label_length, axis=-1), tf.int32) input_length = tf.cast( tf.squeeze(input_length, axis=-1), tf.int32) sparse_labels = tf.cast( ctc_label_dense_to_sparse(y_true, label_length), tf.int32) y_pred = tf.math.log(tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon()) return tf.expand_dims( tf.compat.v1.nn.ctc_loss( inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1) @keras_export('keras.backend.ctc_decode') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): """Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Args: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. 
If `false`, returns the `top_paths` most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence. """ input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log(tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon()) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths) decoded_dense = [] for st in decoded: st = tf.SparseTensor( st.indices, st.values, (num_samples, num_steps)) decoded_dense.append( tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS @keras_export('keras.backend.map_fn') @doc_controls.do_not_generate_docs def map_fn(fn, elems, name=None, dtype=None): """Map the function fn over the elements elems and return the outputs. Args: fn: Callable that will be called upon each element in elems elems: tensor name: A string name for the map node in the graph dtype: Output data type. Returns: Tensor with dtype `dtype`. """ return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype) @keras_export('keras.backend.foldl') @doc_controls.do_not_generate_docs def foldl(fn, elems, initializer=None, name=None): """Reduce elems using fn to combine them from left to right. Args: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor initializer: The first value used (`elems[0]` in case of None) name: A string name for the foldl node in the graph Returns: Tensor with same type and shape as `initializer`. 
""" return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name) @keras_export('keras.backend.foldr') @doc_controls.do_not_generate_docs def foldr(fn, elems, initializer=None, name=None): """Reduce elems using fn to combine them from right to left. Args: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor initializer: The first value used (`elems[-1]` in case of None) name: A string name for the foldr node in the graph Returns: Same type and shape as initializer """ return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name) # Load Keras default configuration from config file if present. # Set Keras base dir path given KERAS_HOME env variable, if applicable. # Otherwise either ~/.keras or /tmp. if 'KERAS_HOME' in os.environ: _keras_dir = os.environ.get('KERAS_HOME') else: _keras_base_dir = os.path.expanduser('~') _keras_dir = os.path.join(_keras_base_dir, '.keras') _config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json')) if os.path.exists(_config_path): try: with open(_config_path) as fh: _config = json.load(fh) except ValueError: _config = {} _floatx = _config.get('floatx', floatx()) assert _floatx in {'float16', 'float32', 'float64'} _epsilon = _config.get('epsilon', epsilon()) assert isinstance(_epsilon, float) _image_data_format = _config.get('image_data_format', image_data_format()) assert _image_data_format in {'channels_last', 'channels_first'} set_floatx(_floatx) set_epsilon(_epsilon) set_image_data_format(_image_data_format) # Save config file. if not os.path.exists(_keras_dir): try: os.makedirs(_keras_dir) except OSError: # Except permission denied and potential race conditions # in multi-threaded environments. 
pass if not os.path.exists(_config_path): _config = { 'floatx': floatx(), 'epsilon': epsilon(), 'backend': 'tensorflow', 'image_data_format': image_data_format() } try: with open(_config_path, 'w') as f: f.write(json.dumps(_config, indent=4)) except IOError: # Except permission denied. pass def configure_and_create_distributed_session(distribution_strategy): """Configure session config and create a session with it.""" def _create_session(distribution_strategy): """Create the Distributed Strategy session.""" session_config = get_default_session_config() # If a session already exists, merge in its config; in the case there is a # conflict, take values of the existing config. global _SESSION if getattr(_SESSION, 'session', None) and _SESSION.session._config: session_config.MergeFrom(_SESSION.session._config) if is_tpu_strategy(distribution_strategy): # TODO(priyag, yuefengz): Remove this workaround when Distribute # Coordinator is integrated with keras and we can create a session from # there. distribution_strategy.configure(session_config) master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access session = tf.compat.v1.Session(config=session_config, target=master) else: worker_context = dc.get_current_worker_context() if worker_context: dc_session_config = worker_context.session_config # Merge the default session config to the one from distribute # coordinator, which is fine for now since they don't have # conflicting configurations. 
dc_session_config.MergeFrom(session_config) session = tf.compat.v1.Session( config=dc_session_config, target=worker_context.master_target) else: distribution_strategy.configure(session_config) session = tf.compat.v1.Session(config=session_config) set_session(session) if distribution_strategy.extended._in_multi_worker_mode(): dc.run_distribute_coordinator( _create_session, distribution_strategy) else: _create_session(distribution_strategy) def _is_tpu_strategy_class(clz): is_tpu_strat = lambda k: k.__name__.startswith('TPUStrategy') if is_tpu_strat(clz): return True return py_any(map(_is_tpu_strategy_class, clz.__bases__)) def is_tpu_strategy(strategy): """Returns whether input is a TPUStrategy instance or subclass instance.""" return _is_tpu_strategy_class(strategy.__class__) def cast_variables_to_tensor(tensors): def _cast_variables_to_tensor(tensor): if isinstance(tensor, tf.Variable): return tf.identity(tensor) return tensor return tf.nest.map_structure(_cast_variables_to_tensor, tensors) def _is_symbolic_tensor(x): return tf.is_tensor(x) and not isinstance(x, tf.__internal__.EagerTensor) def convert_inputs_if_ragged(inputs): """Converts any ragged tensors to dense.""" def _convert_ragged_input(inputs): if isinstance(inputs, tf.RaggedTensor): return inputs.to_tensor() return inputs flat_inputs = tf.nest.flatten(inputs) contains_ragged = py_any( isinstance(i, tf.RaggedTensor) for i in flat_inputs) if not contains_ragged: return inputs, None inputs = tf.nest.map_structure(_convert_ragged_input, inputs) # Multiple mask are not yet supported, so one mask is used on all inputs. # We approach this similarly when using row lengths to ignore steps. 
nested_row_lengths = tf.cast(flat_inputs[0].nested_row_lengths()[0], 'int32') return inputs, nested_row_lengths def maybe_convert_to_ragged(is_ragged_input, output, nested_row_lengths, go_backwards=False): """Converts any ragged input back to its initial structure.""" if not is_ragged_input: return output if go_backwards: # Reverse based on the timestep dim, so that nested_row_lengths will mask # from the correct direction. Return the reverse ragged tensor. output = reverse(output, [1]) ragged = tf.RaggedTensor.from_tensor(output, nested_row_lengths) return reverse(ragged, [1]) else: return tf.RaggedTensor.from_tensor(output, nested_row_lengths) class ContextValueCache(weakref.WeakKeyDictionary): """Container that caches (possibly tensor) values based on the context. This class is similar to defaultdict, where values may be produced by the default factory specified during initialization. This class also has a default value for the key (when key is `None`) -- the key is set to the current graph or eager context. The default factories for key and value are only used in `__getitem__` and `setdefault`. The `.get()` behavior remains the same. This object will return the value of the current graph or closest parent graph if the current graph is a function. This is to reflect the fact that if a tensor is created in eager/graph, child functions may capture that tensor. The default factory method may accept keyword arguments (unlike defaultdict, which only accepts callables with 0 arguments). To pass keyword arguments to `default_factory`, use the `setdefault` method instead of `__getitem__`. 
An example of how this class can be used in different contexts: ``` cache = ContextValueCache(int) # Eager mode cache[None] += 2 cache[None] += 4 assert cache[None] == 6 # Graph mode with tf.Graph().as_default() as g: cache[None] += 5 cache[g] += 3 assert cache[g] == 8 ``` Example of a default factory with arguments: ``` cache = ContextValueCache(lambda x: x + 1) g = tf.get_default_graph() # Example with keyword argument. value = cache.setdefault(key=g, kwargs={'x': 3}) assert cache[g] == 4 ``` """ def __init__(self, default_factory): self.default_factory = default_factory weakref.WeakKeyDictionary.__init__(self) def _key(self): if tf.executing_eagerly(): return _DUMMY_EAGER_GRAPH.key else: return tf.compat.v1.get_default_graph() def _get_parent_graph(self, graph): """Returns the parent graph or dummy eager object.""" # TODO(b/149317164): Currently FuncGraphs use ops.get_default_graph() as the # outer graph. This results in outer_graph always being a Graph, # even in eager mode (get_default_graph will create a new Graph if there # isn't a default graph). Because of this bug, we have to specially set the # key when eager execution is enabled. parent_graph = graph.outer_graph if (not isinstance(parent_graph, tf.__internal__.FuncGraph) and tf.compat.v1.executing_eagerly_outside_functions()): return _DUMMY_EAGER_GRAPH.key return parent_graph def _get_recursive(self, key): """Gets the value at key or the closest parent graph.""" value = self.get(key) if value is not None: return value # Since FuncGraphs are able to capture tensors and variables from their # parent graphs, recursively search to see if there is a value stored for # one of the parent graphs. if isinstance(key, tf.__internal__.FuncGraph): return self._get_recursive(self._get_parent_graph(key)) return None def __getitem__(self, key): """Gets the value at key (or current context), or sets default value. Args: key: May be `None` or `Graph`object. When `None`, the key is set to the current context. 
Returns: Either the cached or default value. """ if key is None: key = self._key() value = self._get_recursive(key) if value is None: value = self[key] = self.default_factory() # pylint:disable=not-callable return value def setdefault(self, key=None, default=None, kwargs=None): """Sets the default value if key is not in dict, and returns the value.""" if key is None: key = self._key() kwargs = kwargs or {} if default is None and key not in self: default = self.default_factory(**kwargs) return weakref.WeakKeyDictionary.setdefault(self, key, default) # This dictionary holds a mapping {graph: learning_phase}. In eager mode, a # dummy object is used. # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). _GRAPH_LEARNING_PHASES = ContextValueCache(_default_learning_phase) # This dictionary holds a mapping between a graph and variables to initialize # in the graph. _GRAPH_VARIABLES = ContextValueCache(object_identity.ObjectIdentityWeakSet) # This dictionary holds a mapping between a graph and TF optimizers created in # the graph. _GRAPH_TF_OPTIMIZERS = ContextValueCache(object_identity.ObjectIdentityWeakSet)
210,327
30.680675
112
py
keras
keras-master/keras/callbacks_v1.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-import-not-at-top # pylint: disable=g-classes-have-attributes """Callbacks: utilities called at certain points during model training.""" import tensorflow.compat.v2 as tf import os import numpy as np from keras import backend as K from keras import callbacks from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export @keras_export(v1=['keras.callbacks.TensorBoard']) class TensorBoard(callbacks.TensorBoard): # pylint: disable=line-too-long """Enable visualizations for TensorBoard. TensorBoard is a visualization tool provided with TensorFlow. This callback logs events for TensorBoard, including: * Metrics summary plots * Training graph visualization * Activation histograms * Sampled profiling If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line: ```sh tensorboard --logdir=path_to_your_logs ``` You can find more information about TensorBoard [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard). Args: log_dir: the path of the directory where to save the log files to be parsed by TensorBoard. histogram_freq: frequency (in epochs) at which to compute activation and weight histograms for the layers of the model. 
If set to 0, histograms won't be computed. Validation data (or split) must be specified for histogram visualizations. write_graph: whether to visualize the graph in TensorBoard. The log file can become quite large when write_graph is set to True. write_grads: whether to visualize gradient histograms in TensorBoard. `histogram_freq` must be greater than 0. batch_size: size of batch of inputs to feed to the network for histograms computation. write_images: whether to write model weights to visualize as image in TensorBoard. embeddings_freq: frequency (in epochs) at which selected embedding layers will be saved. If set to 0, embeddings won't be computed. Data to be visualized in TensorBoard's Embedding tab must be passed as `embeddings_data`. embeddings_layer_names: a list of names of layers to keep eye on. If None or empty list all the embedding layer will be watched. embeddings_metadata: a dictionary which maps layer name to a file name in which metadata for this embedding layer is saved. [Here are details]( https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional) about metadata files format. In case if the same metadata file is used for all embedding layers, string can be passed. embeddings_data: data to be embedded at layers specified in `embeddings_layer_names`. Numpy array (if the model has a single input) or list of Numpy arrays (if the model has multiple inputs). Learn more about embeddings [in this guide]( https://www.tensorflow.org/programmers_guide/embedding). update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, writes the losses and metrics to TensorBoard after each batch. The same applies for `'epoch'`. If using an integer, let's say `1000`, the callback will write the metrics and losses to TensorBoard every 1000 samples. Note that writing too frequently to TensorBoard can slow down your training. profile_batch: Profile the batch to sample compute characteristics. By default, it will profile the second batch. 
Set profile_batch=0 to disable profiling. Raises: ValueError: If histogram_freq is set and no validation data is provided. @compatibility(eager) Using the `TensorBoard` callback will work when eager execution is enabled, with the restriction that outputting histogram summaries of weights and gradients is not supported. Consequently, `histogram_freq` will be ignored. @end_compatibility """ # pylint: enable=line-too-long def __init__(self, log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch', profile_batch=2): # Don't call super's init since it is an eager-only version. callbacks.Callback.__init__(self) self.log_dir = log_dir self.histogram_freq = histogram_freq if self.histogram_freq and tf.executing_eagerly(): logging.warning( UserWarning('Weight and gradient histograms not supported for eager' 'execution, setting `histogram_freq` to `0`.')) self.histogram_freq = 0 self.merged = None self.write_graph = write_graph self.write_grads = write_grads self.write_images = write_images self.batch_size = batch_size self._current_batch = 0 self._total_batches_seen = 0 self._total_val_batches_seen = 0 self.embeddings_freq = embeddings_freq self.embeddings_layer_names = embeddings_layer_names self.embeddings_metadata = embeddings_metadata self.embeddings_data = embeddings_data if update_freq == 'batch': self.update_freq = 1 else: self.update_freq = update_freq self._samples_seen = 0 self._samples_seen_at_last_write = 0 # TODO(fishx): Add a link to the full profiler tutorial. self._profile_batch = profile_batch # True when the profiler was successfully started by this callback. # We track the status here to make sure callbacks do not interfere with # each other. The callback will only stop the profiler it started. 
self._profiler_started = False # TensorBoard should only write summaries on the chief when in a # Multi-Worker setting. self._chief_worker_only = True def _init_writer(self, model): """Sets file writer.""" if tf.executing_eagerly(): self.writer = tf.summary.create_file_writer(self.log_dir) if not model.run_eagerly and self.write_graph: with self.writer.as_default(): tf.summary.graph(K.get_graph()) elif self.write_graph: self.writer = tf.compat.v1.summary.FileWriter(self.log_dir, K.get_graph()) else: self.writer = tf.compat.v1.summary.FileWriter(self.log_dir) def _make_histogram_ops(self, model): """Defines histogram ops when histogram_freq > 0.""" # only make histogram summary op if it hasn't already been made if self.histogram_freq and self.merged is None: for layer in self.model.layers: for weight in layer.weights: mapped_weight_name = weight.name.replace(':', '_') tf.compat.v1.summary.histogram(mapped_weight_name, weight) if self.write_images: w_img = tf.compat.v1.squeeze(weight) shape = K.int_shape(w_img) if len(shape) == 2: # dense layer kernel case if shape[0] > shape[1]: w_img = tf.compat.v1.transpose(w_img) shape = K.int_shape(w_img) w_img = tf.reshape(w_img, [1, shape[0], shape[1], 1]) elif len(shape) == 3: # convnet case if K.image_data_format() == 'channels_last': # switch to channels_first to display # every kernel as a separate image w_img = tf.compat.v1.transpose(w_img, perm=[2, 0, 1]) shape = K.int_shape(w_img) w_img = tf.reshape(w_img, [shape[0], shape[1], shape[2], 1]) elif len(shape) == 1: # bias case w_img = tf.reshape(w_img, [1, shape[0], 1, 1]) else: # not possible to handle 3D convnets etc. 
continue shape = K.int_shape(w_img) assert len(shape) == 4 and shape[-1] in [1, 3, 4] tf.compat.v1.summary.image(mapped_weight_name, w_img) if self.write_grads: for weight in layer.trainable_weights: mapped_weight_name = weight.name.replace(':', '_') grads = model.optimizer.get_gradients(model.total_loss, weight) def is_indexed_slices(grad): return type(grad).__name__ == 'IndexedSlices' grads = [ grad.values if is_indexed_slices(grad) else grad for grad in grads ] tf.compat.v1.summary.histogram('{}_grad'.format(mapped_weight_name), grads) if hasattr(layer, 'output'): if isinstance(layer.output, list): for i, output in enumerate(layer.output): tf.compat.v1.summary.histogram('{}_out_{}'.format(layer.name, i), output) else: tf.compat.v1.summary.histogram('{}_out'.format(layer.name), layer.output) def set_model(self, model): """Sets Keras model and creates summary ops.""" self.model = model self._init_writer(model) # histogram summaries only enabled in graph mode if not tf.executing_eagerly(): self._make_histogram_ops(model) self.merged = tf.compat.v1.summary.merge_all() # If both embedding_freq and embeddings_data are available, we will # visualize embeddings. if self.embeddings_freq and self.embeddings_data is not None: # Avoid circular dependency. from keras.engine import training_utils_v1 # pylint: disable=g-import-not-at-top self.embeddings_data = training_utils_v1.standardize_input_data( self.embeddings_data, model.input_names) # If embedding_layer_names are not provided, get all of the embedding # layers from the model. 
embeddings_layer_names = self.embeddings_layer_names if not embeddings_layer_names: embeddings_layer_names = [ layer.name for layer in self.model.layers if type(layer).__name__ == 'Embedding' ] self.assign_embeddings = [] embeddings_vars = {} self.batch_id = batch_id = tf.compat.v1.placeholder(tf.int32) self.step = step = tf.compat.v1.placeholder(tf.int32) for layer in self.model.layers: if layer.name in embeddings_layer_names: embedding_input = self.model.get_layer(layer.name).output embedding_size = np.prod(embedding_input.shape[1:]) embedding_input = tf.reshape(embedding_input, (step, int(embedding_size))) shape = (self.embeddings_data[0].shape[0], int(embedding_size)) embedding = tf.Variable( tf.zeros(shape), name=layer.name + '_embedding') embeddings_vars[layer.name] = embedding batch = tf.compat.v1.assign(embedding[batch_id:batch_id + step], embedding_input) self.assign_embeddings.append(batch) self.saver = tf.compat.v1.train.Saver(list(embeddings_vars.values())) # Create embeddings_metadata dictionary if isinstance(self.embeddings_metadata, str): embeddings_metadata = { layer_name: self.embeddings_metadata for layer_name in embeddings_vars.keys() } else: # If embedding_metadata is already a dictionary embeddings_metadata = self.embeddings_metadata try: from tensorboard.plugins import projector except ImportError: raise ImportError('Failed to import TensorBoard. Please make sure that ' 'TensorBoard integration is complete."') # TODO(psv): Add integration tests to test embedding visualization # with TensorBoard callback. We are unable to write a unit test for this # because TensorBoard dependency assumes TensorFlow package is installed. 
config = projector.ProjectorConfig() for layer_name, tensor in embeddings_vars.items(): embedding = config.embeddings.add() embedding.tensor_name = tensor.name if (embeddings_metadata is not None and layer_name in embeddings_metadata): embedding.metadata_path = embeddings_metadata[layer_name] projector.visualize_embeddings(self.writer, config) def _fetch_callback(self, summary): self.writer.add_summary(summary, self._total_val_batches_seen) self._total_val_batches_seen += 1 def _write_custom_summaries(self, step, logs=None): """Writes metrics out as custom scalar summaries. Args: step: the global step to use for TensorBoard. logs: dict. Keys are scalar summary names, values are NumPy scalars. """ logs = logs or {} if tf.executing_eagerly(): # use v2 summary ops with self.writer.as_default(), tf.summary.record_if(True): for name, value in logs.items(): if isinstance(value, np.ndarray): value = value.item() tf.summary.scalar(name, value, step=step) else: # use FileWriter from v1 summary for name, value in logs.items(): if isinstance(value, np.ndarray): value = value.item() summary = tf.compat.v1.Summary() summary_value = summary.value.add() summary_value.simple_value = value summary_value.tag = name self.writer.add_summary(summary, step) self.writer.flush() def on_train_batch_begin(self, batch, logs=None): if self._total_batches_seen == self._profile_batch - 1: self._start_profiler() def on_train_batch_end(self, batch, logs=None): return self.on_batch_end(batch, logs) def on_test_begin(self, logs=None): pass def on_test_end(self, logs=None): pass def on_batch_end(self, batch, logs=None): """Writes scalar summaries for metrics on every training batch. Performs profiling if current batch is in profiler_batches. 
""" # Don't output batch_size and batch number as TensorBoard summaries logs = logs or {} self._samples_seen += logs.get('size', 1) samples_seen_since = self._samples_seen - self._samples_seen_at_last_write if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq: batch_logs = {('batch_' + k): v for k, v in logs.items() if k not in ['batch', 'size', 'num_steps']} self._write_custom_summaries(self._total_batches_seen, batch_logs) self._samples_seen_at_last_write = self._samples_seen self._total_batches_seen += 1 self._stop_profiler() def on_train_begin(self, logs=None): pass def on_epoch_begin(self, epoch, logs=None): """Add histogram op to Model eval_function callbacks, reset batch count.""" # check if histogram summary should be run for this epoch if self.histogram_freq and epoch % self.histogram_freq == 0: # pylint: disable=protected-access # add the histogram summary op if it should run this epoch self.model._make_test_function() if self.merged not in self.model.test_function.fetches: self.model.test_function.fetches.append(self.merged) self.model.test_function.fetch_callbacks[ self.merged] = self._fetch_callback # pylint: enable=protected-access def on_epoch_end(self, epoch, logs=None): """Checks if summary ops should run next epoch, logs scalar summaries.""" # don't output batch_size and # batch number as TensorBoard summaries logs = {('epoch_' + k): v for k, v in logs.items() if k not in ['batch', 'size', 'num_steps']} if self.update_freq == 'epoch': step = epoch else: step = self._samples_seen self._write_custom_summaries(step, logs) # pop the histogram summary op after each epoch if self.histogram_freq: # pylint: disable=protected-access if self.merged in self.model.test_function.fetches: self.model.test_function.fetches.remove(self.merged) if self.merged in self.model.test_function.fetch_callbacks: self.model.test_function.fetch_callbacks.pop(self.merged) # pylint: enable=protected-access if self.embeddings_data is None and 
self.embeddings_freq: raise ValueError('To visualize embeddings, embeddings_data must ' 'be provided.') if self.embeddings_freq and self.embeddings_data is not None: if epoch % self.embeddings_freq == 0: # We need a second forward-pass here because we're passing # the `embeddings_data` explicitly. This design allows to pass # arbitrary data as `embeddings_data` and results from the fact # that we need to know the size of the `tf.Variable`s which # hold the embeddings in `set_model`. At this point, however, # the `validation_data` is not yet set. embeddings_data = self.embeddings_data n_samples = embeddings_data[0].shape[0] i = 0 sess = K.get_session() while i < n_samples: step = min(self.batch_size, n_samples - i) batch = slice(i, i + step) if isinstance(self.model.input, list): feed_dict = { model_input: embeddings_data[idx][batch] for idx, model_input in enumerate(self.model.input) } else: feed_dict = {self.model.input: embeddings_data[0][batch]} feed_dict.update({self.batch_id: i, self.step: step}) if not isinstance(K.learning_phase(), int): feed_dict[K.learning_phase()] = False sess.run(self.assign_embeddings, feed_dict=feed_dict) self.saver.save(sess, os.path.join(self.log_dir, 'keras_embedding.ckpt'), epoch) i += self.batch_size def on_train_end(self, logs=None): self._stop_profiler() self.writer.close() def _start_profiler(self): """Starts the profiler if currently inactive.""" if self._profiler_started: return try: tf.profiler.experimental.start(logdir=self.log_dir) self._profiler_started = True except tf.errors.AlreadyExistsError as e: # Profiler errors should not be fatal. logging.error('Failed to start profiler: %s', e.message) def _stop_profiler(self): """Stops the profiler if currently active.""" if not self._profiler_started: return try: tf.profiler.experimental.stop() except tf.errors.UnavailableError as e: # Profiler errors should not be fatal. logging.error('Failed to stop profiler: %s', e.message) finally: self._profiler_started = False
19,422
39.890526
87
py
keras
keras-master/keras/models.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Code for model cloning, plus model-related API entries.""" import tensorflow.compat.v2 as tf from keras import backend from keras import metrics as metrics_module from keras import optimizer_v1 from keras.engine import functional from keras.engine import sequential from keras.engine import training from keras.engine import training_v1 from keras.engine.base_layer import AddMetric from keras.engine.base_layer import Layer from keras.engine.input_layer import Input from keras.engine.input_layer import InputLayer from keras.saving import model_config from keras.saving import save from keras.utils import generic_utils from keras.utils import version_utils from keras.utils.generic_utils import CustomObjectScope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export # API entries importable from `keras.models`: Model = training.Model # pylint: disable=invalid-name Sequential = sequential.Sequential # pylint: disable=invalid-name Functional = functional.Functional # pylint: disable=invalid-name save_model = save.save_model load_model = save.load_model model_from_config = model_config.model_from_config model_from_yaml = model_config.model_from_yaml model_from_json = model_config.model_from_json # 
Callable used to clone a layer with weights preserved. def share_weights(layer): return layer def _clone_layer(layer): return layer.__class__.from_config(layer.get_config()) def _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes): """Inserts ancillary layers into the model with the proper order.""" # Sort `AddMetric` layers so they agree with metrics_names. metric_layers = [ layer for layer in ancillary_layers if isinstance(layer, AddMetric) ] metric_layers.sort(key=lambda layer: metrics_names.index(layer.metric_name)) ancillary_layers = [ layer for layer in ancillary_layers if not isinstance(layer, AddMetric) ] + metric_layers model._insert_layers(ancillary_layers, relevant_nodes=list(new_nodes)) def _make_new_nodes(nodes_by_depth, layer_fn, layer_map, tensor_map): """Uses the layers in `layer_map` to make new nodes based on `nodes_by_depth`. Args: nodes_by_depth: Provides structure information to create new nodes. layer_fn: Function to clone layers. layer_map: Map from layers in `model` to new layers. tensor_map: Map from tensors in `model` to newly compute tensors. Returns: A set of new nodes. `layer_map` and `tensor_map` are updated. """ # Iterated over every node in the reference model, in depth order. new_nodes = set() depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) for depth in depth_keys: nodes = nodes_by_depth[depth] for node in nodes: # Recover the corresponding layer. layer = node.outbound_layer # Get or create layer. if layer not in layer_map: new_layer = layer_fn(layer) layer_map[layer] = new_layer layer = new_layer else: # Reuse previously cloned layer. layer = layer_map[layer] # Don't call InputLayer multiple times. if isinstance(layer, InputLayer): continue # If all previous input tensors are available in tensor_map, # then call node.inbound_layer on them. if all( tensor in tensor_map for tensor in tf.nest.flatten(node.input_tensors)): # Call layer. 
args = tf.nest.map_structure(lambda t: tensor_map.get(t, t), node.call_args) kwargs = tf.nest.map_structure(lambda t: tensor_map.get(t, t), node.call_kwargs) output_tensors = layer(*args, **kwargs) # Thread-safe way to keep track of what node was created. first_output_tensor = tf.nest.flatten(output_tensors)[0] new_nodes.add( layer._inbound_nodes[first_output_tensor._keras_history.node_index]) for x, y in zip( tf.nest.flatten(node.output_tensors), tf.nest.flatten(output_tensors)): tensor_map[x] = y return new_nodes def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer): """Clone a functional `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Input layers are always cloned. Args: model: Instance of `Model`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer to share the weights. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately so we create new input layers. Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value. """ if not isinstance(model, Model): raise ValueError('Expected `model` argument ' f'to be a `Model` instance. 
Received: model={model}') if isinstance(model, Sequential): raise ValueError('Expected `model` argument ' 'to be a functional `Model` instance, ' f'got a `Sequential` instance instead: {model}') if not model._is_graph_network: raise ValueError('Expected `model` argument ' 'to be a functional `Model` instance, ' f'but got a subclassed model instead: {model}') new_input_layers = {} # Cache for created layers. if input_tensors is not None: # Make sure that all input tensors come from a Keras layer. input_tensors = tf.nest.flatten(input_tensors) for i, input_tensor in enumerate(input_tensors): original_input_layer = model._input_layers[i] # Cache input layer. Create a new layer if the tensor is originally not # from a Keras layer. if not backend.is_keras_tensor(input_tensor): name = original_input_layer.name input_tensor = Input(tensor=input_tensor, name='input_wrapper_for_' + name) newly_created_input_layer = input_tensor._keras_history.layer new_input_layers[original_input_layer] = newly_created_input_layer else: new_input_layers[ original_input_layer] = input_tensor._keras_history.layer if not callable(layer_fn): raise ValueError('Expected `layer_fn` argument to be a callable. ' f'Received: layer_fn={layer_fn}') model_configs, created_layers = _clone_layers_and_model_config( model, new_input_layers, layer_fn) # Reconstruct model from the config, using the cloned layers. input_tensors, output_tensors, created_layers = ( functional.reconstruct_from_config(model_configs, created_layers=created_layers)) metrics_names = model.metrics_names model = Model(input_tensors, output_tensors, name=model.name) # Layers not directly tied to outputs of the Model, such as loss layers # created in `add_loss` and `add_metric`. ancillary_layers = [ layer for layer in created_layers.values() if layer not in model.layers ] # TODO(b/162887610): This may need to adjust the inbound node index if the # created layers had already been used to define other models. 
if ancillary_layers: new_nodes = tf.nest.flatten([ layer.inbound_nodes[1:] if functional._should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values() ]) _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes) return model def _clone_layers_and_model_config(model, input_layers, layer_fn): """Clones all layers, and returns the model config without serializing layers. This function ensures that only the node graph is retrieved when getting the model config. The `layer_fn` used to clone layers might not rely on `layer.get_config()`, so some custom layers do not define `get_config`. Trying to retrieve the config results in errors. Args: model: A Functional model. input_layers: Dictionary mapping input layers in `model` to new input layers layer_fn: Function used to clone all non-input layers. Returns: Model config object, and a dictionary of newly created layers. """ created_layers = {} def _copy_layer(layer): # Whenever the network config attempts to get the layer serialization, # return a dummy dictionary. if layer in input_layers: created_layers[layer.name] = input_layers[layer] elif layer in model._input_layers: created_layers[layer.name] = InputLayer(**layer.get_config()) else: created_layers[layer.name] = layer_fn(layer) return {} config = functional.get_network_config( model, serialize_layer_fn=_copy_layer) return config, created_layers def _remove_ancillary_layers(model, layer_map, layers): """Removes and returns any ancillary layers from `layers` based on `model`. Ancillary layers are part of the model topology but not used to compute the model outputs, e.g., layers from `add_loss` and `add_metric`. Args: model: A Keras Model. layer_map: A map to from layers in the `model` to those in `layers`. layers: A list of all layers. Returns: Two lists of layers: (1) `layers` with the ancillary layers removed, and (2) the ancillary layers. """ ancillary_layers = [] # Additional layers for computing losses and metrics. 
if not model._is_graph_network: return layers, ancillary_layers # Ancillary layers are those with depth < 0. depths = [depth for depth in model._nodes_by_depth.keys() if depth < 0] depths.sort(reverse=True) # Order topologically from inputs to outputs. for depth in depths: for node in model._nodes_by_depth[depth]: ancillary_layers.append(layer_map[node.outbound_layer]) return [l for l in layers if l not in ancillary_layers], ancillary_layers def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer): """Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Args: model: Instance of `Sequential`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer to share the weights. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately so we create new input layers. Returns: An instance of `Sequential` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value. """ if not isinstance(model, Sequential): raise ValueError('Expected `model` argument ' 'to be a `Sequential` model instance. ' f'Received: model={model}') if not callable(layer_fn): raise ValueError( 'Expected `layer_fn` argument to be a callable. ' f'Received: layer_fn={layer_fn}') layers = [] # Layers needed to compute the model's outputs. layer_map = {} # Ensure that all layers are cloned. 
The model's layers # property will exclude the initial InputLayer (if it exists) in the model, # resulting in a different Sequential model structure. for layer in model._flatten_layers(include_self=False, recursive=False): if isinstance(layer, InputLayer) and input_tensors is not None: # If input tensors are provided, the original model's InputLayer is # overwritten with a different InputLayer. continue cloned_layer = ( _clone_layer(layer) if isinstance(layer, InputLayer) else layer_fn(layer)) layers.append(cloned_layer) layer_map[layer] = cloned_layer layers, ancillary_layers = _remove_ancillary_layers(model, layer_map, layers) if input_tensors is None: cloned_model = Sequential(layers=layers, name=model.name) elif len(generic_utils.to_list(input_tensors)) != 1: raise ValueError( 'To clone a `Sequential` model, we expect at most one tensor as part ' f'of `input_tensors`. Received: input_tensors={input_tensors}') else: # Overwrite the original model's input layer. if isinstance(input_tensors, tuple): input_tensors = list(input_tensors) x = generic_utils.to_list(input_tensors)[0] if backend.is_keras_tensor(x): origin_layer = x._keras_history.layer if isinstance(origin_layer, InputLayer): cloned_model = Sequential( layers=[origin_layer] + layers, name=model.name) else: raise ValueError('Cannot clone a `Sequential` model on top ' 'of a tensor that comes from a Keras layer ' 'other than an `InputLayer`. ' 'Use the Functional API instead. ' f'Received: input_tensors={input_tensors}') else: input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name)) input_layer = input_tensor._keras_history.layer cloned_model = Sequential(layers=[input_layer] + layers, name=model.name) if not ancillary_layers: return cloned_model tensor_map = {} # Maps tensors from `model` to those in `cloned_model`. for depth, cloned_nodes in cloned_model._nodes_by_depth.items(): nodes = model._nodes_by_depth[depth] # This should be safe in a Sequential model. 
In an arbitrary network, you # need to sort using the outbound layer of the node as a key. for cloned_node, node in zip(cloned_nodes, nodes): if isinstance(cloned_node.output_tensors, list): for j, output_tensor in enumerate(cloned_node.output_tensors): tensor_map[node.output_tensors[j]] = output_tensor else: tensor_map[node.output_tensors] = cloned_node.output_tensors # Ancillary nodes have negative depth. new_nodes = _make_new_nodes( { depth: nodes for depth, nodes in model._nodes_by_depth.items() if depth < 0 }, layer_fn, layer_map, tensor_map) _insert_ancillary_layers(cloned_model, ancillary_layers, model.metrics_names, new_nodes) return cloned_model @keras_export('keras.models.clone_model') def clone_model(model, input_tensors=None, clone_function=None): """Clone a Functional or Sequential `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Note that `clone_model` will not preserve the uniqueness of shared objects within the model (e.g. a single variable attached to two distinct layers will be restored as two separate variables). Args: model: Instance of `Model` (could be a Functional model or a Sequential model). input_tensors: optional list of input tensors or InputLayer objects to build the model upon. If not provided, new `Input` objects will be created. clone_function: Callable to be used to clone each layer in the target model (except `InputLayer` instances). It takes as argument the layer instance to be cloned, and returns the corresponding layer instance to be used in the model copy. If unspecified, this callable defaults to the following serialization/deserialization function: `lambda layer: layer.__class__.from_config(layer.get_config())`. By passing a custom callable, you can customize your copy of the model, e.g. 
by wrapping certain layers of interest (you might want to replace all `LSTM` instances with equivalent `Bidirectional(LSTM(...))` instances, for example). Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. The cloned model may behave differently from the original model if a custom `clone_function` modifies the layer. Example: ```python # Create a test Sequential model. model = keras.Sequential([ keras.Input(shape=(728,)), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(1, activation='sigmoid'), ]) # Create a copy of the test model (with freshly initialized weights). new_model = clone_model(model) ``` Note that subclassed models cannot be cloned, since their internal layer structure is not known. To achieve equivalent functionality as `clone_model` in the case of a subclassed model, simply make sure that the model class implements `get_config()` (and optionally `from_config()`), and call: ```python new_model = model.__class__.from_config(model.get_config()) ``` """ with generic_utils.DisableSharedObjectScope(): if clone_function is None: clone_function = _clone_layer if isinstance(model, Sequential): return _clone_sequential_model( model, input_tensors=input_tensors, layer_fn=clone_function) else: return _clone_functional_model( model, input_tensors=input_tensors, layer_fn=clone_function) # "Clone" a subclassed model by reseting all of the attributes. def _in_place_subclassed_model_reset(model): """Substitute for model cloning that works for subclassed models. Subclassed models cannot be cloned because their topology is not serializable. To "instantiate" an identical model in a new TF graph, we reuse the original model object, but we clear its state. After calling this function on a model instance, you can use the model instance as if it were a model clone (in particular you can use it in a new graph). This method clears the state of the input model. 
It is thus destructive. However the original state can be restored fully by calling `_in_place_subclassed_model_state_restoration`. Args: model: Instance of a Keras model created via subclassing. Raises: ValueError: In case the model uses a subclassed model as inner layer. """ assert not model._is_graph_network # Only makes sense for subclassed networks # Select correct base class for new Model. version_utils.swap_class(model.__class__, training.Model, training_v1.Model, tf.compat.v1.executing_eagerly_outside_functions()) # Retrieve all layers tracked by the model as well as their attribute names attributes_cache = {} for name in dir(model): # Skip attrs that track other trackables. if name == 'submodules' or name == '_self_tracked_trackables': continue try: value = getattr(model, name) except (AttributeError, ValueError, TypeError): continue if isinstance(value, Layer): attributes_cache[name] = value assert value in model.layers if hasattr(value, 'layers') and value.layers: raise ValueError('We do not support the use of nested layers ' 'in `model_to_estimator` at this time. Found nested ' f'layer: {value}') elif isinstance( value, (list, tuple)) and name not in ('layers', '_layers', 'metrics', '_compile_metric_functions', '_output_loss_metrics'): # Handle case: list/tuple of layers (also tracked by the Network API). if value and all(isinstance(val, Layer) for val in value): raise ValueError('We do not support the use of list-of-layers ' 'attributes in subclassed models used with ' '`model_to_estimator` at this time. Found list ' f'model: {name}') # Replace layers on the model with fresh layers layers_to_names = {value: key for key, value in attributes_cache.items()} original_layers = list( model._flatten_layers(include_self=False, recursive=False)) setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for layer in original_layers: # We preserve layer order. 
config = layer.get_config() # This will not work for nested subclassed models used as layers. # This would be theoretically possible to support, but would add complexity. # Only do it if users complain. if isinstance(layer, training.Model) and not layer._is_graph_network: raise ValueError('We do not support the use of nested subclassed models ' 'in `model_to_estimator` at this time. Found nested ' f'model: {layer}') fresh_layer = layer.__class__.from_config(config) name = layers_to_names[layer] setattr(model, name, fresh_layer) model._self_tracked_trackables.append(fresh_layer) # Cache original model build attributes (in addition to layers) if (not hasattr(model, '_original_attributes_cache') or model._original_attributes_cache is None): if model.built: attributes_to_cache = [ 'inputs', 'outputs', 'total_loss', 'optimizer', 'train_function', 'test_function', 'predict_function', '_training_endpoints', '_collected_trainable_weights', '_feed_inputs', '_feed_input_names', '_feed_input_shapes', ] for name in attributes_to_cache: attributes_cache[name] = getattr(model, name) model._original_attributes_cache = attributes_cache _reset_build_compile_trackers(model) model._setattr_tracking = setattr_tracking def _reset_build_compile_trackers(model): """Reset state trackers for model. Note that we do not actually zero out attributes such as optimizer, but instead rely on the expectation that all of the attrs will be over-written on calling build/compile/etc. This is somewhat fragile, insofar as we check elsewhere for the presence of these attributes as evidence of having been built/compiled/etc. Pending a better way to do this, we reset key attributes here to allow building and compiling. 
Args: model: the model that is being reset """ # Reset build state model.built = False model.inputs = None model.outputs = None # Reset compile state model._is_compiled = False # pylint:disable=protected-access if not tf.compat.v1.executing_eagerly_outside_functions(): model._v1_compile_was_called = False model.optimizer = None @keras_export( 'keras.__internal__.models.in_place_subclassed_model_state_restoration', v1=[]) def in_place_subclassed_model_state_restoration(model): """Restores the original state of a model after it was "reset". This undoes this action of `_in_place_subclassed_model_reset`, which is called in `clone_and_build_model` if `in_place_reset` is set to True. Args: model: Instance of a Keras model created via subclassing, on which `_in_place_subclassed_model_reset` was previously called. """ assert not model._is_graph_network # Restore layers and build attributes if (hasattr(model, '_original_attributes_cache') and model._original_attributes_cache is not None): # Models have sticky attribute assignment, so we want to be careful to add # back the previous attributes and track Layers by their original names # without adding dependencies on "utility" attributes which Models exempt # when they're constructed. setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Restore to the state of a never-called model. 
_reset_build_compile_trackers(model) @keras_export('keras.__internal__.models.clone_and_build_model', v1=[]) def clone_and_build_model( model, input_tensors=None, target_tensors=None, custom_objects=None, compile_clone=True, in_place_reset=False, optimizer_iterations=None, optimizer_config=None): """Clone a `Model` and build/compile it with the same settings used before. This function can be run in the same graph or in a separate graph from the model. When using a separate graph, `in_place_reset` must be `False`. Note that, currently, the clone produced from this function may not work with TPU DistributionStrategy. Try at your own risk. Args: model: `tf.keras.Model` object. Can be Functional, Sequential, or sub-classed. input_tensors: Optional list or dictionary of input tensors to build the model upon. If not provided, placeholders will be created. target_tensors: Optional list of target tensors for compiling the model. If not provided, placeholders will be created. custom_objects: Optional dictionary mapping string names to custom classes or functions. compile_clone: Boolean, whether to compile model clone (default `True`). in_place_reset: Boolean, whether to reset the model in place. Only used if the model is a subclassed model. In the case of a subclassed model, this argument must be set to `True` (default `False`). To restore the original model, use the function `in_place_subclassed_model_state_restoration(model)`. optimizer_iterations: An iterations variable that will be incremented by the optimizer if the clone is compiled. This argument is used when a Keras model is cloned into an Estimator model function, because Estimators create their own global step variable. optimizer_config: Optimizer config dictionary or list of dictionary returned from `get_config()`. This argument should be defined if `clone_and_build_model` is called in a different graph or session from the original model, and the optimizer is an instance of `OptimizerV2`. 
Returns: Clone of the model. Raises: ValueError: Cloning fails in the following cases - cloning a subclassed model with `in_place_reset` set to False. - compiling the clone when the original model has not been compiled. """ # Grab optimizer now, as we reset-in-place for subclassed models, but # want to maintain access to the original optimizer. orig_optimizer = model.optimizer if compile_clone and not orig_optimizer: raise ValueError( 'Error when cloning model: `compile_clone` was set to True, but the ' f'original model has not been compiled. Received: model={model}') if compile_clone: compile_args = model._get_compile_args() # pylint: disable=protected-access # Allows this method to be robust to switching graph and eager classes. model._get_compile_args = lambda: compile_args with CustomObjectScope(custom_objects or {}): if model._is_graph_network: clone = clone_model(model, input_tensors=input_tensors) elif isinstance(model, Sequential): clone = clone_model(model, input_tensors=input_tensors) if (not clone._is_graph_network and model._build_input_shape is not None): if tf.compat.v1.executing_eagerly_outside_functions(): clone.build(model._build_input_shape) else: clone._set_inputs( backend.placeholder( model._build_input_shape, dtype=model.inputs[0].dtype)) else: try: # Prefer cloning the model if serial/deserial logic is implemented for # subclassed model. clone = model.__class__.from_config(model.get_config()) except NotImplementedError: logging.warning('This model is a subclassed model. Please implement ' '`get_config` and `from_config` to better support ' 'cloning the model.') if not in_place_reset: raise ValueError( f'This model ({model}) is a subclassed model. ' 'Such a model cannot be cloned, but there is a workaround where ' 'the model is reset in-place. To use this, please set the ' 'argument `in_place_reset` to `True`. This will reset the ' 'attributes in the original model. 
To restore the attributes, ' 'call `in_place_subclassed_model_state_restoration(model)`.') clone = model _in_place_subclassed_model_reset(clone) if input_tensors is not None: if isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1: input_tensors = input_tensors[0] clone._set_inputs(input_tensors) if compile_clone: if isinstance(orig_optimizer, optimizer_v1.TFOptimizer): optimizer = optimizer_v1.TFOptimizer( orig_optimizer.optimizer, optimizer_iterations) backend.track_tf_optimizer(optimizer) else: if not isinstance(orig_optimizer, (tuple, list)): orig_optimizer = [orig_optimizer] if optimizer_config is None: optimizer = [ opt.__class__.from_config(opt.get_config()) for opt in orig_optimizer ] elif isinstance(optimizer_config, dict): optimizer = [orig_optimizer[0].__class__.from_config(optimizer_config)] else: # optimizer config is list of dict, same order as orig_optimizer. optimizer = [ opt.__class__.from_config(opt_config) for (opt, opt_config) in zip(orig_optimizer, optimizer_config) ] if optimizer_iterations is not None: for opt in optimizer: opt.iterations = optimizer_iterations if len(optimizer) == 1: optimizer = optimizer[0] compile_args['optimizer'] = optimizer if target_tensors is not None: compile_args['target_tensors'] = target_tensors # Ensure Metric objects in new model are separate from existing model. compile_args['metrics'] = metrics_module.clone_metrics( compile_args['metrics']) compile_args['weighted_metrics'] = metrics_module.clone_metrics( compile_args['weighted_metrics']) clone.compile(**compile_args) return clone
31,803
41.236388
83
py
keras
keras-master/keras/metrics.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-classes-have-attributes # pylint: disable=g-doc-return-or-yield """Built-in metrics.""" import tensorflow.compat.v2 as tf import abc import types import warnings import numpy as np from keras import activations from keras import backend from keras.engine import base_layer from keras.engine import base_layer_utils from keras.engine import keras_tensor from keras.losses import binary_crossentropy from keras.losses import categorical_crossentropy from keras.losses import categorical_hinge from keras.losses import hinge from keras.losses import kullback_leibler_divergence from keras.losses import logcosh from keras.losses import mean_absolute_error from keras.losses import mean_absolute_percentage_error from keras.losses import mean_squared_error from keras.losses import mean_squared_logarithmic_error from keras.losses import poisson from keras.losses import sparse_categorical_crossentropy from keras.losses import squared_hinge from keras.saving.saved_model import metric_serialization from keras.utils import generic_utils from keras.utils import losses_utils from keras.utils import metrics_utils from keras.utils.generic_utils import deserialize_keras_object from keras.utils.generic_utils import serialize_keras_object from keras.utils.generic_utils import to_list from 
keras.utils.tf_utils import is_tensor_or_variable from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics. For sparse categorical metrics, the shapes of `y_true` and `y_pred` are different. Args: y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or shape = `[batch_size, d0, .. dN-1, 1]`. y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`. sample_weight: Optional `sample_weight` acts as a coefficient for the metric. If a scalar is provided, then the metric is simply scaled by the given value. If `sample_weight` is a tensor of size `[batch_size]`, then the metric for each sample of the batch is rescaled by the corresponding element in the `sample_weight` vector. If the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to this shape), then each metric element of `y_pred` is scaled by the corresponding value of `sample_weight`. (Note on `dN-1`: all metric functions reduce by 1 dimension, usually the last axis (-1)). Returns: Update op. """ @keras_export('keras.metrics.Metric') class Metric(base_layer.Layer, metaclass=abc.ABCMeta): """Encapsulates metric logic and state. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: Additional layer keywords arguments. Standalone usage: ```python m = SomeMetric(...) 
  for input in ...:
    m.update_state(input)
  print('Final result: ', m.result().numpy())
  ```

  Usage with `compile()` API:

  ```python
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))

  model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
                loss=tf.keras.losses.CategoricalCrossentropy(),
                metrics=[tf.keras.metrics.CategoricalAccuracy()])

  data = np.random.random((1000, 32))
  labels = np.random.random((1000, 10))

  dataset = tf.data.Dataset.from_tensor_slices((data, labels))
  dataset = dataset.batch(32)

  model.fit(dataset, epochs=10)
  ```

  To be implemented by subclasses:
  * `__init__()`: All state variables should be created in this method by
    calling `self.add_weight()` like: `self.var = self.add_weight(...)`
  * `update_state()`: Has all updates to the state variables like:
    self.var.assign_add(...).
  * `result()`: Computes and returns a value for the metric
    from the state variables.

  Example subclass implementation:

  ```python
  class BinaryTruePositives(tf.keras.metrics.Metric):

    def __init__(self, name='binary_true_positives', **kwargs):
      super(BinaryTruePositives, self).__init__(name=name, **kwargs)
      self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
      y_true = tf.cast(y_true, tf.bool)
      y_pred = tf.cast(y_pred, tf.bool)

      values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
      values = tf.cast(values, self.dtype)
      if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, self.dtype)
        sample_weight = tf.broadcast_to(sample_weight, values.shape)
        values = tf.multiply(values, sample_weight)
      self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
      return self.true_positives
  ```
  """

  def __init__(self, name=None, dtype=None, **kwargs):
    super(Metric, self).__init__(name=name, dtype=dtype, **kwargs)
    self.stateful = True  # All metric layers are stateful.
    self.built = True
    if not base_layer_utils.v2_dtype_behavior_enabled():
      # We only do this when the V2 behavior is not enabled, as when it is
      # enabled, the dtype already defaults to floatx.
      self._dtype = (backend.floatx() if dtype is None
                     else tf.as_dtype(dtype).name)

  def __new__(cls, *args, **kwargs):
    # Wraps `update_state` and `result` on every new instance so that
    # subclass implementations get control-dependency and cross-replica
    # handling for free.
    obj = super(Metric, cls).__new__(cls)

    # If `update_state` is not in eager/tf.function and it is not from a
    # built-in metric, wrap it in `tf.function`. This is so that users writing
    # custom metrics in v1 need not worry about control dependencies and
    # return ops.
    if (base_layer_utils.is_in_eager_or_tf_function() or
        is_built_in(cls)):
      obj_update_state = obj.update_state

      def update_state_fn(*args, **kwargs):
        control_status = tf.__internal__.autograph.control_status_ctx()
        ag_update_state = tf.__internal__.autograph.tf_convert(
            obj_update_state, control_status)
        return ag_update_state(*args, **kwargs)
    else:
      # v1 user-defined metric: force a `tf.function` wrapper unless the
      # implementation is already one.
      if isinstance(obj.update_state, tf.__internal__.function.Function):
        update_state_fn = obj.update_state
      else:
        update_state_fn = tf.function(obj.update_state)
    obj.update_state = types.MethodType(
        metrics_utils.update_state_wrapper(update_state_fn), obj)

    obj_result = obj.result

    def result_fn(*args, **kwargs):
      control_status = tf.__internal__.autograph.control_status_ctx()
      ag_result = tf.__internal__.autograph.tf_convert(
          obj_result, control_status)
      return ag_result(*args, **kwargs)

    obj.result = types.MethodType(metrics_utils.result_wrapper(result_fn), obj)

    return obj

  def __call__(self, *args, **kwargs):
    """Accumulates statistics and then computes metric result value.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric,
        passed on to `update_state()`.

    Returns:
      The metric value tensor.
    """

    def replica_local_fn(*args, **kwargs):
      """Updates the state of the metric in a replica-local context."""
      if any(
          isinstance(arg, keras_tensor.KerasTensor)
          for arg in tf.nest.flatten((args, kwargs))):
        # Symbolic (functional-API) call: no state to update yet.
        update_op = None
      else:
        update_op = self.update_state(*args, **kwargs)  # pylint: disable=not-callable
      update_ops = []
      if update_op is not None:
        update_ops.append(update_op)
      with tf.control_dependencies(update_ops):
        result_t = self.result()  # pylint: disable=not-callable

        # We are adding the metric object as metadata on the result tensor.
        # This is required when we want to use a metric with `add_metric` API on
        # a Model/Layer in graph mode. This metric instance will later be used
        # to reset variable state after each epoch of training.
        # Example:
        #   model = Model()
        #   mean = Mean()
        #   model.add_metric(mean(values), name='mean')
        result_t._metric_obj = self  # pylint: disable=protected-access
        return result_t

    from keras.distribute import distributed_training_utils  # pylint:disable=g-import-not-at-top
    return distributed_training_utils.call_replica_local_fn(
        replica_local_fn, *args, **kwargs)

  def __str__(self):
    args = ','.join(f'{k}={v}' for k, v in self.get_config().items())
    return f'{self.__class__.__name__}({args})'

  @property
  def dtype(self):
    return self._dtype

  def get_config(self):
    """Returns the serializable config of the metric."""
    return {'name': self.name, 'dtype': self.dtype}

  def reset_state(self):
    """Resets all of the metric state variables.

    This function is called between epochs/steps,
    when a metric is evaluated during training.
    """
    if not generic_utils.is_default(self.reset_states):
      # Subclass overrode the deprecated spelling; defer to it.
      warnings.warn('Metric %s implements a `reset_states()` method; rename it '
                    'to `reset_state()` (without the final "s"). The name '
                    '`reset_states()` has been deprecated to improve API '
                    'consistency.' % (self.__class__.__name__,))
      return self.reset_states()
    else:
      backend.batch_set_value([(v, 0) for v in self.variables])

  @abc.abstractmethod
  def update_state(self, *args, **kwargs):
    """Accumulates statistics for the metric.

    Note: This function is executed as a graph function in graph mode.
    This means:
      a) Operations on the same resource are executed in textual order.
         This should make it easier to do things like add the updated
         value of a variable to another, for example.
      b) You don't need to worry about collecting the update ops to execute.
         All update ops added to the graph by this function will be executed.
      As a result, code should generally work the same way with graph or
      eager execution.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  def merge_state(self, metrics):
    """Merges the state from one or more metrics.
    This method can be used by distributed systems to merge the state computed
    by different metric instances. Typically the state will be stored in the
    form of the metric's weights. For example, a tf.keras.metrics.Mean metric
    contains a list of two weight values: a total and a count. If there were
    two instances of a tf.keras.metrics.Accuracy that each independently
    aggregated partial state for an overall accuracy calculation, these two
    metric's states could be combined as follows:

    >>> m1 = tf.keras.metrics.Accuracy()
    >>> _ = m1.update_state([[1], [2]], [[0], [2]])

    >>> m2 = tf.keras.metrics.Accuracy()
    >>> _ = m2.update_state([[3], [4]], [[3], [4]])

    >>> m2.merge_state([m1])
    >>> m2.result().numpy()
    0.75

    Args:
      metrics: an iterable of metrics. The metrics must have compatible state.

    Raises:
      ValueError: If the provided iterable does not contain metrics matching
        the metric's required specifications.
    """
    assign_add_ops = []
    for metric in metrics:
      # Compatibility is judged by weight-list length only; shapes are
      # checked implicitly by `assign_add` below.
      if len(self.weights) != len(metric.weights):
        raise ValueError(f'Metric {metric} is not compatible with {self}')
      for weight, weight_to_add in zip(self.weights, metric.weights):
        assign_add_ops.append(weight.assign_add(weight_to_add))
    return assign_add_ops

  @abc.abstractmethod
  def result(self):
    """Computes and returns the metric value tensor.

    Result computation is an idempotent operation that simply calculates the
    metric value using the state variables.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  ### For use by subclasses ###
  @doc_controls.for_subclass_implementers
  def add_weight(
      self,
      name,
      shape=(),
      aggregation=tf.VariableAggregation.SUM,
      synchronization=tf.VariableSynchronization.ON_READ,
      initializer=None,
      dtype=None):
    """Adds state variable. Only for use by subclasses."""
    if tf.distribute.has_strategy():
      strategy = tf.distribute.get_strategy()
    else:
      strategy = None

    # TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU.
    if backend.is_tpu_strategy(strategy):
      synchronization = tf.VariableSynchronization.ON_WRITE

    # `init_scope` lifts variable creation out of any enclosing
    # `tf.function` trace.
    with tf.init_scope():
      return super(Metric, self).add_weight(
          name=name,
          shape=shape,
          dtype=self._dtype if dtype is None else dtype,
          trainable=False,
          initializer=initializer,
          collections=[],
          synchronization=synchronization,
          aggregation=aggregation)

  ### End: For use by subclasses ###

  @property
  def trainable_weights(self):
    # Overridden from Layer class to track submetric weights.
    if self.trainable:
      trainable_weights = self._trainable_weights
      for m in self._metrics:
        trainable_weights += m.trainable_weights
      return self._dedup_weights(trainable_weights)
    else:
      return []

  @property
  def non_trainable_weights(self):
    # Overridden from Layer class to track submetric weights.
    if self.trainable:
      non_trainable_weights = self._non_trainable_weights
      for m in self._metrics:
        non_trainable_weights += m.non_trainable_weights
    else:
      # When frozen, every weight (including nominally trainable ones)
      # is reported as non-trainable.
      non_trainable_weights = (
          self._non_trainable_weights + self._trainable_weights)
      for m in self._metrics:
        non_trainable_weights += m.weights
    return self._dedup_weights(non_trainable_weights)

  @property
  def _trackable_saved_model_saver(self):
    return metric_serialization.MetricSavedModelSaver(self)

  @generic_utils.default
  @doc_controls.do_not_generate_docs
  def reset_states(self):
    # Backwards compatibility alias of `reset_state`. New classes should
    # only implement `reset_state`.
    return self.reset_state()


class Reduce(Metric):
  """Encapsulates metrics that perform a reduce operation on the values.

  Args:
    reduction: a `tf.keras.metrics.Reduction` enum value.
    name: string name of the metric instance.
    dtype: (Optional) data type of the metric result.
""" def __init__(self, reduction, name, dtype=None): super(Reduce, self).__init__(name=name, dtype=dtype) self.reduction = reduction self.total = self.add_weight( 'total', initializer='zeros') if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE, metrics_utils.Reduction.WEIGHTED_MEAN]: self.count = self.add_weight( 'count', initializer='zeros') def update_state(self, values, sample_weight=None): """Accumulates statistics for computing the metric. Args: values: Per-example value. sample_weight: Optional weighting of each example. Defaults to 1. Returns: Update op. """ [values], sample_weight = \ metrics_utils.ragged_assert_compatible_and_get_flat_values( [values], sample_weight) try: values = tf.cast(values, self._dtype) except (ValueError, TypeError): msg = ('The output of a metric function can only be a single Tensor. ' f'Received: {values}. ') if isinstance(values, dict): msg += ('To return a dict of values, implement a custom Metric ' 'subclass.') raise RuntimeError(msg) if sample_weight is not None: sample_weight = tf.cast(sample_weight, self._dtype) # Update dimensions of weights to match with values if possible. values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions( values, sample_weight=sample_weight) try: # Broadcast weights if possible. sample_weight = tf.__internal__.ops.broadcast_weights( sample_weight, values) except ValueError: # Reduce values to same ndim as weight array ndim = backend.ndim(values) weight_ndim = backend.ndim(sample_weight) if self.reduction == metrics_utils.Reduction.SUM: values = tf.reduce_sum( values, axis=list(range(weight_ndim, ndim))) else: values = tf.reduce_mean( values, axis=list(range(weight_ndim, ndim))) values = tf.multiply(values, sample_weight) value_sum = tf.reduce_sum(values) with tf.control_dependencies([value_sum]): update_total_op = self.total.assign_add(value_sum) # Exit early if the reduction doesn't have a denominator. 
if self.reduction == metrics_utils.Reduction.SUM: return update_total_op # Update `count` for reductions that require a denominator. if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE: num_values = tf.cast(tf.size(values), self._dtype) elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN: if sample_weight is None: num_values = tf.cast(tf.size(values), self._dtype) else: num_values = tf.reduce_sum(sample_weight) else: raise NotImplementedError( f'Reduction "{self.reduction}" not implemented. Expected ' '"sum", "weighted_mean", or "sum_over_batch_size".') with tf.control_dependencies([update_total_op]): return self.count.assign_add(num_values) def result(self): if self.reduction == metrics_utils.Reduction.SUM: return tf.identity(self.total) elif self.reduction in [ metrics_utils.Reduction.WEIGHTED_MEAN, metrics_utils.Reduction.SUM_OVER_BATCH_SIZE ]: return tf.math.divide_no_nan(self.total, self.count) else: raise NotImplementedError( f'Reduction "{self.reduction}" not implemented. Expected ' '"sum", "weighted_mean", or "sum_over_batch_size".') @keras_export('keras.metrics.Sum') class Sum(Reduce): """Computes the (weighted) sum of the given values. For example, if values is [1, 3, 5, 7] then the sum is 16. If the weights were specified as [1, 1, 0, 0] then the sum would be 4. This metric creates one variable, `total`, that is used to compute the sum of `values`. This is ultimately returned as `sum`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
Standalone usage: >>> m = tf.keras.metrics.Sum() >>> m.update_state([1, 3, 5, 7]) >>> m.result().numpy() 16.0 Usage with `compile()` API: ```python model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs)) model.compile(optimizer='sgd', loss='mse') ``` """ def __init__(self, name='sum', dtype=None): super(Sum, self).__init__(reduction=metrics_utils.Reduction.SUM, name=name, dtype=dtype) @keras_export('keras.metrics.Mean') class Mean(Reduce): """Computes the (weighted) mean of the given values. For example, if values is [1, 3, 5, 7] then the mean is 4. If the weights were specified as [1, 1, 0, 0] then the mean would be 2. This metric creates two variables, `total` and `count` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Mean() >>> m.update_state([1, 3, 5, 7]) >>> m.result().numpy() 4.0 >>> m.reset_state() >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) >>> m.result().numpy() 2.0 Usage with `compile()` API: ```python model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs)) model.compile(optimizer='sgd', loss='mse') ``` """ def __init__(self, name='mean', dtype=None): super(Mean, self).__init__( reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype) @keras_export('keras.metrics.MeanRelativeError') class MeanRelativeError(Mean): """Computes the mean relative error by normalizing with the given values. This metric creates two local variables, `total` and `count` that are used to compute the mean relative error. This is weighted by `sample_weight`, and it is ultimately returned as `mean_relative_error`: an idempotent operation that simply divides `total` by `count`. 
  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    normalizer: The normalizer values with same shape as predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
  >>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])

  >>> # metric = mean(|y_pred - y_true| / normalizer)
  >>> #        = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
  >>> #        = 5/4 = 1.25
  >>> m.result().numpy()
  1.25

  Usage with `compile()` API:

  ```python
  model.compile(
    optimizer='sgd',
    loss='mse',
    metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
  ```
  """

  def __init__(self, normalizer, name=None, dtype=None):
    super(MeanRelativeError, self).__init__(name=name, dtype=dtype)
    normalizer = tf.cast(normalizer, self._dtype)
    self.normalizer = normalizer

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)
    [y_pred, y_true], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_pred, y_true], sample_weight)
    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)

    # NOTE: this reassigns `self.normalizer` with any squeezable dimension
    # removed, so the stored normalizer may change after the first update.
    y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(
        y_pred, self.normalizer)
    y_pred.shape.assert_is_compatible_with(y_true.shape)
    relative_errors = tf.math.divide_no_nan(
        tf.abs(y_true - y_pred), self.normalizer)

    return super(MeanRelativeError, self).update_state(
        relative_errors, sample_weight=sample_weight)

  def get_config(self):
    n = self.normalizer
    config = {'normalizer': backend.eval(n) if is_tensor_or_variable(n) else n}
    base_config = super(MeanRelativeError, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.MeanMetricWrapper')
class MeanMetricWrapper(Mean):
  """Wraps a stateless metric function with the Mean metric.

  You could use this class to quickly build a mean metric from a function. The
  function needs to have the signature `fn(y_true, y_pred)` and return a
  per-sample loss array. `MeanMetricWrapper.result()` will return
  the average metric value across all samples seen so far.

  For example:

  ```python
  def accuracy(y_true, y_pred):
    return tf.cast(tf.math.equal(y_true, y_pred), tf.float32)

  accuracy_metric = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)

  keras_model.compile(..., metrics=accuracy_metric)
  ```

  Args:
    fn: The metric function to wrap, with signature `fn(y_true, y_pred,
      **kwargs)`.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    **kwargs: Keyword arguments to pass on to `fn`.
  """

  def __init__(self, fn, name=None, dtype=None, **kwargs):
    super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
    self._fn = fn
    self._fn_kwargs = kwargs

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.
    `y_true` and `y_pred` should have the same shape.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
      sample_weight: Optional `sample_weight` acts as a
        coefficient for the metric. If a scalar is provided, then the metric is
        simply scaled by the given value. If `sample_weight` is a tensor of
        size `[batch_size]`, then the metric for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
        broadcasted to this shape), then each metric element of `y_pred` is
        scaled by the corresponding value of `sample_weight`. (Note on `dN-1`:
        all metric functions reduce by 1 dimension, usually the last axis
        (-1)).

    Returns:
      Update op.
    """
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)
    [y_true, y_pred], sample_weight = (
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_true, y_pred], sample_weight))
    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)

    # Convert the wrapped fn with autograph so user-written Python control
    # flow works inside graph mode.
    ag_fn = tf.__internal__.autograph.tf_convert(
        self._fn, tf.__internal__.autograph.control_status_ctx())
    matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
    return super(MeanMetricWrapper, self).update_state(
        matches, sample_weight=sample_weight)

  def get_config(self):
    config = {}

    if type(self) is MeanMetricWrapper:  # pylint: disable=unidiomatic-typecheck
      # Only include function argument when the object is a MeanMetricWrapper
      # and not a subclass.
      config['fn'] = self._fn

    for k, v in self._fn_kwargs.items():
      config[k] = backend.eval(v) if is_tensor_or_variable(v) else v
    base_config = super(MeanMetricWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config):
    # Note that while MeanMetricWrapper itself isn't public, objects of this
    # class may be created and added to the model by calling model.compile.
    fn = config.pop('fn', None)
    if cls is MeanMetricWrapper:
      return cls(get(fn), **config)
    return super(MeanMetricWrapper, cls).from_config(config)


@keras_export('keras.metrics.Accuracy')
class Accuracy(MeanMetricWrapper):
  """Calculates how often predictions equal labels.

  This metric creates two local variables, `total` and `count` that are used
  to compute the frequency with which `y_pred` matches `y_true`. This
  frequency is ultimately returned as `binary accuracy`: an idempotent
  operation that simply divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Accuracy()
  >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
  >>> m.result().numpy()
  0.75

  >>> m.reset_state()
  >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
  ...                sample_weight=[1, 1, 0, 0])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Accuracy()])
  ```
  """

  def __init__(self, name='accuracy', dtype=None):
    super(Accuracy, self).__init__(accuracy, name, dtype=dtype)


@keras_export('keras.metrics.BinaryAccuracy')
class BinaryAccuracy(MeanMetricWrapper):
  """Calculates how often predictions match binary labels.

  This metric creates two local variables, `total` and `count` that are used
  to compute the frequency with which `y_pred` matches `y_true`. This
  frequency is ultimately returned as `binary accuracy`: an idempotent
  operation that simply divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    threshold: (Optional) Float representing the threshold for deciding
      whether prediction values are 1 or 0.
Standalone usage: >>> m = tf.keras.metrics.BinaryAccuracy() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]]) >>> m.result().numpy() 0.75 >>> m.reset_state() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]], ... sample_weight=[1, 0, 0, 1]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.BinaryAccuracy()]) ``` """ def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5): super(BinaryAccuracy, self).__init__( binary_accuracy, name, dtype=dtype, threshold=threshold) @keras_export('keras.metrics.CategoricalAccuracy') class CategoricalAccuracy(MeanMetricWrapper): """Calculates how often predictions match one-hot labels. You can provide logits of classes as `y_pred`, since argmax of logits and probabilities are same. This metric creates two local variables, `total` and `count` that are used to compute the frequency with which `y_pred` matches `y_true`. This frequency is ultimately returned as `categorical accuracy`: an idempotent operation that simply divides `total` by `count`. `y_pred` and `y_true` should be passed in as vectors of probabilities, rather than as labels. If necessary, use `tf.one_hot` to expand `y_true` as a vector. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.CategoricalAccuracy() >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], ... [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], ... [0.05, 0.95, 0]], ... 
sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.CategoricalAccuracy()]) ``` """ def __init__(self, name='categorical_accuracy', dtype=None): super(CategoricalAccuracy, self).__init__( categorical_accuracy, name, dtype=dtype) @keras_export('keras.metrics.SparseCategoricalAccuracy') class SparseCategoricalAccuracy(MeanMetricWrapper): """Calculates how often predictions match integer labels. ```python acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)) ``` You can provide logits of classes as `y_pred`, since argmax of logits and probabilities are same. This metric creates two local variables, `total` and `count` that are used to compute the frequency with which `y_pred` matches `y_true`. This frequency is ultimately returned as `sparse categorical accuracy`: an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SparseCategoricalAccuracy() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]], ... 
sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) ``` """ def __init__(self, name='sparse_categorical_accuracy', dtype=None): super(SparseCategoricalAccuracy, self).__init__( sparse_categorical_accuracy, name, dtype=dtype) SparseCategoricalAccuracy.update_state.__doc__ = _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING @keras_export('keras.metrics.TopKCategoricalAccuracy') class TopKCategoricalAccuracy(MeanMetricWrapper): """Computes how often targets are in the top `K` predictions. Args: k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1) >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.TopKCategoricalAccuracy()]) ``` """ def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None): super(TopKCategoricalAccuracy, self).__init__( top_k_categorical_accuracy, name, dtype=dtype, k=k) @keras_export('keras.metrics.SparseTopKCategoricalAccuracy') class SparseTopKCategoricalAccuracy(MeanMetricWrapper): """Computes how often integer targets are in the top `K` predictions. Args: k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
Standalone usage: >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1) >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()]) ``` """ def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None): super(SparseTopKCategoricalAccuracy, self).__init__( sparse_top_k_categorical_accuracy, name, dtype=dtype, k=k) SparseTopKCategoricalAccuracy.update_state.__doc__ = _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING class _ConfusionMatrixConditionCount(Metric): """Calculates the number of the given confusion matrix condition. Args: confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions. thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
""" def __init__(self, confusion_matrix_cond, thresholds=None, name=None, dtype=None): super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype) self._confusion_matrix_cond = confusion_matrix_cond self.init_thresholds = thresholds self.thresholds = metrics_utils.parse_init_thresholds( thresholds, default_threshold=0.5) self._thresholds_distributed_evenly = ( metrics_utils.is_evenly_distributed_thresholds(self.thresholds)) self.accumulator = self.add_weight( 'accumulator', shape=(len(self.thresholds),), initializer=tf.compat.v1.zeros_initializer) def update_state(self, y_true, y_pred, sample_weight=None): """Accumulates the metric statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a `Tensor` whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`. Returns: Update op. """ return metrics_utils.update_confusion_matrix_variables( {self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight) def result(self): if len(self.thresholds) == 1: result = self.accumulator[0] else: result = self.accumulator return tf.convert_to_tensor(result) def reset_state(self): backend.batch_set_value([ (v, np.zeros(v.shape.as_list())) for v in self.variables ]) def get_config(self): config = {'thresholds': self.init_thresholds} base_config = super(_ConfusionMatrixConditionCount, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.metrics.FalsePositives') class FalsePositives(_ConfusionMatrixConditionCount): """Calculates the number of false positives. If `sample_weight` is given, calculates the sum of the weights of false positives. This metric creates one local variable, `accumulator` that is used to keep track of the number of false positives. 
  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.FalsePositives()
  >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
  >>> m.result().numpy()
  2.0

  >>> m.reset_state()
  >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.FalsePositives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    # All accumulation logic lives in _ConfusionMatrixConditionCount; this
    # subclass only selects which confusion-matrix cell to count.
    super(FalsePositives, self).__init__(
        confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)


@keras_export('keras.metrics.FalseNegatives')
class FalseNegatives(_ConfusionMatrixConditionCount):
  """Calculates the number of false negatives.

  If `sample_weight` is given, calculates the sum of the weights of
  false negatives. This metric creates one local variable, `accumulator`
  that is used to keep track of the number of false negatives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.FalseNegatives()
  >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
  >>> m.result().numpy()
  2.0

  >>> m.reset_state()
  >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.FalseNegatives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    # Selects the false-negative cell of the confusion matrix; counting is
    # done by the shared base class.
    super(FalseNegatives, self).__init__(
        confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)


@keras_export('keras.metrics.TrueNegatives')
class TrueNegatives(_ConfusionMatrixConditionCount):
  """Calculates the number of true negatives.

  If `sample_weight` is given, calculates the sum of the weights of
  true negatives. This metric creates one local variable, `accumulator`
  that is used to keep track of the number of true negatives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.TrueNegatives()
  >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
  >>> m.result().numpy()
  2.0

  >>> m.reset_state()
  >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.TrueNegatives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    # Selects the true-negative cell of the confusion matrix; counting is
    # done by the shared base class.
    super(TrueNegatives, self).__init__(
        confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)


@keras_export('keras.metrics.TruePositives')
class TruePositives(_ConfusionMatrixConditionCount):
  """Calculates the number of true positives.

  If `sample_weight` is given, calculates the sum of the weights of
  true positives. This metric creates one local variable, `true_positives`
  that is used to keep track of the number of true positives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.TruePositives()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  >>> m.result().numpy()
  2.0

  >>> m.reset_state()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.TruePositives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    # Selects the true-positive cell of the confusion matrix; counting is
    # done by the shared base class.
    super(TruePositives, self).__init__(
        confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)


@keras_export('keras.metrics.Precision')
class Precision(Metric):
  """Computes the precision of the predictions with respect to the labels.

  The metric creates two local variables, `true_positives` and
  `false_positives` that are used to compute the precision. This value is
  ultimately returned as `precision`, an idempotent operation that simply
  divides `true_positives` by the sum of `true_positives` and
  `false_positives`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `top_k` is set, we'll calculate precision as how often on average a class
  among the top-k classes with the highest predicted values of a batch entry
  is correct and can be found in the label for that entry.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is above the threshold and/or in
  the top-k highest predictions, and computing the fraction of them for which
  `class_id` is indeed a correct label.

  Args:
    thresholds: (Optional) A float value or a python list/tuple of float
      threshold values in [0, 1]. A threshold is compared with prediction
      values to determine the truth value of predictions (i.e., above the
      threshold is `true`, below is `false`). One metric value is generated
      for each threshold value. If neither thresholds nor top_k are set, the
      default is to calculate precision with `thresholds=0.5`.
    top_k: (Optional) Unset by default. An int value specifying the top-k
      predictions to consider when calculating precision.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Precision()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  >>> m.result().numpy()
  0.6666667

  >>> m.reset_state()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  >>> # With top_k=2, it will calculate precision over y_true[:2] and y_pred[:2]
  >>> m = tf.keras.metrics.Precision(top_k=2)
  >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
  >>> m.result().numpy()
  0.0

  >>> # With top_k=4, it will calculate precision over y_true[:4] and y_pred[:4]
  >>> m = tf.keras.metrics.Precision(top_k=4)
  >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Precision()])
  ```
  """

  def __init__(self,
               thresholds=None,
               top_k=None,
               class_id=None,
               name=None,
               dtype=None):
    super(Precision, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    # NOTE(review): with `top_k` set, the default threshold is NEG_INF, which
    # presumably disables plain thresholding so that top-k membership alone
    # decides positives — confirm against metrics_utils.
    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self._thresholds_distributed_evenly = (
        metrics_utils.is_evenly_distributed_thresholds(self.thresholds))
    # One accumulator slot per threshold value.
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false positive statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range
        `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        thresholds_distributed_evenly=self._thresholds_distributed_evenly,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def result(self):
    result = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_positives))
    # Collapse to a scalar when only a single threshold is in use.
    return result[0] if len(self.thresholds) == 1 else result

  def reset_state(self):
    num_thresholds = len(to_list(self.thresholds))
    backend.batch_set_value([(v, np.zeros((num_thresholds,)))
                             for v in (self.true_positives,
                                       self.false_positives)])

  def get_config(self):
    config = {
        'thresholds': self.init_thresholds,
        'top_k': self.top_k,
        'class_id': self.class_id
    }
    base_config = super(Precision, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.Recall')
class Recall(Metric):
  """Computes the recall of the predictions with respect to the labels.

  This metric creates two local variables, `true_positives` and
  `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply divides
  `true_positives` by the sum of `true_positives` and `false_negatives`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.
  If `top_k` is set, recall will be computed as how often on average a class
  among the labels of a batch entry is in the top-k predictions.

  If `class_id` is specified, we calculate recall by considering only the
  entries in the batch for which `class_id` is in the label, and computing the
  fraction of them for which `class_id` is above the threshold and/or in the
  top-k predictions.

  Args:
    thresholds: (Optional) A float value or a python list/tuple of float
      threshold values in [0, 1]. A threshold is compared with prediction
      values to determine the truth value of predictions (i.e., above the
      threshold is `true`, below is `false`). One metric value is generated
      for each threshold value. If neither thresholds nor top_k are set, the
      default is to calculate recall with `thresholds=0.5`.
    top_k: (Optional) Unset by default. An int value specifying the top-k
      predictions to consider when calculating recall.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Recall()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  >>> m.result().numpy()
  0.6666667

  >>> m.reset_state()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Recall()])
  ```
  """

  def __init__(self,
               thresholds=None,
               top_k=None,
               class_id=None,
               name=None,
               dtype=None):
    super(Recall, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    # NOTE(review): as in `Precision`, a NEG_INF default threshold when
    # `top_k` is set presumably lets top-k membership alone decide positives
    # — confirm against metrics_utils.
    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self._thresholds_distributed_evenly = (
        metrics_utils.is_evenly_distributed_thresholds(self.thresholds))
    # One accumulator slot per threshold value.
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false negative statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range
        `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        thresholds_distributed_evenly=self._thresholds_distributed_evenly,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def result(self):
    result = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    # Collapse to a scalar when only a single threshold is in use.
    return result[0] if len(self.thresholds) == 1 else result

  def reset_state(self):
    num_thresholds = len(to_list(self.thresholds))
    backend.batch_set_value([(v, np.zeros((num_thresholds,)))
                             for v in (self.true_positives,
                                       self.false_negatives)])

  def get_config(self):
    config = {
        'thresholds': self.init_thresholds,
        'top_k': self.top_k,
        'class_id': self.class_id
    }
    base_config = super(Recall, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


class SensitivitySpecificityBase(Metric, metaclass=abc.ABCMeta):
  """Abstract base class for computing sensitivity and specificity.

  For additional information about specificity and sensitivity, see
  [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
  """

  def __init__(self, value, num_thresholds=200, class_id=None, name=None,
               dtype=None):
    super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
    if num_thresholds <= 0:
      raise ValueError(
          'Argument `num_thresholds` must be an integer > 0. '
          f'Received: num_thresholds={num_thresholds}')
    # `value` is the constraint level (e.g. the target specificity) that
    # subclasses compare against in `result()`.
    self.value = value
    self.class_id = class_id
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(num_thresholds,),
        initializer=tf.compat.v1.zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=(num_thresholds,),
        initializer=tf.compat.v1.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(num_thresholds,),
        initializer=tf.compat.v1.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(num_thresholds,),
        initializer=tf.compat.v1.zeros_initializer)

    # Compute `num_thresholds` thresholds in [0, 1]
    if num_thresholds == 1:
      self.thresholds = [0.5]
      self._thresholds_distributed_evenly = False
    else:
      thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                    for i in range(num_thresholds - 2)]
      self.thresholds = [0.0] + thresholds + [1.0]
      self._thresholds_distributed_evenly = True

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        thresholds_distributed_evenly=self._thresholds_distributed_evenly,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def reset_state(self):
    num_thresholds = len(self.thresholds)
    confusion_matrix_variables = (self.true_positives, self.true_negatives,
                                  self.false_positives, self.false_negatives)
    backend.batch_set_value([
        (v, np.zeros((num_thresholds,))) for v in confusion_matrix_variables
    ])

  def get_config(self):
    config = {'class_id': self.class_id}
    base_config = super(SensitivitySpecificityBase, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def _find_max_under_constraint(self, constrained, dependent, predicate):
    """Returns the maximum of dependent_statistic that satisfies the constraint.

    Args:
      constrained: Over these values the constraint
        is specified. A rank-1 tensor.
      dependent: From these values the maximum that satisfies the constraint
        is selected. Values in this tensor and in `constrained` are linked by
        having the same threshold at each position, hence this tensor must
        have the same shape.
      predicate: A binary boolean functor to be applied to arguments
        `constrained` and `self.value`, e.g. `tf.greater`.

    Returns maximal dependent value, if no value satisfies the constraint
      0.0.
    """
    feasible = tf.where(predicate(constrained, self.value))
    feasible_exists = tf.greater(tf.size(feasible), 0)
    max_dependent = tf.reduce_max(tf.gather(dependent, feasible))

    # Fall back to 0.0 when no threshold satisfies the constraint.
    return tf.where(feasible_exists, max_dependent, 0.0)


@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
  """Computes best sensitivity where specificity is >= specified value.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the sensitivity at the given specificity. The threshold for the
  given specificity value is computed and used to evaluate the corresponding
  sensitivity.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is above the threshold
  predictions, and computing the fraction of them for which `class_id` is
  indeed a correct label.

  For additional information about specificity and sensitivity, see
  [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

  Args:
    specificity: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given specificity.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
  Standalone usage:

  >>> m = tf.keras.metrics.SensitivityAtSpecificity(0.5)
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
  >>> m.result().numpy()
  0.5

  >>> m.reset_state()
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
  ...                sample_weight=[1, 1, 2, 2, 1])
  >>> m.result().numpy()
  0.333333

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SensitivityAtSpecificity(specificity=0.5)])
  ```
  """

  def __init__(self,
               specificity,
               num_thresholds=200,
               class_id=None,
               name=None,
               dtype=None):
    if specificity < 0 or specificity > 1:
      raise ValueError(
          'Argument `specificity` must be in the range [0, 1]. '
          f'Received: specificity={specificity}')
    self.specificity = specificity
    self.num_thresholds = num_thresholds
    super(SensitivityAtSpecificity, self).__init__(
        specificity,
        num_thresholds=num_thresholds,
        class_id=class_id,
        name=name,
        dtype=dtype)

  def result(self):
    # Compute specificity and sensitivity at every threshold, then take the
    # best sensitivity among thresholds meeting the specificity constraint.
    specificities = tf.math.divide_no_nan(
        self.true_negatives,
        tf.math.add(self.true_negatives, self.false_positives))
    sensitivities = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    return self._find_max_under_constraint(
        specificities, sensitivities, tf.greater_equal)

  def get_config(self):
    config = {
        'num_thresholds': self.num_thresholds,
        'specificity': self.specificity
    }
    base_config = super(SensitivityAtSpecificity, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
  """Computes best specificity where sensitivity is >= specified value.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).
  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the specificity at the given sensitivity. The threshold for the
  given sensitivity value is computed and used to evaluate the corresponding
  specificity.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is above the threshold
  predictions, and computing the fraction of them for which `class_id` is
  indeed a correct label.

  For additional information about specificity and sensitivity, see
  [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

  Args:
    sensitivity: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given sensitivity.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SpecificityAtSensitivity(0.5)
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
  >>> m.result().numpy()
  0.66666667

  >>> m.reset_state()
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
  ...                sample_weight=[1, 1, 2, 2, 2])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SpecificityAtSensitivity(sensitivity=0.5)])
  ```
  """

  def __init__(self,
               sensitivity,
               num_thresholds=200,
               class_id=None,
               name=None,
               dtype=None):
    if sensitivity < 0 or sensitivity > 1:
      raise ValueError(
          'Argument `sensitivity` must be in the range [0, 1]. '
          f'Received: sensitivity={sensitivity}')
    self.sensitivity = sensitivity
    self.num_thresholds = num_thresholds
    super(SpecificityAtSensitivity, self).__init__(
        sensitivity,
        num_thresholds=num_thresholds,
        class_id=class_id,
        name=name,
        dtype=dtype)

  def result(self):
    # Best specificity among thresholds whose sensitivity meets the target.
    sensitivities = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    specificities = tf.math.divide_no_nan(
        self.true_negatives,
        tf.math.add(self.true_negatives, self.false_positives))
    return self._find_max_under_constraint(
        sensitivities, specificities, tf.greater_equal)

  def get_config(self):
    config = {
        'num_thresholds': self.num_thresholds,
        'sensitivity': self.sensitivity
    }
    base_config = super(SpecificityAtSensitivity, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.PrecisionAtRecall')
class PrecisionAtRecall(SensitivitySpecificityBase):
  """Computes best precision where recall is >= specified value.

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the precision at the given recall. The threshold for the given
  recall value is computed and used to evaluate the corresponding precision.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is above the threshold
  predictions, and computing the fraction of them for which `class_id` is
  indeed a correct label.

  Args:
    recall: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given recall.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.PrecisionAtRecall(0.5)
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
  >>> m.result().numpy()
  0.5

  >>> m.reset_state()
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
  ...                sample_weight=[2, 2, 2, 1, 1])
  >>> m.result().numpy()
  0.33333333

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
  ```
  """

  def __init__(self,
               recall,
               num_thresholds=200,
               class_id=None,
               name=None,
               dtype=None):
    if recall < 0 or recall > 1:
      raise ValueError(
          'Argument `recall` must be in the range [0, 1]. '
          f'Received: recall={recall}')
    self.recall = recall
    self.num_thresholds = num_thresholds
    super(PrecisionAtRecall, self).__init__(
        value=recall,
        num_thresholds=num_thresholds,
        class_id=class_id,
        name=name,
        dtype=dtype)

  def result(self):
    # Best precision among thresholds whose recall meets the target.
    recalls = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    precisions = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_positives))
    return self._find_max_under_constraint(
        recalls, precisions, tf.greater_equal)

  def get_config(self):
    config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}
    base_config = super(PrecisionAtRecall, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.RecallAtPrecision')
class RecallAtPrecision(SensitivitySpecificityBase):
  """Computes best recall where precision is >= specified value.

  For a given score-label-distribution the required precision might not
  be achievable, in this case 0.0 is returned as recall.

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the recall at the given precision. The threshold for the given
  precision value is computed and used to evaluate the corresponding recall.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is above the threshold
  predictions, and computing the fraction of them for which `class_id` is
  indeed a correct label.

  Args:
    precision: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given precision.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.RecallAtPrecision(0.8)
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> m.result().numpy()
  0.5

  >>> m.reset_state()
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)])
  ```
  """

  def __init__(self,
               precision,
               num_thresholds=200,
               class_id=None,
               name=None,
               dtype=None):
    if precision < 0 or precision > 1:
      raise ValueError(
          'Argument `precision` must be in the range [0, 1]. '
          f'Received: precision={precision}')
    self.precision = precision
    self.num_thresholds = num_thresholds
    super(RecallAtPrecision, self).__init__(
        value=precision,
        num_thresholds=num_thresholds,
        class_id=class_id,
        name=name,
        dtype=dtype)

  def result(self):
    # Best recall among thresholds whose precision meets the target; 0.0 when
    # the target precision is never reached (see _find_max_under_constraint).
    precisions = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_positives))
    recalls = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    return self._find_max_under_constraint(
        precisions, recalls, tf.greater_equal)

  def get_config(self):
    config = {'num_thresholds': self.num_thresholds,
              'precision': self.precision}
    base_config = super(RecallAtPrecision, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.AUC')
class AUC(Metric):
  """Approximates the AUC (Area under the curve) of the ROC or PR curves.

  The AUC (Area under the curve) of the ROC (Receiver operating
  characteristic; default) or PR (Precision Recall) curves are quality
  measures of binary classifiers. Unlike the accuracy, and like cross-entropy
  losses, ROC-AUC and PR-AUC evaluate all the operational points of a model.

  This class approximates AUCs using a Riemann sum. During the metric
  accumulation phase, predictions are accumulated within predefined buckets
  by value. The AUC is then computed by interpolating per-bucket averages.
  These buckets define the evaluated operational points.

  This metric creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the AUC. To discretize the AUC curve, a linearly spaced set of
  thresholds is used to compute pairs of recall and precision values. The area
  under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that computes the area under a discretized curve of precision versus recall values (computed using the aforementioned variables). The `num_thresholds` variable controls the degree of discretization with larger numbers of thresholds more closely approximating the true AUC. The quality of the approximation may vary dramatically depending on `num_thresholds`. The `thresholds` parameter can be used to manually specify thresholds which split the predictions more evenly. For a best approximation of the real AUC, `predictions` should be distributed approximately uniformly in the range [0, 1] (if `from_logits=False`). The quality of the AUC approximation may be poor if this is not the case. Setting `summation_method` to 'minoring' or 'majoring' can help quantify the error in the approximation by providing lower or upper bound estimate of the AUC. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: num_thresholds: (Optional) Defaults to 200. The number of thresholds to use when discretizing the roc curve. Values must be > 1. curve: (Optional) Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve. summation_method: (Optional) Specifies the [Riemann summation method]( https://en.wikipedia.org/wiki/Riemann_sum) used. 'interpolation' (default) applies mid-point summation scheme for `ROC`. For PR-AUC, interpolates (true/false) positives but not the ratio that is precision (see Davis & Goadrich 2006 for details); 'minoring' applies left summation for increasing intervals and right summation for decreasing intervals; 'majoring' does the opposite. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. thresholds: (Optional) A list of floating point values to use as the thresholds for discretizing the curve. If set, the `num_thresholds` parameter is ignored. 
      Values should be in [0, 1]. Endpoint thresholds equal to {-epsilon,
      1+epsilon} for a small positive epsilon value will be automatically
      included with these to correctly handle predictions equal to exactly 0
      or 1.
    multi_label: boolean indicating whether multilabel data should be
      treated as such, wherein AUC is computed separately for each label and
      then averaged across labels, or (when False) if the data should be
      flattened into a single label before AUC computation. In the latter
      case, when multilabel data is passed to AUC, each label-prediction pair
      is treated as an individual data point. Should be set to False for
      multi-class data.
    num_labels: (Optional) The number of labels, used when `multi_label` is
      True. If `num_labels` is not specified, then state variables get created
      on the first call to `update_state`.
    label_weights: (Optional) list, array, or tensor of non-negative weights
      used to compute AUCs for multilabel data. When `multi_label` is True,
      the weights are applied to the individual label AUCs when they are
      averaged to produce the multi-label AUC. When it's False, they are used
      to weight the individual label predictions in computing the confusion
      matrix on the flattened data. Note that this is unlike class_weights in
      that class_weights weights the example depending on the value of its
      label, whereas label_weights depends only on the index of that label
      before flattening; therefore `label_weights` should not be used for
      multi-class data.
    from_logits: boolean indicating whether the predictions (`y_pred` in
      `update_state`) are probabilities or sigmoid logits. As a rule of thumb,
      when using a keras loss, the `from_logits` constructor argument of the
      loss should match the AUC `from_logits` constructor argument.

  Standalone usage:

  >>> m = tf.keras.metrics.AUC(num_thresholds=3)
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
  >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
  >>> # tp_rate = recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
  >>> # auc = ((((1+0.5)/2)*(1-0)) + (((0.5+0)/2)*(0-0))) = 0.75
  >>> m.result().numpy()
  0.75

  >>> m.reset_state()
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  # Reports the AUC of a model outputing a probability.
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.BinaryCrossentropy(),
                metrics=[tf.keras.metrics.AUC()])

  # Reports the AUC of a model outputing a logit.
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                metrics=[tf.keras.metrics.AUC(from_logits=True)])
  ```
  """

  def __init__(self,
               num_thresholds=200,
               curve='ROC',
               summation_method='interpolation',
               name=None,
               dtype=None,
               thresholds=None,
               multi_label=False,
               num_labels=None,
               label_weights=None,
               from_logits=False):
    # Validate configurations.
    # NOTE(review): the `isinstance` guard means a plain-string `curve` /
    # `summation_method` is validated later by the `from_str` calls below,
    # not here; these checks only reject enum values from a foreign enum.
    if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
        metrics_utils.AUCCurve):
      raise ValueError(
          f'Invalid `curve` argument value "{curve}". '
          f'Expected one of: {list(metrics_utils.AUCCurve)}')
    if isinstance(
        summation_method,
        metrics_utils.AUCSummationMethod) and summation_method not in list(
            metrics_utils.AUCSummationMethod):
      raise ValueError(
          f'Invalid `summation_method` argument value "{summation_method}". '
          f'Expected one of: {list(metrics_utils.AUCSummationMethod)}')

    # Update properties.
    # Remember whether thresholds were user-supplied, so get_config() can
    # serialize the user thresholds instead of the generated ones.
    self._init_from_thresholds = thresholds is not None
    if thresholds is not None:
      # If specified, use the supplied thresholds.
      self.num_thresholds = len(thresholds) + 2
      thresholds = sorted(thresholds)
      self._thresholds_distributed_evenly = (
          metrics_utils.is_evenly_distributed_thresholds(
              np.array([0.0] + thresholds + [1.0])))
    else:
      if num_thresholds <= 1:
        raise ValueError('Argument `num_thresholds` must be an integer > 1. '
                         f'Received: num_thresholds={num_thresholds}')

      # Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
      # (0, 1).
      self.num_thresholds = num_thresholds
      thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                    for i in range(num_thresholds - 2)]
      self._thresholds_distributed_evenly = True

    # Add an endpoint "threshold" below zero and above one for either
    # threshold method to account for floating point imprecisions.
    self._thresholds = np.array([0.0 - backend.epsilon()] + thresholds +
                                [1.0 + backend.epsilon()])

    if isinstance(curve, metrics_utils.AUCCurve):
      self.curve = curve
    else:
      self.curve = metrics_utils.AUCCurve.from_str(curve)
    if isinstance(summation_method, metrics_utils.AUCSummationMethod):
      self.summation_method = summation_method
    else:
      self.summation_method = metrics_utils.AUCSummationMethod.from_str(
          summation_method)
    super(AUC, self).__init__(name=name, dtype=dtype)

    # Handle multilabel arguments.
    self.multi_label = multi_label
    if label_weights is not None:
      label_weights = tf.constant(label_weights, dtype=self.dtype)
      checks = [
          tf.compat.v1.assert_non_negative(
              label_weights,
              message='All values of `label_weights` must be non-negative.')
      ]
      with tf.control_dependencies(checks):
        self.label_weights = label_weights
    else:
      self.label_weights = None

    self._from_logits = from_logits

    # State variables are created lazily unless `num_labels` pins the shape
    # (multi-label) or no per-label shape is needed (single-label).
    self._built = False
    if self.multi_label:
      if num_labels:
        shape = tf.TensorShape([None, num_labels])
        self._build(shape)
    else:
      if num_labels:
        raise ValueError(
            '`num_labels` is needed only when `multi_label` is True.')
      self._build(None)

  @property
  def thresholds(self):
    """The thresholds used for evaluating AUC."""
    return list(self._thresholds)

  def _build(self, shape):
    """Initialize TP, FP, TN, and FN tensors, given the shape of the data."""
    if self.multi_label:
      if shape.ndims != 2:
        raise ValueError(
            '`y_true` must have rank 2 when `multi_label=True`. '
            f'Found rank {shape.ndims}. '
            f'Full shape received for `y_true`: {shape}')
      self._num_labels = shape[1]
      variable_shape = tf.TensorShape(
          [tf.compat.v1.Dimension(self.num_thresholds), self._num_labels])
    else:
      variable_shape = tf.TensorShape(
          [tf.compat.v1.Dimension(self.num_thresholds)])

    self._build_input_shape = shape
    # Create metric variables
    self.true_positives = self.add_weight(
        'true_positives',
        shape=variable_shape,
        initializer=tf.compat.v1.zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=variable_shape,
        initializer=tf.compat.v1.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=variable_shape,
        initializer=tf.compat.v1.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=variable_shape,
        initializer=tf.compat.v1.zeros_initializer)

    if self.multi_label:
      with tf.init_scope():
        # This should only be necessary for handling v1 behavior. In v2, AUC
        # should be initialized outside of any tf.functions, and therefore in
        # eager mode.
        if not tf.executing_eagerly():
          backend._initialize_variables(backend._get_session())  # pylint: disable=protected-access

    self._built = True

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    deps = []
    if not self._built:
      # Lazily create the TP/TN/FP/FN variables on first call, using the
      # prediction shape to size the per-label dimension.
      self._build(tf.TensorShape(y_pred.shape))

    if self.multi_label or (self.label_weights is not None):
      # y_true should have shape (number of examples, number of labels).
      shapes = [(y_true, ('N', 'L'))]
      if self.multi_label:
        # TP, TN, FP, and FN should all have shape
        # (number of thresholds, number of labels).
        shapes.extend([(self.true_positives, ('T', 'L')),
                       (self.true_negatives, ('T', 'L')),
                       (self.false_positives, ('T', 'L')),
                       (self.false_negatives, ('T', 'L'))])
      if self.label_weights is not None:
        # label_weights should be of length equal to the number of labels.
        shapes.append((self.label_weights, ('L',)))
      deps = [
          tf.compat.v1.debugging.assert_shapes(
              shapes, message='Number of labels is not consistent.')
      ]

    # Only forward label_weights to update_confusion_matrix_variables when
    # multi_label is False. Otherwise the averaging of individual label AUCs
    # is handled in AUC.result.
    label_weights = None if self.multi_label else self.label_weights

    if self._from_logits:
      y_pred = activations.sigmoid(y_pred)

    with tf.control_dependencies(deps):
      return metrics_utils.update_confusion_matrix_variables(
          {
              metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
                  self.true_positives,
              metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
                  self.true_negatives,
              metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
                  self.false_positives,
              metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
                  self.false_negatives,
          },
          y_true,
          y_pred,
          self._thresholds,
          thresholds_distributed_evenly=self._thresholds_distributed_evenly,
          sample_weight=sample_weight,
          multi_label=self.multi_label,
          label_weights=label_weights)

  def interpolate_pr_auc(self):
    """Interpolation formula inspired by section 4 of Davis & Goadrich 2006.

    https://www.biostat.wisc.edu/~page/rocpr.pdf

    Note here we derive & use a closed formula not present in the paper
    as follows:

      Precision = TP / (TP + FP) = TP / P

    Modeling all of TP (true positive), FP (false positive) and their sum
    P = TP + FP (predicted positive) as varying linearly within each interval
    [A, B] between successive thresholds, we get

      Precision slope = dTP / dP
                      = (TP_B - TP_A) / (P_B - P_A)
                      = (TP - TP_A) / (P - P_A)
      Precision = (TP_A + slope * (P - P_A)) / P

    The area within the interval is (slope / total_pos_weight) times

      int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
      int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}

    where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in

      int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)

    Bringing back the factor (slope / total_pos_weight) we'd put aside, we get

      slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight

    where dTP == TP_B - TP_A.

    Note that when P_A == 0 the above calculation simplifies into

      int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)

    which is really equivalent to imputing constant precision throughout the
    first bucket having >0 true positives.

    Returns:
      pr_auc: an approximation of the area under the P-R curve.
    """
    # Slices below drop the last threshold (index num_thresholds - 1) or the
    # first one ([1:]), pairing successive thresholds into intervals [A, B].
    dtp = self.true_positives[:self.num_thresholds -
                              1] - self.true_positives[1:]
    p = tf.math.add(self.true_positives, self.false_positives)
    dp = p[:self.num_thresholds - 1] - p[1:]
    prec_slope = tf.math.divide_no_nan(
        dtp, tf.maximum(dp, 0), name='prec_slope')
    intercept = self.true_positives[1:] - tf.multiply(prec_slope, p[1:])

    # Guard the log argument: fall back to 1 (log -> 0) whenever either
    # endpoint of the interval has no predicted positives.
    safe_p_ratio = tf.where(
        tf.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
        tf.math.divide_no_nan(
            p[:self.num_thresholds - 1],
            tf.maximum(p[1:], 0),
            name='recall_relative_ratio'),
        tf.ones_like(p[1:]))

    pr_auc_increment = tf.math.divide_no_nan(
        prec_slope * (dtp + intercept * tf.math.log(safe_p_ratio)),
        tf.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),
        name='pr_auc_increment')

    if self.multi_label:
      by_label_auc = tf.reduce_sum(
          pr_auc_increment, name=self.name + '_by_label', axis=0)
      if self.label_weights is None:
        # Evenly weighted average of the label AUCs.
        return tf.reduce_mean(by_label_auc, name=self.name)
      else:
        # Weighted average of the label AUCs.
        return tf.math.divide_no_nan(
            tf.reduce_sum(tf.multiply(by_label_auc, self.label_weights)),
            tf.reduce_sum(self.label_weights),
            name=self.name)
    else:
      return tf.reduce_sum(pr_auc_increment, name='interpolate_pr_auc')

  def result(self):
    if (self.curve == metrics_utils.AUCCurve.PR and
        self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION
       ):
      # This use case is different and is handled separately.
      return self.interpolate_pr_auc()

    # Set `x` and `y` values for the curves based on `curve` config.
    recall = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    if self.curve == metrics_utils.AUCCurve.ROC:
      fp_rate = tf.math.divide_no_nan(
          self.false_positives,
          tf.math.add(self.false_positives, self.true_negatives))
      x = fp_rate
      y = recall
    else:  # curve == 'PR'.
      precision = tf.math.divide_no_nan(
          self.true_positives,
          tf.math.add(self.true_positives, self.false_positives))
      x = recall
      y = precision

    # Find the rectangle heights based on `summation_method`.
    if self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION:
      # Note: the case ('PR', 'interpolation') has been handled above.
      heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
    elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
      heights = tf.minimum(y[:self.num_thresholds - 1], y[1:])
    else:  # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
      heights = tf.maximum(y[:self.num_thresholds - 1], y[1:])

    # Sum up the areas of all the rectangles.
    if self.multi_label:
      riemann_terms = tf.multiply(x[:self.num_thresholds - 1] - x[1:],
                                  heights)
      by_label_auc = tf.reduce_sum(
          riemann_terms, name=self.name + '_by_label', axis=0)

      if self.label_weights is None:
        # Unweighted average of the label AUCs.
        return tf.reduce_mean(by_label_auc, name=self.name)
      else:
        # Weighted average of the label AUCs.
        return tf.math.divide_no_nan(
            tf.reduce_sum(tf.multiply(by_label_auc, self.label_weights)),
            tf.reduce_sum(self.label_weights),
            name=self.name)
    else:
      return tf.reduce_sum(
          tf.multiply(x[:self.num_thresholds - 1] - x[1:], heights),
          name=self.name)

  def reset_state(self):
    # Zero out the confusion-matrix variables; shape depends on whether
    # per-label state was built.
    if self._built:
      confusion_matrix_variables = (self.true_positives, self.true_negatives,
                                    self.false_positives,
                                    self.false_negatives)
      if self.multi_label:
        backend.batch_set_value(
            [(v, np.zeros((self.num_thresholds, self._num_labels)))
             for v in confusion_matrix_variables])
      else:
        backend.batch_set_value([(v, np.zeros((self.num_thresholds,)))
                                 for v in confusion_matrix_variables])

  def get_config(self):
    if is_tensor_or_variable(self.label_weights):
      label_weights = backend.eval(self.label_weights)
    else:
      label_weights = self.label_weights
    config = {
        'num_thresholds': self.num_thresholds,
        'curve': self.curve.value,
        'summation_method': self.summation_method.value,
        'multi_label': self.multi_label,
        'label_weights': label_weights
    }
    # Optimization to avoid serializing a large number of generated
    # thresholds.
    if self._init_from_thresholds:
      # We remove the endpoint thresholds as an inverse of how the thresholds
      # were initialized. This ensures that a metric initialized from this
      # config has the same thresholds.
      config['thresholds'] = self.thresholds[1:-1]
    base_config = super(AUC, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.CosineSimilarity')
class CosineSimilarity(MeanMetricWrapper):
  """Computes the cosine similarity between the labels and predictions.

  `cosine similarity = (a . b) / ||a|| ||b||`

  See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).

  This metric keeps the average cosine similarity between `predictions` and
  `labels` over a stream of data.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    axis: (Optional) Defaults to -1.
      The dimension along which the cosine similarity is computed.

  Standalone usage:

  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  >>> #        = ((0. + 0.) + (0.5 + 0.5)) / 2
  >>> m = tf.keras.metrics.CosineSimilarity(axis=1)
  >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  >>> m.result().numpy()
  0.49999997

  >>> m.reset_state()
  >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
  ...                sample_weight=[0.3, 0.7])
  >>> m.result().numpy()
  0.6999999

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
  ```
  """

  def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
    # Thin wrapper: delegates to the `cosine_similarity` function and
    # averages results over the stream of data.
    super(CosineSimilarity, self).__init__(
        cosine_similarity, name, dtype=dtype, axis=axis)


@keras_export('keras.metrics.MeanAbsoluteError')
class MeanAbsoluteError(MeanMetricWrapper):
  """Computes the mean absolute error between the labels and predictions.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanAbsoluteError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.25

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanAbsoluteError()])
  ```
  """

  def __init__(self, name='mean_absolute_error', dtype=None):
    # Delegates to the `mean_absolute_error` function.
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, name, dtype=dtype)


@keras_export('keras.metrics.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(MeanMetricWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanAbsolutePercentageError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  250000000.0

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  500000000.0

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
  ```
  """

  def __init__(self, name='mean_absolute_percentage_error', dtype=None):
    # Delegates to the `mean_absolute_percentage_error` function.
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, name, dtype=dtype)


@keras_export('keras.metrics.MeanSquaredError')
class MeanSquaredError(MeanMetricWrapper):
  """Computes the mean squared error between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanSquaredError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.25

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanSquaredError()])
  ```
  """

  def __init__(self, name='mean_squared_error', dtype=None):
    # Delegates to the `mean_squared_error` function.
    super(MeanSquaredError, self).__init__(
        mean_squared_error, name, dtype=dtype)


@keras_export('keras.metrics.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(MeanMetricWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.12011322

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.24022643

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
  ```
  """

  def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
    # Delegates to the `mean_squared_logarithmic_error` function.
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, name, dtype=dtype)


@keras_export('keras.metrics.Hinge')
class Hinge(MeanMetricWrapper):
  """Computes the hinge metric between `y_true` and `y_pred`.

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Hinge()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  1.3

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  1.1

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Hinge()])
  ```
  """

  def __init__(self, name='hinge', dtype=None):
    # Delegates to the `hinge` function.
    super(Hinge, self).__init__(hinge, name, dtype=dtype)


@keras_export('keras.metrics.SquaredHinge')
class SquaredHinge(MeanMetricWrapper):
  """Computes the squared hinge metric between `y_true` and `y_pred`.

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SquaredHinge()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  1.86

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  1.46

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SquaredHinge()])
  ```
  """

  def __init__(self, name='squared_hinge', dtype=None):
    # Delegates to the `squared_hinge` function.
    super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)


@keras_export('keras.metrics.CategoricalHinge')
class CategoricalHinge(MeanMetricWrapper):
  """Computes the categorical hinge metric between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.CategoricalHinge()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  1.4000001

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  1.2

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalHinge()])
  ```
  """

  def __init__(self, name='categorical_hinge', dtype=None):
    # Delegates to the `categorical_hinge` function.
    super(CategoricalHinge, self).__init__(categorical_hinge, name,
                                           dtype=dtype)


@keras_export('keras.metrics.RootMeanSquaredError')
class RootMeanSquaredError(Mean):
  """Computes root mean squared error metric between `y_true` and `y_pred`.

  Standalone usage:

  >>> m = tf.keras.metrics.RootMeanSquaredError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.5

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.70710677

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.RootMeanSquaredError()])
  ```
  """

  def __init__(self, name='root_mean_squared_error', dtype=None):
    super(RootMeanSquaredError, self).__init__(name, dtype=dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates root mean squared error statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)
    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)
    # Accumulate the squared error; the square root is taken in result().
    error_sq = tf.math.squared_difference(y_pred, y_true)
    return super(RootMeanSquaredError, self).update_state(
        error_sq, sample_weight=sample_weight)

  def result(self):
    return tf.sqrt(tf.math.divide_no_nan(self.total, self.count))


@keras_export('keras.metrics.LogCoshError')
class LogCoshError(MeanMetricWrapper):
  """Computes the logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error
  (y_pred - y_true)

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.LogCoshError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.10844523

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.21689045

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.LogCoshError()])
  ```
  """

  def __init__(self, name='logcosh', dtype=None):
    # Delegates to the `logcosh` function.
    super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)


@keras_export('keras.metrics.Poisson')
class Poisson(MeanMetricWrapper):
  """Computes the Poisson metric between `y_true` and `y_pred`.

  `metric = y_pred - y_true * log(y_pred)`

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Poisson()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.49999997

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.99999994

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Poisson()])
  ```
  """

  def __init__(self, name='poisson', dtype=None):
    # Delegates to the `poisson` function.
    super(Poisson, self).__init__(poisson, name, dtype=dtype)


@keras_export('keras.metrics.KLDivergence')
class KLDivergence(MeanMetricWrapper):
  """Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.

  `metric = y_true * log(y_true / y_pred)`

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.KLDivergence()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  0.45814306

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.9162892

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.KLDivergence()])
  ```
  """

  def __init__(self, name='kullback_leibler_divergence', dtype=None):
    # Delegates to the `kullback_leibler_divergence` function.
    super(KLDivergence, self).__init__(
        kullback_leibler_divergence, name, dtype=dtype)


@keras_export('keras.metrics.MeanIoU')
class MeanIoU(Metric):
  """Computes the mean Intersection-Over-Union metric.

  Mean Intersection-Over-Union is a common evaluation metric for semantic
  image segmentation, which first computes the IOU for each semantic class
  and then computes the average over classes. IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by
  `sample_weight` and the metric is then calculated from it.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    num_classes: The possible number of labels the prediction task can have.
      This value must be provided, since a confusion matrix of dimension =
      [num_classes, num_classes] will be allocated.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> # cm = [[1, 1],
  >>> #       [1, 1]]
  >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
  >>> # iou = true_positives / (sum_row + sum_col - true_positives))
  >>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
  >>> m = tf.keras.metrics.MeanIoU(num_classes=2)
  >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
  >>> m.result().numpy()
  0.33333334

  >>> m.reset_state()
  >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
  ...                sample_weight=[0.3, 0.3, 0.3, 0.1])
  >>> m.result().numpy()
  0.23809525

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
  ```
  """

  def __init__(self, num_classes, name=None, dtype=None):
    super(MeanIoU, self).__init__(name=name, dtype=dtype)
    self.num_classes = num_classes

    # Variable to accumulate the predictions in the confusion matrix.
    self.total_cm = self.add_weight(
        'total_confusion_matrix',
        shape=(num_classes, num_classes),
        initializer=tf.compat.v1.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)

    # Flatten the input if its rank > 1.
    if y_pred.shape.ndims > 1:
      y_pred = tf.reshape(y_pred, [-1])

    if y_true.shape.ndims > 1:
      y_true = tf.reshape(y_true, [-1])

    if sample_weight is not None:
      sample_weight = tf.cast(sample_weight, self._dtype)
      if sample_weight.shape.ndims > 1:
        sample_weight = tf.reshape(sample_weight, [-1])

    # Accumulate the prediction to current confusion matrix.
    current_cm = tf.math.confusion_matrix(
        y_true,
        y_pred,
        self.num_classes,
        weights=sample_weight,
        dtype=self._dtype)
    return self.total_cm.assign_add(current_cm)

  def result(self):
    """Compute the mean intersection-over-union via the confusion matrix."""
    sum_over_row = tf.cast(
        tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
    sum_over_col = tf.cast(
        tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
    true_positives = tf.cast(
        tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)

    # sum_over_row + sum_over_col =
    #     2 * true_positives + false_positives + false_negatives.
    denominator = sum_over_row + sum_over_col - true_positives

    # The mean is only computed over classes that appear in the
    # label or prediction tensor. If the denominator is 0, we need to
    # ignore the class.
    num_valid_entries = tf.reduce_sum(
        tf.cast(tf.not_equal(denominator, 0), dtype=self._dtype))

    iou = tf.math.divide_no_nan(true_positives, denominator)

    return tf.math.divide_no_nan(
        tf.reduce_sum(iou, name='mean_iou'), num_valid_entries)

  def reset_state(self):
    backend.set_value(
        self.total_cm, np.zeros((self.num_classes, self.num_classes)))

  def get_config(self):
    config = {'num_classes': self.num_classes}
    base_config = super(MeanIoU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.metrics.MeanTensor')
class MeanTensor(Metric):
  """Computes the element-wise (weighted) mean of the given tensors.

  `MeanTensor` returns a tensor with the same shape of the input tensors. The
  mean value is updated by keeping local variables `total` and `count`. The
  `total` tracks the sum of the weighted values, and `count` stores the sum of
  the weighted counts.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    shape: (Optional) A list of integers, a tuple of integers, or a 1-D Tensor
      of type int32. If not specified, the shape is inferred from the values
      at the first call of update_state.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanTensor()
  >>> m.update_state([0, 1, 2, 3])
  >>> m.update_state([4, 5, 6, 7])
  >>> m.result().numpy()
  array([2., 3., 4., 5.], dtype=float32)

  >>> m.update_state([12, 10, 8, 6], sample_weight=[0, 0.2, 0.5, 1])
  >>> m.result().numpy()
  array([2.       , 3.6363635, 4.8      , 5.3333335], dtype=float32)

  >>> m = tf.keras.metrics.MeanTensor(dtype=tf.float64, shape=(1, 4))
  >>> m.result().numpy()
  array([[0., 0., 0., 0.]])
  >>> m.update_state([[0, 1, 2, 3]])
  >>> m.update_state([[4, 5, 6, 7]])
  >>> m.result().numpy()
  array([[2., 3., 4., 5.]])
  """

  def __init__(self, name='mean_tensor', dtype=None, shape=None):
    super(MeanTensor, self).__init__(name=name, dtype=dtype)
    # State variables are created lazily on first update unless `shape` is
    # given up front.
    self._shape = None
    self._total = None
    self._count = None
    self._built = False
    if shape is not None:
      self._build(shape)

  def _build(self, shape):
    """Create the `total` and `count` state variables for `shape`."""
    self._shape = tf.TensorShape(shape)
    self._build_input_shape = self._shape
    # Create new state variables
    self._total = self.add_weight(
        'total', shape=shape, initializer=tf.compat.v1.zeros_initializer)
    self._count = self.add_weight(
        'count', shape=shape, initializer=tf.compat.v1.zeros_initializer)
    with tf.init_scope():
      if not tf.executing_eagerly():
        backend._initialize_variables(backend._get_session())  # pylint: disable=protected-access
    self._built = True

  @property
  def total(self):
    # None until the first update (or explicit `shape`) builds the state.
    return self._total if self._built else None

  @property
  def count(self):
    # None until the first update (or explicit `shape`) builds the state.
    return self._count if self._built else None

  def update_state(self, values, sample_weight=None):
    """Accumulates statistics for computing the element-wise mean.

    Args:
      values: Per-example value.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.
    """
    values = tf.cast(values, self._dtype)
    if not self._built:
      self._build(values.shape)
    elif values.shape != self._shape:
      raise ValueError(
          'MeanTensor input values must always have the same '
          f'shape. Expected shape (set during the first call): {self._shape}. '
          f'Got: {values.shape}.')

    num_values = tf.ones_like(values)
    if sample_weight is not None:
      sample_weight = tf.cast(sample_weight, self._dtype)

      # Update dimensions of weights to match with values if possible.
      values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
          values, sample_weight=sample_weight)
      try:
        # Broadcast weights if possible.
        sample_weight = tf.__internal__.ops.broadcast_weights(
            sample_weight, values)
      except ValueError:
        # Reduce values to same ndim as weight array
        ndim = backend.ndim(values)
        weight_ndim = backend.ndim(sample_weight)
        values = tf.reduce_mean(
            values, axis=list(range(weight_ndim, ndim)))

      num_values = tf.multiply(num_values, sample_weight)
      values = tf.multiply(values, sample_weight)

    update_total_op = self._total.assign_add(values)
    with tf.control_dependencies([update_total_op]):
      return self._count.assign_add(num_values)

  def result(self):
    if not self._built:
      raise ValueError(
          'MeanTensor does not have any value yet. Please call the MeanTensor '
          'instance or use `.update_state(value)` before retrieving the '
          'result.')
    return tf.math.divide_no_nan(self.total, self.count)

  def reset_state(self):
    if self._built:
      backend.batch_set_value([
          (v, np.zeros(v.shape.as_list())) for v in self.variables
      ])


@keras_export('keras.metrics.BinaryCrossentropy')
class BinaryCrossentropy(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.

  This is the crossentropy metric class to be used when there are only two
  label classes (0 and 1).

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether output is expected to be a logits tensor.
      By default, we consider that output encodes a probability distribution.
    label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
      smoothed, meaning the confidence on label values are relaxed.
      e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
      label `0` and `0.9` for label `1`".

  Standalone usage:

  >>> m = tf.keras.metrics.BinaryCrossentropy()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  0.81492424

  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.9162905

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.BinaryCrossentropy()])
  ```
  """

  def __init__(self,
               name='binary_crossentropy',
               dtype=None,
               from_logits=False,
               label_smoothing=0):
    # Delegates to the `binary_crossentropy` function.
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        label_smoothing=label_smoothing)


@keras_export('keras.metrics.CategoricalCrossentropy')
class CategoricalCrossentropy(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.

  This is the crossentropy metric class to be used when there are multiple
  label classes (2 or more). Here we assume that labels are given as a
  `one_hot` representation. eg., When labels values are [2, 0, 1],
   `y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether output is expected to be a logits tensor.
      By default, we consider that output encodes a probability distribution.
    label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
      smoothed, meaning the confidence on label values are relaxed. e.g.
      `label_smoothing=0.2` means that we will use a value of `0.1` for label
      `0` and `0.9` for label `1`"

  Standalone usage:

  >>> # EPSILON = 1e-7, y = y_true, y` = y_pred
  >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
  >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
  >>> # xent = -sum(y * log(y'), axis = -1)
  >>> #      = -((log 0.95), (log 0.1))
  >>> #      = [0.051, 2.302]
  >>> # Reduced xent = (0.051 + 2.302) / 2
  >>> m = tf.keras.metrics.CategoricalCrossentropy()
  >>> m.update_state([[0, 1, 0], [0, 0, 1]],
  ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
  >>> m.result().numpy()
  1.1769392

  >>> m.reset_state()
  >>> m.update_state([[0, 1, 0], [0, 0, 1]],
  ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
  ...                sample_weight=tf.constant([0.3, 0.7]))
  >>> m.result().numpy()
  1.6271976

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalCrossentropy()])
  ```
  """

  def __init__(self,
               name='categorical_crossentropy',
               dtype=None,
               from_logits=False,
               label_smoothing=0):
    # Delegates to the `categorical_crossentropy` function.
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        label_smoothing=label_smoothing)


@keras_export('keras.metrics.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.

  Use this crossentropy metric when there are two or more label classes.
  We expect labels to be provided as integers. If you want to provide labels
  using `one-hot` representation, please use `CategoricalCrossentropy` metric.
  There should be `# classes` floating point values per feature for `y_pred`
  and a single floating point value per feature for `y_true`.

  In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating pointing values per example for `y_pred`.
  The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
  `[batch_size, num_classes]`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether output is expected to be a logits tensor.
      By default, we consider that output encodes a probability distribution.
    axis: (Optional) Defaults to -1. The dimension along which the metric is
      computed.

  Standalone usage:

  >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
  >>> # logits = log(y_pred)
  >>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
  >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
  >>> # xent = -sum(y * log(softmax), 1)
  >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
  >>> #                [-2.3026, -0.2231, -2.3026]]
  >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
  >>> # xent = [0.0513, 2.3026]
  >>> # Reduced xent = (0.0513 + 2.3026) / 2
  >>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
  >>> m.update_state([1, 2],
  ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
  >>> m.result().numpy()
  1.1769392

  >>> m.reset_state()
  >>> m.update_state([1, 2],
  ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
  ...                sample_weight=tf.constant([0.3, 0.7]))
  >>> m.result().numpy()
  1.6271976

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
  ```
  """

  def __init__(self,
               name='sparse_categorical_crossentropy',
               dtype=None,
               from_logits=False,
               axis=-1):
    # Delegates to the `sparse_categorical_crossentropy` function.
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        axis=axis)


# Override the inherited MeanMetricWrapper.update_state docstring.
# NOTE(review): assumes _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING is defined
# earlier in this module — verify.
SparseCategoricalCrossentropy.update_state.__doc__ = _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING


class SumOverBatchSize(Reduce):
  """Computes the weighted sum over batch size of the given values.

  For example, if values is [1, 3, 5, 7] then the metric value is 4.
  If the weights were specified as [1, 1, 0, 0] then the value would be 1.

  This metric creates two variables, `total` and `count` that are used to
  compute the average of `values`. This average is ultimately returned as sum
  over batch size which is an idempotent operation that simply divides `total`
  by `count`.

  If `sample_weight` is `None`, weights default to 1.  Use `sample_weight` of 0
  to mask values.
""" def __init__(self, name='sum_over_batch_size', dtype=None): super(SumOverBatchSize, self).__init__( reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE, name=name, dtype=dtype) class SumOverBatchSizeMetricWrapper(SumOverBatchSize): """Wraps a function with the `SumOverBatchSizeMetricWrapper` metric.""" def __init__(self, fn, name=None, dtype=None, **kwargs): """Creates a `SumOverBatchSizeMetricWrapper` instance. Args: fn: The metric function to wrap, with signature `fn(y_true, y_pred, **kwargs)`. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: The keyword arguments that are passed on to `fn`. """ super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype) self._fn = fn self._fn_kwargs = kwargs def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.cast(y_true, self._dtype) y_pred = tf.cast(y_pred, self._dtype) y_pred, y_true = losses_utils.squeeze_or_expand_dimensions( y_pred, y_true) ag_fn = tf.__internal__.autograph.tf_convert(self._fn, tf.__internal__.autograph.control_status_ctx()) matches = ag_fn(y_true, y_pred, **self._fn_kwargs) return super(SumOverBatchSizeMetricWrapper, self).update_state( matches, sample_weight=sample_weight) def get_config(self): config = {} for k, v in self._fn_kwargs.items(): config[k] = backend.eval(v) if is_tensor_or_variable(v) else v base_config = super(SumOverBatchSizeMetricWrapper, self).get_config() return dict(list(base_config.items()) + list(config.items())) def accuracy(y_true, y_pred): [y_pred, y_true], _ = \ metrics_utils.ragged_assert_compatible_and_get_flat_values( [y_pred, y_true]) y_true.shape.assert_is_compatible_with(y_pred.shape) if y_true.dtype != y_pred.dtype: y_pred = tf.cast(y_pred, y_true.dtype) return tf.cast(tf.equal(y_true, y_pred), backend.floatx()) @keras_export('keras.metrics.binary_accuracy') @tf.__internal__.dispatch.add_dispatch_support def binary_accuracy(y_true, y_pred, threshold=0.5): """Calculates 
how often predictions match binary labels. Standalone usage: >>> y_true = [[1], [1], [0], [0]] >>> y_pred = [[1], [1], [0], [0]] >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred) >>> assert m.shape == (4,) >>> m.numpy() array([1., 1., 1., 1.], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Returns: Binary accuracy values. shape = `[batch_size, d0, .. dN-1]` """ y_pred = tf.convert_to_tensor(y_pred) threshold = tf.cast(threshold, y_pred.dtype) y_pred = tf.cast(y_pred > threshold, y_pred.dtype) return backend.mean(tf.equal(y_true, y_pred), axis=-1) @keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support def categorical_accuracy(y_true, y_pred): """Calculates how often predictions match one-hot labels. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as `y_pred`, since argmax of logits and probabilities are same. Args: y_true: One-hot ground truth values. y_pred: The prediction values. Returns: Categorical accuracy values. """ return tf.cast( tf.equal( tf.compat.v1.argmax(y_true, axis=-1), tf.compat.v1.argmax(y_pred, axis=-1)), backend.floatx()) @keras_export('keras.metrics.sparse_categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support def sparse_categorical_accuracy(y_true, y_pred): """Calculates how often predictions match integer labels. 
Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as `y_pred`, since argmax of logits and probabilities are same. Args: y_true: Integer ground truth values. y_pred: The prediction values. Returns: Sparse categorical accuracy values. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.convert_to_tensor(y_true) y_pred_rank = y_pred.shape.ndims y_true_rank = y_true.shape.ndims # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,) if (y_true_rank is not None) and (y_pred_rank is not None) and (len( backend.int_shape(y_true)) == len(backend.int_shape(y_pred))): y_true = tf.squeeze(y_true, [-1]) y_pred = tf.compat.v1.argmax(y_pred, axis=-1) # If the predicted output and actual output types don't match, force cast them # to match. if backend.dtype(y_pred) != backend.dtype(y_true): y_pred = tf.cast(y_pred, backend.dtype(y_true)) return tf.cast(tf.equal(y_true, y_pred), backend.floatx()) @keras_export('keras.metrics.top_k_categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support def top_k_categorical_accuracy(y_true, y_pred, k=5): """Computes how often targets are in the top `K` predictions. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: The ground truth values. y_pred: The prediction values. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Top K categorical accuracy value. 
""" return tf.cast( tf.compat.v1.math.in_top_k( y_pred, tf.compat.v1.argmax(y_true, axis=-1), k), backend.floatx()) @keras_export('keras.metrics.sparse_top_k_categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5): """Computes how often integer targets are in the top `K` predictions. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy( ... y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Sparse top K categorical accuracy value. """ y_pred_rank = tf.convert_to_tensor(y_pred).shape.ndims y_true_rank = tf.convert_to_tensor(y_true).shape.ndims # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,) if (y_true_rank is not None) and (y_pred_rank is not None): if y_pred_rank > 2: y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]]) if y_true_rank > 1: y_true = tf.reshape(y_true, [-1]) return tf.cast( tf.compat.v1.math.in_top_k(y_pred, tf.cast(y_true, 'int32'), k), backend.floatx()) def cosine_proximity(y_true, y_pred, axis=-1): """Computes the cosine similarity between labels and predictions. Args: y_true: The ground truth values. y_pred: The prediction values. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Returns: Cosine similarity value. 
""" y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return tf.reduce_sum(y_true * y_pred, axis=axis) # Aliases acc = ACC = accuracy bce = BCE = binary_crossentropy mse = MSE = mean_squared_error mae = MAE = mean_absolute_error mape = MAPE = mean_absolute_percentage_error msle = MSLE = mean_squared_logarithmic_error cosine_similarity = cosine_proximity log_cosh = logcosh def clone_metric(metric): """Returns a clone of the metric if stateful, otherwise returns it as is.""" if isinstance(metric, Metric): with tf.init_scope(): return metric.__class__.from_config(metric.get_config()) return metric def clone_metrics(metrics): """Clones the given metric list/dict.""" return tf.nest.map_structure(clone_metric, metrics) @keras_export('keras.metrics.serialize') def serialize(metric): """Serializes metric function or `Metric` instance. Args: metric: A Keras `Metric` instance or a metric function. Returns: Metric configuration dictionary. """ return serialize_keras_object(metric) @keras_export('keras.metrics.deserialize') def deserialize(config, custom_objects=None): """Deserializes a serialized metric class/function instance. Args: config: Metric configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras `Metric` instance or a metric function. """ return deserialize_keras_object( config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='metric function') @keras_export('keras.metrics.get') def get(identifier): """Retrieves a Keras metric as a `function`/`Metric` class instance. The `identifier` may be the string name of a metric function or class. 
>>> metric = tf.keras.metrics.get("categorical_crossentropy") >>> type(metric) <class 'function'> >>> metric = tf.keras.metrics.get("CategoricalCrossentropy") >>> type(metric) <class '...keras.metrics.CategoricalCrossentropy'> You can also specify `config` of the metric to this function by passing dict containing `class_name` and `config` as an identifier. Also note that the `class_name` must map to a `Metric` class >>> identifier = {"class_name": "CategoricalCrossentropy", ... "config": {"from_logits": True}} >>> metric = tf.keras.metrics.get(identifier) >>> type(metric) <class '...keras.metrics.CategoricalCrossentropy'> Args: identifier: A metric identifier. One of None or string name of a metric function/class or metric configuration dictionary or a metric function or a metric class instance Returns: A Keras metric as a `function`/ `Metric` class instance. Raises: ValueError: If `identifier` cannot be interpreted. """ if isinstance(identifier, dict): return deserialize(identifier) elif isinstance(identifier, str): return deserialize(str(identifier)) elif callable(identifier): return identifier else: raise ValueError( f'Could not interpret metric identifier: {identifier}') def is_built_in(cls): return cls.__module__ == Metric.__module__
133,570
34.215133
106
py
keras
keras-master/keras/activations.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in activation functions.""" import tensorflow.compat.v2 as tf from keras import backend from keras.layers import advanced_activations from keras.utils.generic_utils import deserialize_keras_object from keras.utils.generic_utils import serialize_keras_object from tensorflow.python.util.tf_export import keras_export # b/123041942 # In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras # layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the # internal method name is returned in serialization. This results in errors in # model exporting and loading as Keras can't find any activation function with # the name of `softmax_v2`. # This dict maps the activation function name from its v2 version to its # canonical name. _TF_ACTIVATIONS_V2 = { 'softmax_v2': 'softmax', } @keras_export('keras.activations.softmax') @tf.__internal__.dispatch.add_dispatch_support def softmax(x, axis=-1): """Softmax converts a vector of values to a probability distribution. The elements of the output vector are in range (0, 1) and sum to 1. Each vector is handled independently. The `axis` argument sets which axis of the input the function is applied along. 
Softmax is often used as the activation for the last layer of a classification network because the result could be interpreted as a probability distribution. The softmax of each vector x is computed as `exp(x) / tf.reduce_sum(exp(x))`. The input values in are the log-odds of the resulting probability. Args: x : Input tensor. axis: Integer, axis along which the softmax normalization is applied. Returns: Tensor, output of softmax transformation (all values are non-negative and sum to 1). Examples: **Example 1: standalone usage** >>> inputs = tf.random.normal(shape=(32, 10)) >>> outputs = tf.keras.activations.softmax(inputs) >>> tf.reduce_sum(outputs[0, :]) # Each sample in the batch now sums to 1 <tf.Tensor: shape=(), dtype=float32, numpy=1.0000001> **Example 2: usage in a `Dense` layer** >>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax) """ if x.shape.rank > 1: if isinstance(axis, int): output = tf.nn.softmax(x, axis=axis) else: # nn.softmax does not support tuple axis. e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True)) s = tf.reduce_sum(e, axis=axis, keepdims=True) output = e / s else: raise ValueError('Cannot apply softmax to a tensor that is 1D. ' f'Received input: {x}') # Cache the logits to use for crossentropy loss. output._keras_logits = x # pylint: disable=protected-access return output @keras_export('keras.activations.elu') @tf.__internal__.dispatch.add_dispatch_support def elu(x, alpha=1.0): """Exponential Linear Unit. The exponential linear unit (ELU) with `alpha > 0` is: `x` if `x > 0` and `alpha * (exp(x) - 1)` if `x < 0` The ELU hyperparameter `alpha` controls the value to which an ELU saturates for negative net inputs. ELUs diminish the vanishing gradient effect. ELUs have negative values which pushes the mean of the activations closer to zero. Mean activations that are closer to zero enable faster learning as they bring the gradient closer to the natural gradient. 
ELUs saturate to a negative value when the argument gets smaller. Saturation means a small derivative which decreases the variation and the information that is propagated to the next layer. Example Usage: >>> import tensorflow as tf >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu', ... input_shape=(28, 28, 1))) >>> model.add(tf.keras.layers.MaxPooling2D((2, 2))) >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu')) >>> model.add(tf.keras.layers.MaxPooling2D((2, 2))) >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu')) <tensorflow.python.keras.engine.sequential.Sequential object ...> Args: x: Input tensor. alpha: A scalar, slope of negative section. `alpha` controls the value to which an ELU saturates for negative net inputs. Returns: The exponential linear unit (ELU) activation function: `x` if `x > 0` and `alpha * (exp(x) - 1)` if `x < 0`. Reference: [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289) """ return backend.elu(x, alpha) @keras_export('keras.activations.selu') @tf.__internal__.dispatch.add_dispatch_support def selu(x): """Scaled Exponential Linear Unit (SELU). The Scaled Exponential Linear Unit (SELU) activation function is defined as: - `if x > 0: return scale * x` - `if x < 0: return scale * alpha * (exp(x) - 1)` where `alpha` and `scale` are pre-defined constants (`alpha=1.67326324` and `scale=1.05070098`). Basically, the SELU activation function multiplies `scale` (> 1) with the output of the `tf.keras.activations.elu` function to ensure a slope larger than one for positive inputs. 
The values of `alpha` and `scale` are chosen so that the mean and variance of the inputs are preserved between two consecutive layers as long as the weights are initialized correctly (see `tf.keras.initializers.LecunNormal` initializer) and the number of input units is "large enough" (see reference paper for more information). Example Usage: >>> num_classes = 10 # 10-class problem >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax')) Args: x: A tensor or variable to compute the activation function for. Returns: The scaled exponential unit activation: `scale * elu(x, alpha)`. Notes: - To be used together with the `tf.keras.initializers.LecunNormal` initializer. - To be used together with the dropout variant `tf.keras.layers.AlphaDropout` (not regular dropout). References: - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515) """ return tf.nn.selu(x) @keras_export('keras.activations.softplus') @tf.__internal__.dispatch.add_dispatch_support def softplus(x): """Softplus activation function, `softplus(x) = log(exp(x) + 1)`. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.softplus(a) >>> b.numpy() array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The softplus activation: `log(exp(x) + 1)`. """ return tf.math.softplus(x) @keras_export('keras.activations.softsign') @tf.__internal__.dispatch.add_dispatch_support def softsign(x): """Softsign activation function, `softsign(x) = x / (abs(x) + 1)`. 
Example Usage: >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.softsign(a) >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) Args: x: Input tensor. Returns: The softsign activation: `x / (abs(x) + 1)`. """ return tf.math.softsign(x) @keras_export('keras.activations.swish') @tf.__internal__.dispatch.add_dispatch_support def swish(x): """Swish activation function, `swish(x) = x * sigmoid(x)`. Swish activation function which returns `x*sigmoid(x)`. It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is unbounded above and bounded below. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.swish(a) >>> b.numpy() array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The swish activation applied to `x` (see reference paper for details). Reference: - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941) """ return tf.nn.silu(x) @keras_export('keras.activations.relu') @tf.__internal__.dispatch.add_dispatch_support def relu(x, alpha=0., max_value=None, threshold=0.): """Applies the rectified linear unit activation function. With default values, this returns the standard ReLU activation: `max(x, 0)`, the element-wise maximum of 0 and the input tensor. Modifying default parameters allows you to use non-zero thresholds, change the max value of the activation, and to use a non-zero multiple of the input for values below the threshold. For example: >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32) >>> tf.keras.activations.relu(foo).numpy() array([ 0., 0., 0., 5., 10.], dtype=float32) >>> tf.keras.activations.relu(foo, alpha=0.5).numpy() array([-5. , -2.5, 0. , 5. , 10. 
], dtype=float32) >>> tf.keras.activations.relu(foo, max_value=5.).numpy() array([0., 0., 0., 5., 5.], dtype=float32) >>> tf.keras.activations.relu(foo, threshold=5.).numpy() array([-0., -0., 0., 0., 10.], dtype=float32) Args: x: Input `tensor` or `variable`. alpha: A `float` that governs the slope for values lower than the threshold. max_value: A `float` that sets the saturation threshold (the largest value the function will return). threshold: A `float` giving the threshold value of the activation function below which values will be damped or set to zero. Returns: A `Tensor` representing the input tensor, transformed by the relu activation function. Tensor will be of the same shape and dtype of input `x`. """ return backend.relu(x, alpha=alpha, max_value=max_value, threshold=threshold) @keras_export('keras.activations.gelu', v1=[]) @tf.__internal__.dispatch.add_dispatch_support def gelu(x, approximate=False): """Applies the Gaussian error linear unit (GELU) activation function. Gaussian error linear unit (GELU) computes `x * P(X <= x)`, where `P(X) ~ N(0, 1)`. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLU. For example: >>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32) >>> y = tf.keras.activations.gelu(x) >>> y.numpy() array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ], dtype=float32) >>> y = tf.keras.activations.gelu(x, approximate=True) >>> y.numpy() array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ], dtype=float32) Args: x: Input tensor. approximate: A `bool`, whether to enable approximation. Returns: The gaussian error linear activation: `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` if `approximate` is `True` or `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where `P(X) ~ N(0, 1)`, if `approximate` is `False`. 
Reference: - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415) """ return tf.nn.gelu(x, approximate) @keras_export('keras.activations.tanh') @tf.__internal__.dispatch.add_dispatch_support def tanh(x): """Hyperbolic tangent activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.tanh(a) >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) Args: x: Input tensor. Returns: Tensor of same shape and dtype of input `x`, with tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`. """ return tf.tanh(x) @keras_export('keras.activations.sigmoid') @tf.__internal__.dispatch.add_dispatch_support def sigmoid(x): """Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`. Applies the sigmoid activation function. For small values (<-5), `sigmoid` returns a value close to zero, and for large values (>5) the result of the function gets close to 1. Sigmoid is equivalent to a 2-element Softmax, where the second element is assumed to be zero. The sigmoid function always returns a value between 0 and 1. For example: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.sigmoid(a) >>> b.numpy() array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01, 1.0000000e+00], dtype=float32) Args: x: Input tensor. Returns: Tensor with the sigmoid activation: `1 / (1 + exp(-x))`. """ output = tf.sigmoid(x) # Cache the logits to use for crossentropy loss. output._keras_logits = x # pylint: disable=protected-access return output @keras_export('keras.activations.exponential') @tf.__internal__.dispatch.add_dispatch_support def exponential(x): """Exponential activation function. 
For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.exponential(a) >>> b.numpy() array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32) Args: x: Input tensor. Returns: Tensor with exponential activation: `exp(x)`. """ return tf.exp(x) @keras_export('keras.activations.hard_sigmoid') @tf.__internal__.dispatch.add_dispatch_support def hard_sigmoid(x): """Hard sigmoid activation function. A faster approximation of the sigmoid activation. Piecewise linear approximation of the sigmoid function. Ref: 'https://en.wikipedia.org/wiki/Hard_sigmoid' For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.hard_sigmoid(a) >>> b.numpy() array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32) Args: x: Input tensor. Returns: The hard sigmoid activation, defined as: - `if x < -2.5: return 0` - `if x > 2.5: return 1` - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5` """ return backend.hard_sigmoid(x) @keras_export('keras.activations.linear') @tf.__internal__.dispatch.add_dispatch_support def linear(x): """Linear activation function (pass-through). For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.linear(a) >>> b.numpy() array([-3., -1., 0., 1., 3.], dtype=float32) Args: x: Input tensor. Returns: The input, unmodified. """ return x @keras_export('keras.activations.serialize') @tf.__internal__.dispatch.add_dispatch_support def serialize(activation): """Returns the string identifier of an activation function. Args: activation : Function object. Returns: String denoting the name attribute of the input function For example: >>> tf.keras.activations.serialize(tf.keras.activations.tanh) 'tanh' >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid) 'sigmoid' >>> tf.keras.activations.serialize('abcd') Traceback (most recent call last): ... 
ValueError: ('Cannot serialize', 'abcd') Raises: ValueError: The input function is not a valid one. """ if (hasattr(activation, '__name__') and activation.__name__ in _TF_ACTIVATIONS_V2): return _TF_ACTIVATIONS_V2[activation.__name__] return serialize_keras_object(activation) # Add additional globals so that deserialize can find these common activation # functions leaky_relu = tf.nn.leaky_relu log_softmax = tf.nn.log_softmax relu6 = tf.nn.relu6 silu = tf.nn.silu @keras_export('keras.activations.deserialize') @tf.__internal__.dispatch.add_dispatch_support def deserialize(name, custom_objects=None): """Returns activation function given a string identifier. Args: name: The name of the activation function. custom_objects: Optional `{function_name: function_obj}` dictionary listing user-provided activation functions. Returns: Corresponding activation function. For example: >>> tf.keras.activations.deserialize('linear') <function linear at 0x1239596a8> >>> tf.keras.activations.deserialize('sigmoid') <function sigmoid at 0x123959510> >>> tf.keras.activations.deserialize('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: `Unknown activation function` if the input string does not denote any defined Tensorflow activation function. """ globs = globals() # only replace missing activations advanced_activations_globs = advanced_activations.get_globals() for key, val in advanced_activations_globs.items(): if key not in globs: globs[key] = val return deserialize_keras_object( name, module_objects=globs, custom_objects=custom_objects, printable_module_name='activation function') @keras_export('keras.activations.get') @tf.__internal__.dispatch.add_dispatch_support def get(identifier): """Returns function. Args: identifier: Function or string Returns: Function corresponding to the input string or input function. 
For example: >>> tf.keras.activations.get('softmax') <function softmax at 0x1222a3d90> >>> tf.keras.activations.get(tf.keras.activations.softmax) <function softmax at 0x1222a3d90> >>> tf.keras.activations.get(None) <function linear at 0x1239596a8> >>> tf.keras.activations.get(abs) <built-in function abs> >>> tf.keras.activations.get('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: Input is an unknown function or string, i.e., the input does not denote any defined function. """ if identifier is None: return linear if isinstance(identifier, str): identifier = str(identifier) return deserialize(identifier) elif isinstance(identifier, dict): return deserialize(identifier) elif callable(identifier): return identifier else: raise TypeError( f'Could not interpret activation function identifier: {identifier}')
19,055
30.549669
80
py
keras
keras-master/keras/__init__.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of the Keras API, the high-level API of TensorFlow. Detailed documentation and user guides are available at [keras.io](https://keras.io). """ # pylint: disable=unused-import from tensorflow.python import tf2 from keras import distribute # See b/110718070#comment18 for more details about this import. from keras import models from keras.engine.input_layer import Input from keras.engine.sequential import Sequential from keras.engine.training import Model from tensorflow.python.util.tf_export import keras_export __version__ = '2.7.0' keras_export('keras.__version__').export_constant(__name__, '__version__')
1,324
35.805556
80
py
keras
keras-master/keras/metrics_functional_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras metrics functions.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import backend from keras import combinations from keras import metrics class KerasFunctionalMetricsTest(tf.test.TestCase, parameterized.TestCase): def test_metrics(self): with self.cached_session(): y_a = backend.variable(np.random.random((6, 7))) y_b = backend.variable(np.random.random((6, 7))) for metric in [metrics.binary_accuracy, metrics.categorical_accuracy]: output = metric(y_a, y_b) self.assertEqual(backend.eval(output).shape, (6,)) def test_sparse_categorical_accuracy_int(self): with self.cached_session(): metric = metrics.sparse_categorical_accuracy y_true = backend.variable(np.random.randint(0, 7, (6,))) y_pred = backend.variable(np.random.random((6, 7))) self.assertEqual(backend.eval(metric(y_true, y_pred)).shape, (6,)) # Test correctness if the shape of y_true is (num_samples,) y_true = backend.variable([1., 0., 0., 0.]) y_pred = backend.variable( [[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]]) self.assertAllEqual( backend.eval(metric(y_true, y_pred)), [0., 1., 1., 1.]) # Test correctness if the shape of y_true is (num_samples, 1) y_true = backend.variable([[1.], [0.], [0.], [0.]]) y_pred = backend.variable( [[0.8, 0.2], [0.6, 0.4], 
[0.7, 0.3], [0.9, 0.1]]) self.assertAllEqual( backend.eval(metric(y_true, y_pred)), [0., 1., 1., 1.]) # Test correctness if the shape of y_true is (batch_size, seq_length) and # y_pred is (batch_size, seq_length, num_classes) y_pred = backend.variable( np.array([[[0.2, 0.3, 0.1], [0.1, 0.2, 0.7]], [[0.3, 0.2, 0.1], [0.7, 0.2, 0.1]]])) y_true = backend.variable(np.array([[1, 0], [1, 0]])) self.assertAllEqual( backend.eval(metric(y_true, y_pred)), [[1., 0.], [0., 1.]]) def test_sparse_categorical_accuracy_float(self): with self.cached_session(): metric = metrics.sparse_categorical_accuracy y_true = backend.variable(np.random.random((6,))) y_pred = backend.variable(np.random.random((6, 7))) self.assertEqual(backend.eval(metric(y_true, y_pred)).shape, (6,)) @combinations.generate(combinations.combine(mode=['eager'])) def test_sparse_categorical_accuracy_eager(self): """Tests that ints passed in via Eager return results. See b/113504761.""" metric = metrics.sparse_categorical_accuracy y_true = np.arange(6).reshape([6, 1]) y_pred = np.arange(36).reshape([6, 6]) self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.]) @combinations.generate(combinations.combine(mode=['eager'])) def test_sparse_categorical_accuracy_float_eager(self): """Tests that floats passed in via Eager return results. 
See b/113504761.""" metric = metrics.sparse_categorical_accuracy y_true = np.arange(6, dtype=np.float32).reshape([6, 1]) y_pred = np.arange(36).reshape([6, 6]) self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.]) def test_sparse_top_k_categorical_accuracy(self): with self.cached_session(): # Test correctness if the shape of y_true is (num_samples, 1) y_pred = backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])) y_true = backend.variable(np.array([[1], [0]])) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3)) self.assertEqual(np.mean(result), 1) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)) self.assertEqual(np.mean(result), 0.5) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1)) self.assertEqual(np.mean(result), 0.) # Test correctness if the shape of y_true is (num_samples,) y_pred = backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])) y_true = backend.variable(np.array([1, 0])) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3)) self.assertEqual(np.mean(result), 1) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)) self.assertEqual(np.mean(result), 0.5) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1)) self.assertEqual(np.mean(result), 0.) 
# Test correctness if the shape of y_true is (batch_size, seq_length) and # y_pred is (batch_size, seq_length, num_classes) y_pred = backend.variable( np.array([[[0.3, 0.2, 0.1], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]], [[0.3, 0.2, 0.1], [0.1, 0.2, 0.7], [0.3, 0.2, 0.1]]])) y_true = backend.variable(np.array([[1, 0, 0], [1, 0, 1]])) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3)) self.assertEqual(np.mean(result), 1) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)) self.assertEqual(np.mean(result), 0.5) result = backend.eval( metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1)) self.assertEqual(np.mean(result), 0.) def test_top_k_categorical_accuracy(self): with self.cached_session(): y_pred = backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])) y_true = backend.variable(np.array([[0, 1, 0], [1, 0, 0]])) result = backend.eval( metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)) self.assertEqual(np.mean(result), 1) result = backend.eval( metrics.top_k_categorical_accuracy(y_true, y_pred, k=2)) self.assertEqual(np.mean(result), 0.5) result = backend.eval( metrics.top_k_categorical_accuracy(y_true, y_pred, k=1)) self.assertEqual(np.mean(result), 0.) if __name__ == '__main__': tf.test.main()
6,740
43.642384
80
py
keras
keras-master/keras/metrics_correctness_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests metrics correctness using Keras model.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import keras_parameterized from keras import layers from keras import losses from keras import metrics from keras import testing_utils from keras.utils import losses_utils def get_multi_io_model(): inp_1 = layers.Input(shape=(1,), name='input_1') inp_2 = layers.Input(shape=(1,), name='input_2') x = layers.Dense(3, kernel_initializer='ones', trainable=False) out_1 = layers.Dense( 1, kernel_initializer='ones', name='output_1', trainable=False) out_2 = layers.Dense( 1, kernel_initializer='ones', name='output_2', trainable=False) branch_a = [inp_1, x, out_1] branch_b = [inp_2, x, out_2] return testing_utils.get_multi_io_model(branch_a, branch_b) def custom_generator_multi_io(sample_weights=None): batch_size = 2 num_samples = 5 inputs = np.asarray([[1.], [2.], [3.], [4.], [5.]]) targets_1 = np.asarray([[2.], [4.], [6.], [8.], [10.]]) targets_2 = np.asarray([[1.], [2.], [3.], [4.], [5.]]) start = 0 while True: if start > num_samples: start = 0 end = start + batch_size x = [inputs[start:end], inputs[start:end]] y = [targets_1[start:end], targets_2[start:end]] if sample_weights: sw = tf.nest.map_structure(lambda w: w[start:end], sample_weights) else: 
sw = None start = end yield x, y, sw @keras_parameterized.run_with_all_model_types(exclude_models=['sequential']) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase): def _get_compiled_multi_io_model(self): model = get_multi_io_model() model.compile( optimizer='rmsprop', loss='mse', metrics=[metrics.MeanSquaredError(name='mean_squared_error')], weighted_metrics=[ metrics.MeanSquaredError(name='mean_squared_error_2') ], run_eagerly=testing_utils.should_run_eagerly()) return model def setUp(self): super(TestMetricsCorrectnessMultiIO, self).setUp() self.x = np.asarray([[1.], [2.], [3.], [4.], [5.]]) self.y1 = np.asarray([[2.], [4.], [6.], [8.], [10.]]) self.y2 = np.asarray([[1.], [2.], [3.], [4.], [5.]]) self.sample_weight_1 = np.asarray([2., 3., 4., 5., 6.]) self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5, 3.]) # y_true_1 = [[2.], [4.], [6.], [8.], [10.]] # y_pred_1 = [[3.], [6.], [9.], [12.], [15.]] # y_true_2 = [[1.], [2.], [3.], [4.], [5.]] # y_pred_2 = [[3.], [6.], [9.], [12.], [15.]] # Weighted metric `output_1`: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) + # ((15 - 10)^2 * 6) # = 280 # Count = (2 + 3) + (4 + 5) + 6 = 20 # Result = 14 # Weighted metric `output_2`: # Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) + # ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5) + # (15 - 5)^2 * 3.0 # = 440 # Count = (3.5 + 2.5) + (1.5 + 0.5) + 3.0 = 11.0 # Result = 40 # Loss `output_1` with weights: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) + # ((15 - 10)^2 * 6) # = 280 # Count = 2 + 2 + 1 # Result = 56 # Loss `output_1` without weights/Metric `output_1`: # Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) + (15 - 10)^2 # = 55 # Count = 2 + 2 + 1 # Result = 11 # Loss `output_2` with weights: # Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) + # ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5) + # (15 - 5)^2 * 3.0 # = 440 # Count = 2 + 2 + 1 # 
Result = 88 # Loss `output_2` without weights/Metric `output_2`: # Total = ((3 - 1)^2 + (6 - 2)^2) + ((9 - 3)^2 + (12 - 4)^2) + (15 - 5)^2 # = 220 # Count = 2 + 2 + 1 # Result = 44 # Total loss with weights = 56 + 88 = 144 # Total loss without weights = 11 + 44 = 55 self.wmse = 'mean_squared_error_2' self.expected_fit_result_with_weights = { 'output_1_mean_squared_error': [11, 11], 'output_2_mean_squared_error': [44, 44], 'output_1_' + self.wmse: [14, 14], 'output_2_' + self.wmse: [40, 40], 'loss': [144, 144], 'output_1_loss': [56, 56], 'output_2_loss': [88, 88], } self.expected_fit_result_with_weights_output_2 = { 'output_1_mean_squared_error': [11, 11], 'output_2_mean_squared_error': [44, 44], 'output_1_' + self.wmse: [11, 11], 'output_2_' + self.wmse: [40, 40], 'loss': [99, 99], 'output_1_loss': [11, 11], 'output_2_loss': [88, 88], } self.expected_fit_result = { 'output_1_mean_squared_error': [11, 11], 'output_2_mean_squared_error': [44, 44], 'output_1_' + self.wmse: [11, 11], 'output_2_' + self.wmse: [44, 44], 'loss': [55, 55], 'output_1_loss': [11, 11], 'output_2_loss': [44, 44], } # In the order: 'loss', 'output_1_loss', 'output_2_loss', # 'output_1_mean_squared_error', 'output_1_mean_squared_error_2', # 'output_2_mean_squared_error', 'output_2_mean_squared_error_2' self.expected_batch_result_with_weights = [144, 56, 88, 11, 14, 44, 40] self.expected_batch_result_with_weights_output_2 = [ 99, 11, 88, 11, 11, 44, 40 ] self.expected_batch_result = [55, 11, 44, 11, 11, 44, 44] def test_fit(self): model = self._get_compiled_multi_io_model() history = model.fit([self.x, self.x], [self.y1, self.y2], batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_with_sample_weight(self): model = self._get_compiled_multi_io_model() history = model.fit([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, 
}, batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) # Set weights for one output (use batch size). history = model.fit([self.x, self.x], [self.y1, self.y2], sample_weight={'output_2': self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result_with_weights_output_2.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2], batch_size=2) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_with_sample_weight(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2], batch_size=2, sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }) self.assertAllClose(eval_result, self.expected_batch_result_with_weights, 1e-3) # Set weights for one output. model = self._get_compiled_multi_io_model() eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2], batch_size=2, sample_weight={ 'output_2': self.sample_weight_2, }) self.assertAllClose(eval_result, self.expected_batch_result_with_weights_output_2, 1e-3) # Verify that metric value is same with arbitrary weights and batch size. 
x = np.random.random((50, 1)) y = np.random.random((50, 1)) w = np.random.random((50,)) mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w], batch_size=5)[3] mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w], batch_size=10)[3] self.assertAllClose(mse1, mse2, 1e-3) def test_train_on_batch(self): model = self._get_compiled_multi_io_model() result = model.train_on_batch([self.x, self.x], [self.y1, self.y2]) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_train_on_batch_with_sample_weight(self): model = self._get_compiled_multi_io_model() result = model.train_on_batch([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }) self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3) # Set weights for one output. result = model.train_on_batch([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_2': self.sample_weight_2, }) self.assertAllClose(result, self.expected_batch_result_with_weights_output_2, 1e-3) def test_test_on_batch(self): model = self._get_compiled_multi_io_model() result = model.test_on_batch([self.x, self.x], [self.y1, self.y2]) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_test_on_batch_with_sample_weight(self): model = self._get_compiled_multi_io_model() result = model.test_on_batch([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }) self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3) # Set weights for one output. 
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_2': self.sample_weight_2, }) self.assertAllClose(result, self.expected_batch_result_with_weights_output_2, 1e-3) def test_fit_generator(self): model = self._get_compiled_multi_io_model() history = model.fit_generator( custom_generator_multi_io(), steps_per_epoch=3, epochs=2) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_generator_with_sample_weight(self): model = self._get_compiled_multi_io_model() history = model.fit_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2]), steps_per_epoch=3, epochs=2) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) # Set weights for one output. history = model.fit_generator( custom_generator_multi_io( sample_weights={'output_2': self.sample_weight_2}), steps_per_epoch=3, epochs=2) for key, value in self.expected_fit_result_with_weights_output_2.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval_generator(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate_generator(custom_generator_multi_io(), steps=3) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_generator_with_sample_weight(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2]), steps=3) self.assertAllClose(eval_result, self.expected_batch_result_with_weights, 1e-3) # Set weights for one output. 
eval_result = model.evaluate_generator( custom_generator_multi_io( sample_weights={'output_2': self.sample_weight_2}), steps=3) self.assertAllClose(eval_result, self.expected_batch_result_with_weights_output_2, 1e-3) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase): def _get_model(self): x = layers.Dense(3, kernel_initializer='ones', trainable=False) out = layers.Dense( 1, kernel_initializer='ones', name='output', trainable=False) model = testing_utils.get_model_from_layers([x, out], input_shape=(1,)) model.compile( optimizer='rmsprop', loss='mse', metrics=[metrics.MeanSquaredError(name='mean_squared_error')], weighted_metrics=[ metrics.MeanSquaredError(name='mean_squared_error_2') ], run_eagerly=testing_utils.should_run_eagerly()) return model def _custom_generator(self, sample_weight=None): batch_size = 2 num_samples = 4 x = np.asarray([[1.], [2.], [3.], [4.]]) y = np.asarray([[2.], [4.], [6.], [8.]]) w = sample_weight i = 0 while True: batch_index = i * batch_size % num_samples i += 1 start = batch_index end = start + batch_size yield x[start:end], y[start:end], None if w is None else w[start:end] def setUp(self): super(TestMetricsCorrectnessSingleIO, self).setUp() self.x = np.asarray([[1.], [2.], [3.], [4.]]) self.y = np.asarray([[2.], [4.], [6.], [8.]]) self.sample_weight = np.asarray([2., 3., 4., 5.]) self.class_weight = {i: 1 for i in range(10)} self.class_weight.update({2: 2, 4: 3, 6: 4, 8: 5}) # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]] # Metric: # Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30, # Count = 2 + 2 # Result = 7.5 # Weighted metric: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) # = 130 # Count = (2 + 3) + (4 + 5) # Result = 9.2857141 # Total loss with weights: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) # = 
130, # Count = 2 + 2 # Result = 32.5 # Total loss without weights: # Total = ((3 - 2)^2 + (6 - 4)^2) + # ((9 - 6)^2 + (12 - 8)^2) # = 30, # Count = 2 + 2 # Result = 7.5 wmse = 'mean_squared_error_2' self.expected_fit_result_with_weights = { 'mean_squared_error': [7.5, 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5] } self.expected_fit_result = { 'mean_squared_error': [7.5, 7.5], wmse: [7.5, 7.5], 'loss': [7.5, 7.5] } # In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2' self.expected_batch_result_with_weights = [32.5, 7.5, 9.286] self.expected_batch_result = [7.5, 7.5, 7.5] def test_fit(self): model = self._get_model() history = model.fit( self.x, self.y, batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_with_sample_weight(self): model = self._get_model() history = model.fit( self.x, self.y, sample_weight=self.sample_weight, batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_with_class_weight(self): model = self._get_model() history = model.fit( self.x, self.y, class_weight=self.class_weight, batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval(self): model = self._get_model() eval_result = model.evaluate(self.x, self.y, batch_size=2) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_with_sample_weight(self): model = self._get_model() eval_result = model.evaluate( self.x, self.y, batch_size=2, sample_weight=self.sample_weight) self.assertAllClose(eval_result, self.expected_batch_result_with_weights, 1e-3) # Verify that metric value is same with arbitrary weights and batch size. 
x = np.random.random((50, 1)) y = np.random.random((50, 1)) w = np.random.random((50,)) mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1] mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1] self.assertAllClose(mse1, mse2, 1e-3) def test_train_on_batch(self): model = self._get_model() result = model.train_on_batch(self.x, self.y) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_train_on_batch_with_sample_weight(self): model = self._get_model() result = model.train_on_batch( self.x, self.y, sample_weight=self.sample_weight) self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3) def test_train_on_batch_with_class_weight(self): model = self._get_model() result = model.train_on_batch( self.x, self.y, class_weight=self.class_weight) self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3) def test_test_on_batch(self): model = self._get_model() result = model.test_on_batch(self.x, self.y) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_test_on_batch_with_sample_weight(self): model = self._get_model() result = model.test_on_batch( self.x, self.y, sample_weight=self.sample_weight) self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3) def test_fit_generator(self): model = self._get_model() history = model.fit_generator( self._custom_generator(), steps_per_epoch=2, epochs=2) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_generator_with_sample_weight(self): model = self._get_model() history = model.fit_generator( self._custom_generator(sample_weight=self.sample_weight), steps_per_epoch=2, epochs=2) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_generator_with_class_weight(self): model = self._get_model() history = model.fit_generator( self._custom_generator(), steps_per_epoch=2, epochs=2, 
class_weight=self.class_weight) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval_generator(self): model = self._get_model() eval_result = model.evaluate_generator(self._custom_generator(), steps=2) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_generator_with_sample_weight(self): model = self._get_model() eval_result = model.evaluate_generator( self._custom_generator(sample_weight=self.sample_weight), steps=2) self.assertAllClose(eval_result, self.expected_batch_result_with_weights, 1e-3) @keras_parameterized.run_with_all_model_types(exclude_models=['sequential']) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @parameterized.parameters([ losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE, losses_utils.ReductionV2.AUTO, losses_utils.ReductionV2.SUM ]) class TestOutputLossMetrics(keras_parameterized.TestCase): def _get_compiled_multi_io_model(self, loss): model = get_multi_io_model() model.compile( optimizer='rmsprop', loss=loss, run_eagerly=testing_utils.should_run_eagerly()) return model def setUp(self): super(TestOutputLossMetrics, self).setUp() self.x = np.asarray([[1.], [2.], [3.], [4.], [5.]]) self.y1 = np.asarray([[2.], [4.], [6.], [8.], [10.]]) self.y2 = np.asarray([[1.], [2.], [3.], [4.], [5.]]) self.sample_weight_1 = np.asarray([2., 3., 4., 5., 6.]) self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5, 3.]) # y_true_1 = [[2.], [4.], [6.], [8.], [10.]] # y_pred_1 = [[3.], [6.], [9.], [12.], [15.]] # y_true_2 = [[1.], [2.], [3.], [4.], [5.]] # y_pred_2 = [[3.], [6.], [9.], [12.], [15.]] # Loss `output_1`: # Per-sample weighted losses # Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3)] = [2, 12] # Batch 2 = [((9 - 6)^2 * 4, (12 - 8)^2 * 5)] = [36, 80] # Batch 3 = [(15 - 10)^2 * 6] = [150] # Result (reduction=SUM) = ((2 + 12)*2 + (36 + 80)*2 + 150) / 5 = 82 # Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 280 / 5 = 56 # Loss `output_2`: # 
Per-sample weighted losses # Batch 1 = [(3 - 1)^2 * 3.5, (6 - 2)^2 * 2.5)] = [14, 40] # Batch 2 = [(9 - 3)^2 * 1.5, (12 - 4)^2 * 0.5)] = [54, 32] # Batch 3 = [(15 - 5)^2 * 3] = [300] # Result (reduction=SUM) = ((14 + 40)*2 + (54 + 32)*2 + 300) / 5 = 116 # Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 440 / 5 = 88 # When reduction is 'NONE' loss value that is passed to the optimizer will # be vector loss but what is reported is a scalar, which is an average of # all the values in all the batch vectors. # Total loss = Output_loss_1 + Output_loss_2 sum_over_batch_size_fit_result = { 'loss': [144, 144], 'output_1_loss': [56, 56], 'output_2_loss': [88, 88], } self.expected_fit_result = { losses_utils.ReductionV2.NONE: sum_over_batch_size_fit_result, losses_utils.ReductionV2.SUM: { 'loss': [198, 198], 'output_1_loss': [82, 82], 'output_2_loss': [116, 116], }, losses_utils.ReductionV2.AUTO: sum_over_batch_size_fit_result, losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result, } # In the order: 'loss', 'output_1_loss', 'output_2_loss', self.expected_batch_result = { losses_utils.ReductionV2.NONE: [144, 56, 88], losses_utils.ReductionV2.SUM: [198, 82, 116], losses_utils.ReductionV2.AUTO: [144, 56, 88], losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: [144, 56, 88], } # 2 + 12 + 36 + 80 + 150 = 280 # 14 + 40 + 54 + 32 + 300 = 440 self.expected_single_batch_result = [720, 280, 440] def test_fit(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction)) history = model.fit([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }, batch_size=2, epochs=2, shuffle=False) for key, value in self.expected_fit_result[reduction].items(): self.assertAllClose(history.history[key], value) def test_eval(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction)) eval_result = 
model.evaluate([self.x, self.x], [self.y1, self.y2], batch_size=2, sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }) self.assertAllClose(eval_result, self.expected_batch_result[reduction]) def test_train_on_batch(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction)) result = model.train_on_batch([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }) expected_values = self.expected_batch_result[reduction] if reduction == losses_utils.ReductionV2.SUM: expected_values = self.expected_single_batch_result self.assertAllClose(result, expected_values) def test_test_on_batch(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction)) result = model.test_on_batch([self.x, self.x], [self.y1, self.y2], sample_weight={ 'output_1': self.sample_weight_1, 'output_2': self.sample_weight_2, }) expected_values = self.expected_batch_result[reduction] if reduction == losses_utils.ReductionV2.SUM: expected_values = self.expected_single_batch_result self.assertAllClose(result, expected_values) def test_fit_generator(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction)) history = model.fit_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2]), steps_per_epoch=3, epochs=2) for key, value in self.expected_fit_result[reduction].items(): self.assertAllClose(history.history[key], value) def test_eval_generator(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction)) eval_result = model.evaluate_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2]), steps=3) self.assertAllClose(eval_result, self.expected_batch_result[reduction]) if __name__ == '__main__': tf.test.main()
27,469
37.473389
80
py
keras
keras-master/keras/activations_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras activation functions.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import activations from keras import backend from keras import combinations from keras.layers import advanced_activations from keras.layers import core from keras.layers import serialization def _ref_softmax(values): m = np.max(values) e = np.exp(values - m) return e / np.sum(e) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class KerasActivationsTest(tf.test.TestCase, parameterized.TestCase): def test_serialization(self): all_activations = [ 'softmax', 'relu', 'elu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear', 'softplus', 'softsign', 'selu', 'gelu', 'relu6' ] for name in all_activations: fn = activations.get(name) ref_fn = getattr(activations, name) assert fn == ref_fn config = activations.serialize(fn) fn = activations.deserialize(config) assert fn == ref_fn def test_serialization_v2(self): activation_map = {tf.math.softmax: 'softmax'} for fn_v2_key in activation_map: fn_v2 = activations.get(fn_v2_key) config = activations.serialize(fn_v2) fn = activations.deserialize(config) assert fn.__name__ == activation_map[fn_v2_key] def test_serialization_with_layers(self): activation = advanced_activations.LeakyReLU(alpha=0.1) layer 
= core.Dense(3, activation=activation) config = serialization.serialize(layer) # with custom objects deserialized_layer = serialization.deserialize( config, custom_objects={'LeakyReLU': activation}) self.assertEqual(deserialized_layer.__class__.__name__, layer.__class__.__name__) self.assertEqual(deserialized_layer.activation.__class__.__name__, activation.__class__.__name__) # without custom objects deserialized_layer = serialization.deserialize(config) self.assertEqual(deserialized_layer.__class__.__name__, layer.__class__.__name__) self.assertEqual(deserialized_layer.activation.__class__.__name__, activation.__class__.__name__) def test_softmax(self): x = backend.placeholder(ndim=2) f = backend.function([x], [activations.softmax(x)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = _ref_softmax(test_values[0]) self.assertAllClose(result[0], expected, rtol=1e-05) x = backend.placeholder(ndim=1) with self.assertRaises(ValueError): activations.softmax(x) def test_softmax_2d_axis_0(self): x = backend.placeholder(ndim=2) f = backend.function([x], [activations.softmax(x, axis=0)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = np.zeros((2, 5)) for i in range(5): expected[:, i] = _ref_softmax(test_values[:, i]) self.assertAllClose(result, expected, rtol=1e-05) def test_softmax_3d_axis_tuple(self): x = backend.placeholder(ndim=3) f = backend.function([x], [activations.softmax(x, axis=(1, 2))]) test_values = np.random.random((2, 3, 5)) result = f([test_values])[0] expected = np.zeros((2, 3, 5)) for i in range(2): expected[i, :, :] = _ref_softmax(test_values[i, :, :]) self.assertAllClose(result, expected, rtol=1e-05) def test_temporal_softmax(self): x = backend.placeholder(shape=(2, 2, 3)) f = backend.function([x], [activations.softmax(x)]) test_values = np.random.random((2, 2, 3)) * 10 result = f([test_values])[0] expected = _ref_softmax(test_values[0, 0]) self.assertAllClose(result[0, 0], expected, 
rtol=1e-05) def test_selu(self): x = backend.placeholder(ndim=2) f = backend.function([x], [activations.selu(x)]) alpha = 1.6732632423543772848170429916717 scale = 1.0507009873554804934193349852946 positive_values = np.array([[1, 2]], dtype=backend.floatx()) result = f([positive_values])[0] self.assertAllClose(result, positive_values * scale, rtol=1e-05) negative_values = np.array([[-1, -2]], dtype=backend.floatx()) result = f([negative_values])[0] true_result = (np.exp(negative_values) - 1) * scale * alpha self.assertAllClose(result, true_result) def test_softplus(self): def softplus(x): return np.log(np.ones_like(x) + np.exp(x)) x = backend.placeholder(ndim=2) f = backend.function([x], [activations.softplus(x)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = softplus(test_values) self.assertAllClose(result, expected, rtol=1e-05) def test_softsign(self): def softsign(x): return np.divide(x, np.ones_like(x) + np.absolute(x)) x = backend.placeholder(ndim=2) f = backend.function([x], [activations.softsign(x)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = softsign(test_values) self.assertAllClose(result, expected, rtol=1e-05) def test_sigmoid(self): def ref_sigmoid(x): if x >= 0: return 1 / (1 + np.exp(-x)) else: z = np.exp(x) return z / (1 + z) sigmoid = np.vectorize(ref_sigmoid) x = backend.placeholder(ndim=2) f = backend.function([x], [activations.sigmoid(x)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = sigmoid(test_values) self.assertAllClose(result, expected, rtol=1e-05) def test_hard_sigmoid(self): def ref_hard_sigmoid(x): x = (x * 0.2) + 0.5 z = 0.0 if x <= 0 else (1.0 if x >= 1 else x) return z hard_sigmoid = np.vectorize(ref_hard_sigmoid) x = backend.placeholder(ndim=2) f = backend.function([x], [activations.hard_sigmoid(x)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = hard_sigmoid(test_values) self.assertAllClose(result, 
expected, rtol=1e-05) def test_relu(self): x = backend.placeholder(ndim=2) f = backend.function([x], [activations.relu(x)]) positive_values = np.random.random((2, 5)) result = f([positive_values])[0] self.assertAllClose(result, positive_values, rtol=1e-05) negative_values = np.random.uniform(-1, 0, (2, 5)) result = f([negative_values])[0] expected = np.zeros((2, 5)) self.assertAllClose(result, expected, rtol=1e-05) def test_gelu(self): def gelu(x, approximate=False): if approximate: return 0.5 * x * (1.0 + np.tanh( np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3)))) else: from scipy.stats import norm # pylint: disable=g-import-not-at-top return x * norm.cdf(x) x = backend.placeholder(ndim=2) f = backend.function([x], [activations.gelu(x)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = gelu(test_values) self.assertAllClose(result, expected, rtol=1e-05) f = backend.function([x], [activations.gelu(x, True)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] expected = gelu(test_values, True) self.assertAllClose(result, expected, rtol=1e-05) def test_elu(self): x = backend.placeholder(ndim=2) f = backend.function([x], [activations.elu(x, 0.5)]) test_values = np.random.random((2, 5)) result = f([test_values])[0] self.assertAllClose(result, test_values, rtol=1e-05) negative_values = np.array([[-1, -2]], dtype=backend.floatx()) result = f([negative_values])[0] true_result = (np.exp(negative_values) - 1) / 2 self.assertAllClose(result, true_result) def test_tanh(self): test_values = np.random.random((2, 5)) x = backend.placeholder(ndim=2) exp = activations.tanh(x) f = backend.function([x], [exp]) result = f([test_values])[0] expected = np.tanh(test_values) self.assertAllClose(result, expected, rtol=1e-05) def test_exponential(self): test_values = np.random.random((2, 5)) x = backend.placeholder(ndim=2) exp = activations.exponential(x) f = backend.function([x], [exp]) result = f([test_values])[0] expected = 
np.exp(test_values) self.assertAllClose(result, expected, rtol=1e-05) def test_linear(self): x = np.random.random((10, 5)) self.assertAllClose(x, activations.linear(x)) def test_invalid_usage(self): with self.assertRaises(ValueError): activations.get('unknown') # The following should be possible but should raise a warning: activations.get(advanced_activations.LeakyReLU()) if __name__ == '__main__': tf.test.main()
9,323
34.452471
80
py
keras
keras-master/keras/estimator/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras estimator API.""" import tensorflow.compat.v2 as tf from tensorflow.python.util.tf_export import keras_export # Keras has undeclared dependency on tensorflow/estimator:estimator_py. # As long as you depend //third_party/py/tensorflow:tensorflow target # everything will work as normal. _model_to_estimator_usage_gauge = tf.__internal__.monitoring.BoolGauge( '/tensorflow/api/keras/model_to_estimator', 'Whether tf.keras.estimator.model_to_estimator() is called.', 'version') # LINT.IfChange @keras_export(v1=['keras.estimator.model_to_estimator']) def model_to_estimator( keras_model=None, keras_model_path=None, custom_objects=None, model_dir=None, config=None, checkpoint_format='saver', metric_names_map=None, export_outputs=None): """Constructs an `Estimator` instance from given keras model. If you use infrastructure or other tooling that relies on Estimators, you can still build a Keras model and use model_to_estimator to convert the Keras model to an Estimator for use with downstream systems. For usage example, please see: [Creating estimators from Keras Models]( https://www.tensorflow.org/guide/estimator#create_an_estimator_from_a_keras_model). 
Sample Weights: Estimators returned by `model_to_estimator` are configured so that they can handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample weights when training or evaluating the Estimator, the first item returned by the input function should be a dictionary with keys `features` and `sample_weights`. Example below: ```python keras_model = tf.keras.Model(...) keras_model.compile(...) estimator = tf.keras.estimator.model_to_estimator(keras_model) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Example with customized export signature: ```python inputs = {'a': tf.keras.Input(..., name='a'), 'b': tf.keras.Input(..., name='b')} outputs = {'c': tf.keras.layers.Dense(..., name='c')(inputs['a']), 'd': tf.keras.layers.Dense(..., name='d')(inputs['b'])} keras_model = tf.keras.Model(inputs, outputs) keras_model.compile(...) export_outputs = {'c': tf.estimator.export.RegressionOutput, 'd': tf.estimator.export.ClassificationOutput} estimator = tf.keras.estimator.model_to_estimator( keras_model, export_outputs=export_outputs) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Args: keras_model: A compiled Keras model object. This argument is mutually exclusive with `keras_model_path`. Estimator's `model_fn` uses the structure of the model to clone the model. Defaults to `None`. keras_model_path: Path to a compiled Keras model saved on disk, in HDF5 format, which can be generated with the `save()` method of a Keras model. This argument is mutually exclusive with `keras_model`. Defaults to `None`. custom_objects: Dictionary for cloning customized objects. This is used with classes that is not part of this pip package. 
For example, if user maintains a `relu6` class that inherits from `tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`. Defaults to `None`. model_dir: Directory to save `Estimator` model parameters, graph, summary files for TensorBoard, etc. If unset a directory will be created with `tempfile.mkdtemp` config: `RunConfig` to config `Estimator`. Allows setting up things in `model_fn` based on configuration such as `num_ps_replicas`, or `model_dir`. Defaults to `None`. If both `config.model_dir` and the `model_dir` argument (above) are specified the `model_dir` **argument** takes precedence. checkpoint_format: Sets the format of the checkpoint saved by the estimator when training. May be `saver` or `checkpoint`, depending on whether to save checkpoints from `tf.train.Saver` or `tf.train.Checkpoint`. This argument currently defaults to `saver`. When 2.0 is released, the default will be `checkpoint`. Estimators use name-based `tf.train.Saver` checkpoints, while Keras models use object-based checkpoints from `tf.train.Checkpoint`. Currently, saving object-based checkpoints from `model_to_estimator` is only supported by Functional and Sequential models. Defaults to 'saver'. metric_names_map: Optional dictionary mapping Keras model output metric names to custom names. This can be used to override the default Keras model output metrics names in a multi IO model use case and provide custom names for the `eval_metric_ops` in Estimator. The Keras model metric names can be obtained using `model.metrics_names` excluding any loss metrics such as total loss and output losses. For example, if your Keras model has two outputs `out_1` and `out_2`, with `mse` loss and `acc` metric, then `model.metrics_names` will be `['loss', 'out_1_loss', 'out_2_loss', 'out_1_acc', 'out_2_acc']`. The model metric names excluding the loss metrics will be `['out_1_acc', 'out_2_acc']`. export_outputs: Optional dictionary. 
This can be used to override the default Keras model output exports in a multi IO model use case and provide custom names for the `export_outputs` in `tf.estimator.EstimatorSpec`. Default is None, which is equivalent to {'serving_default': `tf.estimator.export.PredictOutput`}. If not None, the keys must match the keys of `model.output_names`. A dict `{name: output}` where: * name: An arbitrary name for this output. * output: an `ExportOutput` class such as `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. Single-headed models only need to specify one entry in this dictionary. Multi-headed models should specify one entry for each head, one of which must be named using `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY` If no entry is provided, a default `PredictOutput` mapping to `predictions` will be created. Returns: An Estimator from given keras model. Raises: ValueError: If neither keras_model nor keras_model_path was given. ValueError: If both keras_model and keras_model_path was given. ValueError: If the keras_model_path is a GCS URI. ValueError: If keras_model has not been compiled. ValueError: If an invalid checkpoint_format was given. 
""" try: from tensorflow_estimator.python.estimator import keras_lib # pylint: disable=g-import-not-at-top except ImportError: raise NotImplementedError( 'tf.keras.estimator.model_to_estimator function not available in your ' 'installation.') _model_to_estimator_usage_gauge.get_cell('v1').set(True) return keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg keras_model=keras_model, keras_model_path=keras_model_path, custom_objects=custom_objects, model_dir=model_dir, config=config, checkpoint_format=checkpoint_format, use_v2_estimator=False, metric_names_map=metric_names_map, export_outputs=export_outputs) @keras_export('keras.estimator.model_to_estimator', v1=[]) def model_to_estimator_v2(keras_model=None, keras_model_path=None, custom_objects=None, model_dir=None, config=None, checkpoint_format='checkpoint', metric_names_map=None, export_outputs=None): """Constructs an `Estimator` instance from given keras model. If you use infrastructure or other tooling that relies on Estimators, you can still build a Keras model and use model_to_estimator to convert the Keras model to an Estimator for use with downstream systems. For usage example, please see: [Creating estimators from Keras Models]( https://www.tensorflow.org/guide/estimators#creating_estimators_from_keras_models). Sample Weights: Estimators returned by `model_to_estimator` are configured so that they can handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample weights when training or evaluating the Estimator, the first item returned by the input function should be a dictionary with keys `features` and `sample_weights`. Example below: ```python keras_model = tf.keras.Model(...) keras_model.compile(...) 
estimator = tf.keras.estimator.model_to_estimator(keras_model) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Example with customized export signature: ```python inputs = {'a': tf.keras.Input(..., name='a'), 'b': tf.keras.Input(..., name='b')} outputs = {'c': tf.keras.layers.Dense(..., name='c')(inputs['a']), 'd': tf.keras.layers.Dense(..., name='d')(inputs['b'])} keras_model = tf.keras.Model(inputs, outputs) keras_model.compile(...) export_outputs = {'c': tf.estimator.export.RegressionOutput, 'd': tf.estimator.export.ClassificationOutput} estimator = tf.keras.estimator.model_to_estimator( keras_model, export_outputs=export_outputs) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Note: We do not support creating weighted metrics in Keras and converting them to weighted metrics in the Estimator API using `model_to_estimator`. You will have to create these metrics directly on the estimator spec using the `add_metrics` function. 
To customize the estimator `eval_metric_ops` names, you can pass in the `metric_names_map` dictionary mapping the keras model output metric names to the custom names as follows: ```python input_a = tf.keras.layers.Input(shape=(16,), name='input_a') input_b = tf.keras.layers.Input(shape=(16,), name='input_b') dense = tf.keras.layers.Dense(8, name='dense_1') interm_a = dense(input_a) interm_b = dense(input_b) merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge') output_a = tf.keras.layers.Dense(3, activation='softmax', name='dense_2')( merged) output_b = tf.keras.layers.Dense(2, activation='softmax', name='dense_3')( merged) keras_model = tf.keras.models.Model( inputs=[input_a, input_b], outputs=[output_a, output_b]) keras_model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics={ 'dense_2': 'categorical_accuracy', 'dense_3': 'categorical_accuracy' }) metric_names_map = { 'dense_2_categorical_accuracy': 'acc_1', 'dense_3_categorical_accuracy': 'acc_2', } keras_est = tf.keras.estimator.model_to_estimator( keras_model=keras_model, config=config, metric_names_map=metric_names_map) ``` Args: keras_model: A compiled Keras model object. This argument is mutually exclusive with `keras_model_path`. Estimator's `model_fn` uses the structure of the model to clone the model. Defaults to `None`. keras_model_path: Path to a compiled Keras model saved on disk, in HDF5 format, which can be generated with the `save()` method of a Keras model. This argument is mutually exclusive with `keras_model`. Defaults to `None`. custom_objects: Dictionary for cloning customized objects. This is used with classes that is not part of this pip package. For example, if user maintains a `relu6` class that inherits from `tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`. Defaults to `None`. model_dir: Directory to save `Estimator` model parameters, graph, summary files for TensorBoard, etc. 
If unset a directory will be created with `tempfile.mkdtemp` config: `RunConfig` to config `Estimator`. Allows setting up things in `model_fn` based on configuration such as `num_ps_replicas`, or `model_dir`. Defaults to `None`. If both `config.model_dir` and the `model_dir` argument (above) are specified the `model_dir` **argument** takes precedence. checkpoint_format: Sets the format of the checkpoint saved by the estimator when training. May be `saver` or `checkpoint`, depending on whether to save checkpoints from `tf.compat.v1.train.Saver` or `tf.train.Checkpoint`. The default is `checkpoint`. Estimators use name-based `tf.train.Saver` checkpoints, while Keras models use object-based checkpoints from `tf.train.Checkpoint`. Currently, saving object-based checkpoints from `model_to_estimator` is only supported by Functional and Sequential models. Defaults to 'checkpoint'. metric_names_map: Optional dictionary mapping Keras model output metric names to custom names. This can be used to override the default Keras model output metrics names in a multi IO model use case and provide custom names for the `eval_metric_ops` in Estimator. The Keras model metric names can be obtained using `model.metrics_names` excluding any loss metrics such as total loss and output losses. For example, if your Keras model has two outputs `out_1` and `out_2`, with `mse` loss and `acc` metric, then `model.metrics_names` will be `['loss', 'out_1_loss', 'out_2_loss', 'out_1_acc', 'out_2_acc']`. The model metric names excluding the loss metrics will be `['out_1_acc', 'out_2_acc']`. export_outputs: Optional dictionary. This can be used to override the default Keras model output exports in a multi IO model use case and provide custom names for the `export_outputs` in `tf.estimator.EstimatorSpec`. Default is None, which is equivalent to {'serving_default': `tf.estimator.export.PredictOutput`}. If not None, the keys must match the keys of `model.output_names`. 
A dict `{name: output}` where: * name: An arbitrary name for this output. * output: an `ExportOutput` class such as `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. Single-headed models only need to specify one entry in this dictionary. Multi-headed models should specify one entry for each head, one of which must be named using `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY` If no entry is provided, a default `PredictOutput` mapping to `predictions` will be created. Returns: An Estimator from given keras model. Raises: ValueError: If neither keras_model nor keras_model_path was given. ValueError: If both keras_model and keras_model_path was given. ValueError: If the keras_model_path is a GCS URI. ValueError: If keras_model has not been compiled. ValueError: If an invalid checkpoint_format was given. """ try: from tensorflow_estimator.python.estimator import keras_lib # pylint: disable=g-import-not-at-top except ImportError: raise NotImplementedError( 'tf.keras.estimator.model_to_estimator function not available in your ' 'installation.') _model_to_estimator_usage_gauge.get_cell('v2').set(True) return keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg keras_model=keras_model, keras_model_path=keras_model_path, custom_objects=custom_objects, model_dir=model_dir, config=config, checkpoint_format=checkpoint_format, use_v2_estimator=True, metric_names_map=metric_names_map, export_outputs=export_outputs) # LINT.ThenChange(//tensorflow_estimator/python/estimator/keras_lib.py)
16,947
45.054348
102
py
keras
keras-master/keras/tools/pip_package/create_pip_helper.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils to help build and verify pip package for Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import fnmatch import os PIP_EXCLUDED_FILES = frozenset([ 'keras/api/create_python_api_wrapper.py', 'keras/applications/efficientnet_weight_update_util.py', 'keras/distribute/tpu_strategy_test_utils.py', 'keras/saving/saved_model/create_test_saved_model.py', 'keras/tools/pip_package/setup.py', 'keras/tools/pip_package/create_pip_helper.py', ]) PIP_EXCLUDED_DIRS = frozenset([ 'keras/benchmarks', 'keras/integration_tests', 'keras/tests', ]) # Directories that should not have __init__.py files generated within them. EXCLUDED_INIT_FILE_DIRECTORIES = frozenset([ 'keras/benchmarks', 'keras/tools', ]) class PipPackagingError(Exception): pass def create_init_files(pip_root): """Create __init__.py in pip directory tree. These files are auto-generated by Bazel when doing typical build/test, but do not get auto-generated by the pip build process. Currently, the entire directory tree is just python files, so its fine to just create all of the init files. Args: pip_root: Root directory of code being packaged into pip. 
""" for path, subdirs, _ in os.walk(pip_root): for subdir in subdirs: init_file_path = os.path.join(path, subdir, '__init__.py') if any(excluded_path in init_file_path for excluded_path in EXCLUDED_INIT_FILE_DIRECTORIES): continue if not os.path.exists(init_file_path): # Create empty file open(init_file_path, 'w').close() def verify_python_files_in_pip(pip_root, bazel_root): """Verifies all expected files are packaged into Pip. Args: pip_root: Root directory of code being packaged into pip. bazel_root: Root directory of Keras Bazel workspace. Raises: PipPackagingError: Missing file in pip. """ for path, _, files in os.walk(bazel_root): if any(d for d in PIP_EXCLUDED_DIRS if d in path): # Skip any directories that are exclude from PIP, eg tests. continue python_files = set(fnmatch.filter(files, '*.py')) python_test_files = set(fnmatch.filter(files, '*test.py')) python_benchmark_files = set(fnmatch.filter(files, '*benchmark.py')) # We only care about python files in the pip package, see create_init_files. files = python_files - python_test_files - python_benchmark_files for f in files: pip_path = os.path.join(pip_root, os.path.relpath(path, bazel_root), f) file_name = os.path.join(path, f) path_exists = os.path.exists(pip_path) file_excluded = file_name.lstrip('./') in PIP_EXCLUDED_FILES if not path_exists and not file_excluded: raise PipPackagingError( ('Pip package missing the file %s. If this is expected, add it ' 'to PIP_EXCLUDED_FILES in create_pip_helper.py. Otherwise, ' 'make sure it is a build dependency of the pip package') % file_name) if path_exists and file_excluded: raise PipPackagingError( ('File in PIP_EXCLUDED_FILES included in pip. 
%s' % file_name)) def main(): parser = argparse.ArgumentParser() parser.add_argument( '--bazel-root', type=str, required=True, help='Root directory of Keras Bazel workspace.') parser.add_argument( '--pip-root', type=str, required=True, help='Root directory of code being packaged into pip.') args = parser.parse_args() create_init_files(args.pip_root) verify_python_files_in_pip(args.pip_root, args.bazel_root) if __name__ == '__main__': main()
4,399
33.108527
80
py
keras
keras-master/keras/tools/pip_package/setup.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow Keras. TensorFlow Keras is an implementation of the Keras API that uses TensorFlow as a backend. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import setuptools DOCLINES = __doc__.split('\n') # This version string is semver compatible, but incompatible with pip. # For pip, we will remove all '-' characters from this string, and use the # result for pip. _VERSION = '2.7.0' REQUIRED_PACKAGES = [ # We depend on TensorFlow's declared pip dependencies. # Add a new dep there if one is needed. ] project_name = 'keras' if '--project_name' in sys.argv: project_name_idx = sys.argv.index('--project_name') project_name = sys.argv[project_name_idx + 1] sys.argv.remove('--project_name') sys.argv.pop(project_name_idx) setuptools.setup( name=project_name, version=_VERSION.replace('-', ''), description=DOCLINES[0], long_description='\n'.join(DOCLINES[2:]), url='https://keras.io/', download_url='https://github.com/keras-team/keras/tags', author='Google Inc.', author_email='packages@tensorflow.org', packages=setuptools.find_packages(), install_requires=REQUIRED_PACKAGES, # PyPI package information. 
classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], license='Apache 2.0', keywords='tensorflow keras tensor machine learning', )
2,944
34.914634
80
py
keras
keras-master/keras/optimizer_v2/optimizer_v2.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Version 2 of class Optimizer.""" import tensorflow.compat.v2 as tf # pylint: disable=g-bad-name import abc import contextlib import functools import warnings from keras import backend from keras import initializers from keras.engine import base_layer_utils from keras.optimizer_v2 import learning_rate_schedule from keras.optimizer_v2 import utils as optimizer_utils from keras.utils import generic_utils from keras.utils import layer_utils from keras.utils import tf_inspect from keras.utils import tf_utils from tensorflow.python.util.tf_export import keras_export keras_optimizers_gauge = tf.__internal__.monitoring.BoolGauge( "/tensorflow/api/keras/optimizers", "keras optimizer usage", "method") _DEFAULT_VALID_DTYPES = frozenset([ tf.float16, tf.bfloat16, tf.float32, tf.float64, tf.complex64, tf.complex128 ]) def _deduplicate_indexed_slices(values, indices): """Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index. 
""" unique_indices, new_index_positions = tf.unique(indices) summed_values = tf.math.unsorted_segment_sum( values, new_index_positions, tf.shape(unique_indices)[0]) return (summed_values, unique_indices) class NullContextmanager: def __init__(self, *args, **kwargs): pass def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False # False values do not suppress exceptions def name_scope_only_in_function_or_graph(name): """Internal-only entry point for `name_scope*`. Enters a compat.v1.name_scope only when in a function or graph, not when running fully eagerly. Args: name: The name argument that is passed to the op function. Returns: `name_scope*` context manager. """ if not tf.executing_eagerly(): return tf.name_scope(name) else: return NullContextmanager() @keras_export("keras.optimizers.Optimizer", metaclass=abc.ABCMeta) class OptimizerV2(tf.__internal__.tracking.Trackable): """Base class for Keras optimizers. You should not use this class directly, but instead instantiate one of its subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`, etc. ### Usage ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 * var1 + 2 * var2 * var2 # In graph mode, returns op that minimizes the loss by updating the listed # variables. opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) ``` ### Usage in custom training loops In Keras models, sometimes variables are created when the model is first called, instead of construction time. Examples include 1) sequential models without input shape pre-defined, or 2) subclassed models. Pass var_list as callable in these cases. 
Example: ```python opt = tf.keras.optimizers.SGD(learning_rate=0.1) model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(num_hidden, activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid')) loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn = lambda: model.trainable_weights for input, output in data: opt.minimize(loss_fn, var_list_fn) ``` ### Processing gradients before applying them Calling `minimize()` takes care of both computing the gradients and applying them to the variables. If you want to process the gradients before applying them you can instead use the optimizer in three steps: 1. Compute the gradients with `tf.GradientTape`. 2. Process the gradients as you wish. 3. Apply the processed gradients with `apply_gradients()`. Example: ```python # Create an optimizer. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # Compute the gradients for a list of variables. with tf.GradientTape() as tape: loss = <call_loss_function> vars = <list_of_variables> grads = tape.gradient(loss, vars) # Process the gradients, for example cap them, etc. # capped_grads = [MyCapper(g) for g in grads] processed_grads = [process_gradient(g) for g in grads] # Ask the optimizer to apply the processed gradients. opt.apply_gradients(zip(processed_grads, var_list)) ``` ### Use with `tf.distribute.Strategy` This optimizer class is `tf.distribute.Strategy` aware, which means it automatically sums gradients across all replicas. To average gradients, you divide your loss by the global batch size, which is done automatically if you use `tf.keras` built-in training or evaluation loops. See the `reduction` argument of your loss which should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not. To aggregate gradients yourself, call `apply_gradients` with `experimental_aggregate_gradients` set to False. This is useful if you need to process aggregated gradients. 
If you are not using these and you want to average gradients, you should use `tf.math.reduce_sum` to add up your per-example losses and then divide by the global batch size. Note that when using `tf.distribute.Strategy`, the first component of a tensor's shape is the *replica-local* batch size, which is off by a factor equal to the number of replicas being used to compute a single step. As a result, using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients that can be many times too big. ### Variable Constraints All Keras optimizers respect variable constraints. If constraint function is passed to any variable, the constraint will be applied to the variable after the gradient has been applied to the variable. Important: If gradient is sparse tensor, variable constraint is not supported. ### Thread Compatibility The entire optimizer is currently thread compatible, not thread-safe. The user needs to perform synchronization if necessary. ### Slots Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage additional variables associated with the variables to train. These are called <i>Slots</i>. Slots have names and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name you can ask the optimizer for the variable it created to hold the slot value. This can be useful if you want to log debug a training algorithm, report stats about the slots, etc. ### Hyperparameters These are arguments passed to the optimizer subclass constructor (the `__init__` method), and then passed to `self._set_hyper()`. They can be either regular Python values (like 1.0), tensors, or callables. If they are callable, the callable will be called during `apply_gradients()` to get the value for the hyper parameter. Hyperparameters can be overwritten through user code: Example: ```python # Create an optimizer with the desired parameters. 
opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 + 2 * var2 # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) # update learning rate opt.learning_rate = 0.05 opt.minimize(loss, var_list=[var1, var2]) ``` ### Callable learning rate Optimizer accepts a callable learning rate in two ways. The first way is through built-in or customized `tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be called on each iteration with `schedule(iteration)`, a `tf.Variable` owned by the optimizer. Example: >>> var = tf.Variable(np.random.random(size=(1,))) >>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay( ... initial_learning_rate=.01, decay_steps=20, decay_rate=.1) >>> opt = tf.keras.optimizers.SGD(learning_rate=learning_rate) >>> loss = lambda: 3 * var >>> opt.minimize(loss, var_list=[var]) <tf.Variable... The second way is through a callable function that does not accept any arguments. Example: >>> var = tf.Variable(np.random.random(size=(1,))) >>> def lr_callable(): ... return .1 >>> opt = tf.keras.optimizers.SGD(learning_rate=lr_callable) >>> loss = lambda: 3 * var >>> opt.minimize(loss, var_list=[var]) <tf.Variable... ### Creating a custom optimizer If you intend to create your own optimization algorithm, simply inherit from this class and override the following methods: - `_resource_apply_dense` (update variable given gradient tensor is a dense `tf.Tensor`) - `_resource_apply_sparse` (update variable given gradient tensor is a sparse `tf.IndexedSlices`. The most common way for this to happen is if you are taking the gradient through a `tf.gather`.) 
- `_create_slots` (if your optimizer algorithm requires additional variables) - `get_config` (serialization of the optimizer, include all hyper parameters) """ # Subclasses should set this to True unless they override `apply_gradients` # with a version that does not have the `experimental_aggregate_gradients` # argument. Older versions of Keras did not have this argument so custom # optimizers may have overridden `apply_gradients` without the # `experimental_aggregate_gradients` argument. Keras only passes # `experimental_aggregate_gradients` if this attribute is True. # Note: This attribute will likely be removed in an upcoming release. _HAS_AGGREGATE_GRAD = False def __init__(self, name, gradient_aggregator=None, gradient_transformers=None, **kwargs): """Create a new Optimizer. This must be called by the constructors of subclasses. Note that Optimizer instances should not bind to a single graph, and so shouldn't keep Tensors as member variables. Generally you should be able to use the _set_hyper()/state.get_hyper() facility instead. This class is stateful and thread-compatible. Example of custom gradient transformations: ```python def my_gradient_transformer(grads_and_vars): # Simple example, double the gradients. return [(2. * g, v) for g, v in grads_and_vars] optimizer = tf.keras.optimizers.SGD( 1e-3, gradient_transformers=[my_gradient_transformer]) ``` Args: name: String. The name to use for momentum accumulator weights created by the optimizer. gradient_aggregator: The function to use to aggregate gradients across devices (when using `tf.distribute.Strategy`). If `None`, defaults to summing the gradients across devices. The function should accept and return a list of `(gradient, variable)` tuples. gradient_transformers: Optional. List of functions to use to transform gradients before applying updates to Variables. The functions are applied after `gradient_aggregator`. The functions should accept and return a list of `(gradient, variable)` tuples. 
**kwargs: keyword arguments. Allowed arguments are `clipvalue`, `clipnorm`, `global_clipnorm`. If `clipvalue` (float) is set, the gradient of each weight is clipped to be no higher than this value. If `clipnorm` (float) is set, the gradient of each weight is individually clipped so that its norm is no higher than this value. If `global_clipnorm` (float) is set the gradient of all weights is clipped so that their global norm is no higher than this value. Raises: ValueError: in case of any invalid argument. """ # Instrument optimizer usages keras_optimizers_gauge.get_cell(self.__class__.__name__).set(True) allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay", "global_clipnorm"} for k in kwargs: if k not in allowed_kwargs: raise TypeError("Unexpected keyword argument " f"passed to optimizer: {str(k)}. Allowed kwargs are " f"{allowed_kwargs}.") # checks that all keyword arguments are non-negative. if kwargs[k] is not None and kwargs[k] < 0: raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k])) if k == "lr": warnings.warn( "The `lr` argument is deprecated, use `learning_rate` instead.") self._use_locking = True self._init_set_name(name) self._hyper = {} # dict: {variable name : {slot name : variable}} self._slots = {} self._slot_names = [] self._weights = [] self._iterations = None # For implementing Trackable. Stores information about how to restore # slot variables which have not yet been created # (trackable._CheckpointPosition objects). # {slot_name : # {_var_key(variable_to_train): [checkpoint_position, ... ], ... }, # ... } self._deferred_slot_restorations = {} decay = kwargs.pop("decay", 0.0) if decay < 0.: raise ValueError("decay cannot be less than 0. " "Received: decay={}.".format(decay)) self._initial_decay = decay self._hypers_created = False # Store the distribution strategy object if the optimizer is created inside # strategy scope, so it could be used to create variables later. 
if tf.distribute.has_strategy(): self._distribution_strategy = tf.distribute.get_strategy() else: self._distribution_strategy = None # Configure gradient transformations. if gradient_aggregator is None: gradient_aggregator = optimizer_utils.all_reduce_sum_gradients self.gradient_aggregator = gradient_aggregator if gradient_transformers is None: gradient_transformers = [] self.gradient_transformers = gradient_transformers self.clipnorm = kwargs.pop("clipnorm", None) self.global_clipnorm = kwargs.pop("global_clipnorm", None) if self.clipnorm is not None and self.global_clipnorm is not None: raise ValueError("Cannot accept both `clipnorm` and `global_clipnorm`. " "Received: `clipnorm`={}, `global_clipnorm`={}.".format( self.clipnorm, self.global_clipnorm)) self.clipvalue = kwargs.pop("clipvalue", None) @property def clipnorm(self): """`float` or `None`. If set, clips gradients to a maximum norm.""" return self._clipnorm @property def global_clipnorm(self): """`float` or `None`. If set, clips gradients to a maximum norm.""" return self._global_clipnorm @clipnorm.setter def clipnorm(self, val): if val is not None and self.gradient_transformers: raise ValueError("`clipnorm` cannot be set when `gradient_transformers` " "is set. Instead, use the `gradient_transformers` to " "specify clipping and other transformations. Received: " f"val={val}, " f"gradient_transformers={self.gradient_transformers}.") self._clipnorm = val self._clipnorm_fn = optimizer_utils.make_gradient_clipnorm_fn( self._clipnorm) @global_clipnorm.setter def global_clipnorm(self, val): if val is not None and self.gradient_transformers: raise ValueError("`global_clipnorm` cannot be set when " "`gradient_transformers` " "is set. Instead, use the `gradient_transformers` to " "specify clipping and other transformations. 
Received: " f"val={val}, " f"gradient_transformers={self.gradient_transformers}.") self._global_clipnorm = val self._global_clipnorm_fn = optimizer_utils.make_global_gradient_clipnorm_fn( self._global_clipnorm) @property def clipvalue(self): """`float` or `None`. If set, clips gradients to a maximum value.""" return self._clipvalue @clipvalue.setter def clipvalue(self, val): if val is not None and self.gradient_transformers: raise ValueError("`clipvalue` cannot be set when `gradient_transformers` " "is set. Instead, use the `gradient_transformers` to " "specify clipping and other transformations. Received: " f"val={val}, " f"gradient_transformers={self.gradient_transformers}.") self._clipvalue = val self._clipvalue_fn = optimizer_utils.make_gradient_clipvalue_fn( self._clipvalue) def _transform_loss(self, loss): """Called in `.minimize` to transform loss before computing gradients.""" return loss def _get_gradients(self, tape, loss, var_list, grad_loss=None): """Called in `minimize` to compute gradients from loss.""" grads = tape.gradient(loss, var_list, grad_loss) return list(zip(grads, var_list)) def _transform_unaggregated_gradients(self, grads_and_vars): """Called in `apply_gradients` before gradient aggregation.""" return grads_and_vars def _aggregate_gradients(self, grads_and_vars): """Called in `apply_gradients` to aggregate gradients across devices. Note that user subclasses may override this, so the interface should not be changed. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: A list of (aggregrated_gradient, variable) pairs. By default, this calls `self.gradient_aggregator`. 
""" return self.gradient_aggregator(grads_and_vars) def _transform_gradients(self, grads_and_vars): """Called in `apply_gradients` after aggregation.""" if self._clipvalue is not None: grads_and_vars = self._clipvalue_fn(grads_and_vars) if self._clipnorm is not None: grads_and_vars = self._clipnorm_fn(grads_and_vars) if self._global_clipnorm is not None: grads_and_vars = self._global_clipnorm_fn(grads_and_vars) for fn in self.gradient_transformers: grads_and_vars = fn(grads_and_vars) return grads_and_vars def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None): """Minimize `loss` by updating `var_list`. This method simply computes gradient using `tf.GradientTape` and calls `apply_gradients()`. If you want to process the gradient before applying then call `tf.GradientTape` and `apply_gradients()` explicitly instead of using this function. Args: loss: `Tensor` or callable. If a callable, `loss` should take no arguments and return the value to minimize. If a `Tensor`, the `tape` argument must be passed. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` since the variables are created at the first time `loss` is called. grad_loss: (Optional). A `Tensor` holding the gradient computed for `loss`. name: (Optional) str. Name for the returned operation. tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`, the tape that computed the `loss` must be provided. Returns: An `Operation` that updates the variables in `var_list`. The `iterations` will be automatically increased by 1. Raises: ValueError: If some of the variables are not `Variable` objects. 
""" grads_and_vars = self._compute_gradients( loss, var_list=var_list, grad_loss=grad_loss, tape=tape) return self.apply_gradients(grads_and_vars, name=name) def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): """Compute gradients of `loss` for the variables in `var_list`. This is the first part of `minimize()`. It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable". Note that "gradient" can be a `Tensor`, an `IndexedSlices`, or `None` if there is no gradient for the given variable. Args: loss: `Tensor` or callable. If a callable, `loss` should take no arguments and return the value to minimize. If a `Tensor`, the `tape` argument must be passed. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` and the variables are created at the first time when `loss` is called. grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`. tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`, the tape that computed the `loss` must be provided. Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be `None`. Raises: TypeError: If `var_list` contains anything else than `Variable` objects. ValueError: If some arguments are invalid, or var_list is None. """ # TODO(joshl): Test that we handle weight decay in a reasonable way. if not callable(loss) and tape is None: raise ValueError("`tape` is required when a `Tensor` loss is passed. 
" f"Received: loss={loss}, tape={tape}.") tape = tape if tape is not None else tf.GradientTape() if callable(loss): with tape: if not callable(var_list): tape.watch(var_list) loss = loss() if callable(var_list): var_list = var_list() with tape: loss = self._transform_loss(loss) var_list = tf.nest.flatten(var_list) with tf.name_scope(self._name + "/gradients"): grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g is not None and v.dtype != tf.resource ]) return grads_and_vars def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True): """Apply gradients to variables. This is the second part of `minimize()`. It returns an `Operation` that applies gradients. The method sums gradients from all replicas in the presence of `tf.distribute.Strategy` by default. You can aggregate gradients yourself by passing `experimental_aggregate_gradients=False`. Example: ```python grads = tape.gradient(loss, vars) grads = tf.distribute.get_replica_context().all_reduce('sum', grads) # Processing aggregated gradients. optimizer.apply_gradients(zip(grads, vars), experimental_aggregate_gradients=False) ``` Args: grads_and_vars: List of (gradient, variable) pairs. name: Optional name for the returned operation. Default to the name passed to the `Optimizer` constructor. experimental_aggregate_gradients: Whether to sum gradients from different replicas in the presence of `tf.distribute.Strategy`. If False, it's user responsibility to aggregate the gradients. Default to True. Returns: An `Operation` that applies the specified gradients. The `iterations` will be automatically increased by 1. Raises: TypeError: If `grads_and_vars` is malformed. ValueError: If none of the variables have gradients. RuntimeError: If called in a cross-replica context. 
""" grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars) var_list = [v for (_, v) in grads_and_vars] with tf.name_scope(self._name): # Create iteration if necessary. with tf.init_scope(): self._create_all_weights(var_list) if not grads_and_vars: # Distribution strategy does not support reducing an empty list of # gradients return tf.no_op() if tf.distribute.in_cross_replica_context(): raise RuntimeError( "`apply_gradients() cannot be called in cross-replica context. " "Use `tf.distribute.Strategy.run` to enter replica " "context. For more information, please see the docstring of " "`tf.distribute.get_replica_context`.") strategy = tf.distribute.get_strategy() if (not experimental_aggregate_gradients and strategy and isinstance(strategy, (tf.compat.v1.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy))): raise NotImplementedError( "`experimental_aggregate_gradients=False is not supported for " "ParameterServerStrategy and CentralStorageStrategy. 
Used: " f"strategy={strategy}.") apply_state = self._prepare(var_list) if experimental_aggregate_gradients: grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars) grads_and_vars = self._aggregate_gradients(grads_and_vars) grads_and_vars = self._transform_gradients(grads_and_vars) if optimizer_utils.strategy_supports_no_merge_call(): return self._distributed_apply(strategy, grads_and_vars, name, apply_state) else: return tf.distribute.get_replica_context().merge_call( functools.partial(self._distributed_apply, apply_state=apply_state), args=(grads_and_vars,), kwargs={ "name": name, }) def _distributed_apply(self, distribution, grads_and_vars, name, apply_state): """`apply_gradients` using a `DistributionStrategy`.""" def apply_grad_to_update_var(var, grad): """Apply gradient to variable.""" if isinstance(var, tf.Tensor): raise NotImplementedError( f"Updating a `Tensor` is not implemented. Received: var={var}.") apply_kwargs = {} if isinstance(grad, tf.IndexedSlices): if var.constraint is not None: raise RuntimeError( "Cannot use a constraint function on a sparse variable. " f"Received: grad={grad}, var.constraint={var.constraint}.") if "apply_state" in self._sparse_apply_args: apply_kwargs["apply_state"] = apply_state return self._resource_apply_sparse_duplicate_indices( grad.values, var, grad.indices, **apply_kwargs) if "apply_state" in self._dense_apply_args: apply_kwargs["apply_state"] = apply_state update_op = self._resource_apply_dense(grad, var, **apply_kwargs) if var.constraint is not None: with tf.control_dependencies([update_op]): return var.assign(var.constraint(var)) else: return update_op eagerly_outside_functions = tf.compat.v1.executing_eagerly_outside_functions() update_ops = [] with name_scope_only_in_function_or_graph(name or self._name): for grad, var in grads_and_vars: # Colocate the update with variables to avoid unnecessary communication # delays. See b/136304694. 
with distribution.extended.colocate_vars_with(var): with name_scope_only_in_function_or_graph( "update" if eagerly_outside_functions else "update_" + var.op.name): update_op = distribution.extended.update( var, apply_grad_to_update_var, args=(grad,), group=False) if tf.distribute.in_cross_replica_context(): # In cross-replica context, extended.update returns a list of # update ops from all replicas (group=False). update_ops.extend(update_op) else: # In replica context, extended.update return the single update op # of current replica. update_ops.append(update_op) any_symbolic = any(isinstance(i, tf.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops) if not tf.executing_eagerly() or any_symbolic: # If the current context is graph mode or any of the update ops are # symbolic then the step update should be carried out under a graph # context. (eager updates execute immediately) with backend._current_graph(update_ops).as_default(): # pylint: disable=protected-access with tf.control_dependencies([tf.group(update_ops)]): return self.iterations.assign_add(1, read_value=False) return self.iterations.assign_add(1) def get_gradients(self, loss, params): """Returns gradients of `loss` with respect to `params`. Should be used only in legacy v1 graph mode. Args: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented). """ params = tf.nest.flatten(params) with backend.get_graph().as_default(), backend.name_scope(self._name + "/gradients"): grads = tf.compat.v1.gradients(loss, params) for grad, param in zip(grads, params): if grad is None: raise ValueError("Variable {} has `None` for gradient. " "Please make sure that all of your ops have a " "gradient defined (i.e. are differentiable). 
" "Common ops without gradient: " "K.argmax, K.round, K.eval.".format(param)) return grads def get_updates(self, loss, params): grads = self.get_gradients(loss, params) grads_and_vars = list(zip(grads, params)) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g is not None and v.dtype != tf.resource ]) return [self.apply_gradients(grads_and_vars)] def _set_hyper(self, name, value): """set hyper `name` to value. value can be callable, tensor, numeric.""" if isinstance(value, tf.__internal__.tracking.Trackable): self._track_trackable(value, name, overwrite=True) if name not in self._hyper: self._hyper[name] = value else: prev_value = self._hyper[name] if (callable(prev_value) or isinstance(prev_value, (tf.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] = value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None): if not self._hypers_created: self._create_hypers() value = self._hyper[name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return value if callable(value): value = value() if dtype: return tf.cast(value, dtype) else: return value def _create_slots(self, var_list): pass def _create_all_weights(self, var_list): """Creates all weights, including iterations, hyperparameters and slot vars. This will add newly created variables to `optimizer.weights`. New variables are only created when this method is called the first time, or when called with different variables in the var_list. Args: var_list: list or tuple of `Variable` objects that will be minimized using this optimizer. """ _ = self.iterations self._create_hypers() self._create_slots(var_list) def __getattribute__(self, name): """Overridden to support hyperparameter access.""" try: return super(OptimizerV2, self).__getattribute__(name) except AttributeError as e: # Needed to avoid infinite recursion with __setattr__. 
if name == "_hyper": raise e # Backwards compatibility with Keras optimizers. if name == "lr": name = "learning_rate" if name in self._hyper: return self._get_hyper(name) raise e def __dir__(self): result = set(super(OptimizerV2, self).__dir__()) if "_hyper" in result: result |= self._hyper.keys() if "learning_rate" in self._hyper.keys(): result.add("lr") return list(result) def __setattr__(self, name, value): """Override setattr to support dynamic hyperparameter setting.""" # Backwards compatibility with Keras optimizers. if name == "lr": name = "learning_rate" if hasattr(self, "_hyper") and name in self._hyper: self._set_hyper(name, value) else: super(OptimizerV2, self).__setattr__(name, value) def get_slot_names(self): """A list of names for this optimizer's slots.""" return self._slot_names def add_slot(self, var, slot_name, initializer="zeros", shape=None): """Add a new slot variable for `var`. A slot variable is an additional variable associated with `var` to train. It is allocated and managed by optimizers, e.g. `Adam`. Args: var: a `Variable` object. slot_name: name of the slot variable. initializer: initializer of the slot variable shape: (Optional) shape of the slot variable. If not set, it will default to the shape of `var`. Returns: A slot variable. 
""" if slot_name not in self._slot_names: self._slot_names.append(slot_name) var_key = _var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None) if weight is None: if isinstance(initializer, str) or callable(initializer): initializer = initializers.get(initializer) if isinstance( initializer, tf.__internal__.tracking.CheckpointInitialValueCallable) or (shape is not None): slot_shape = shape else: slot_shape = var.shape initial_value = functools.partial( initializer, shape=slot_shape, dtype=var.dtype) else: initial_value = initializer with self._distribution_strategy_scope(): strategy = tf.distribute.get_strategy() if not strategy.extended.variable_created_in_scope(var): raise ValueError( "Trying to create optimizer slot variable under the scope for " "tf.distribute.Strategy ({}), which is different from the scope " "used for the original variable ({}). Make sure the slot " "variables are created under the same strategy scope. This may " "happen if you're restoring from a checkpoint outside the scope." 
.format(strategy, var)) with strategy.extended.colocate_vars_with(var): weight = tf.Variable( name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access dtype=var.dtype, trainable=False, initial_value=initial_value) backend.track_variable(weight) slot_dict[slot_name] = weight self._restore_slot_variable( slot_name=slot_name, variable=var, slot_variable=weight) self._weights.append(weight) return weight def get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict = self._slots[var_key] return slot_dict[slot_name] def _prepare(self, var_list): keys = set() for var in var_list: if isinstance(var, tf.distribute.DistributedValues): var_devices = var._devices # pylint: disable=protected-access else: var_devices = [var.device] var_dtype = var.dtype.base_dtype for var_device in var_devices: keys.add((var_device, var_dtype)) apply_state = {} for var_device, var_dtype in keys: apply_state[(var_device, var_dtype)] = {} with tf.device(var_device): self._prepare_local(var_device, var_dtype, apply_state) return apply_state def _prepare_local(self, var_device, var_dtype, apply_state): if "learning_rate" in self._hyper: lr_t = tf.identity(self._decayed_lr(var_dtype)) apply_state[(var_device, var_dtype)]["lr_t"] = lr_t def _fallback_apply_state(self, var_device, var_dtype): """Compatibility for subclasses that don't pass apply_state through.""" apply_state = {(var_device, var_dtype): {}} self._prepare_local(var_device, var_dtype, apply_state) return apply_state[(var_device, var_dtype)] def _create_hypers(self): if self._hypers_created: return with self._distribution_strategy_scope(): # Iterate hyper values deterministically. for name, value in sorted(self._hyper.items()): if isinstance(value, (tf.Tensor, tf.Variable)) or callable(value): # The check for `callable` covers the usage when `value` is a # `LearningRateSchedule`, in which case it does not need to create a # variable. 
continue else: self._hyper[name] = self.add_weight( name, shape=[], trainable=False, initializer=value, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) self._hypers_created = True @property def iterations(self): """Variable. The number of training steps this Optimizer has run.""" if self._iterations is None: with self._distribution_strategy_scope(): self._iterations = self.add_weight( "iter", shape=[], dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) self._weights.append(self._iterations) return self._iterations @iterations.setter def iterations(self, variable): if self._iterations is not None: raise RuntimeError("Cannot set `iterations` to a new Variable after " "the Optimizer weights have been created. Here it is " f"attempting to set `iterations` to {variable}.") self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): """Get decayed learning rate as a Tensor with dtype=var_dtype.""" lr_t = self._get_hyper("learning_rate", var_dtype) if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step = tf.cast(self.iterations, var_dtype) lr_t = tf.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.: local_step = tf.cast(self.iterations, var_dtype) decay_t = tf.cast(self._initial_decay, var_dtype) lr_t = lr_t / (1. + decay_t * local_step) return lr_t @abc.abstractmethod def get_config(self): """Returns the config of the optimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary. 
""" config = {"name": self._name} if self.clipnorm is not None: config["clipnorm"] = self.clipnorm if self.clipvalue is not None: config["clipvalue"] = self.clipvalue if self.global_clipnorm is not None: config["global_clipnorm"] = self.global_clipnorm return config @classmethod def from_config(cls, config, custom_objects=None): """Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance. """ if "lr" in config: config["learning_rate"] = config.pop("lr") if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects) return cls(**config) def _serialize_hyperparameter(self, hyperparameter_name): """Serialize a hyperparameter that can be a float, callable, or Tensor.""" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value) if callable(value): return value() if tf.is_tensor(value): return backend.get_value(value) return value def variables(self): """Returns variables of this Optimizer based on the order created.""" return self._weights @property def weights(self): """Returns variables of this Optimizer based on the order created.""" return self._weights def get_weights(self): """Returns the current weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function returns the weight values associated with this optimizer as a list of Numpy arrays. 
The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they were created. The returned list can in turn be used to load state into similarly parameterized optimizers. For example, the RMSprop optimizer for this simple model returns a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> print('Training'); results = m.fit(data, labels) Training ... >>> len(opt.get_weights()) 3 Returns: Weights values as a list of numpy arrays. """ params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer. def set_weights(self, weights): """Set the weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function takes the weight values associated with this optimizer as a list of Numpy arrays. The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they are created. The passed values are used to set the new state of the optimizer. For example, the RMSprop optimizer for this simple model takes a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> print('Training'); results = m.fit(data, labels) Training ... 
>>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])] >>> opt.set_weights(new_weights) >>> opt.iterations <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10> Args: weights: weight values as a list of numpy arrays. """ params = self.weights if len(params) != len(weights): raise ValueError( "You called `set_weights(weights)` on optimizer {self._name} " f"with a weight list of length {str(len(weights))}, " f"but the optimizer was expecting {str(len(params))} " f"weights. Provided weights: {str(weights)[:50]}...") if not params: return weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError(f"Optimizer weight shape {str(pv.shape)} " "not compatible with " f"provided weight shape {str(w.shape)}.") weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight(self, name, shape, dtype=None, initializer="zeros", trainable=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.VariableAggregation.NONE): if dtype is None: dtype = tf.float32 if isinstance(initializer, str) or callable(initializer): initializer = initializers.get(initializer) if synchronization == tf.VariableSynchronization.ON_READ: if trainable: raise ValueError( "Synchronization value can be set to " "VariableSynchronization.ON_READ only for non-trainable variables. " "You have specified trainable=True and " "synchronization=VariableSynchronization.ON_READ.") else: # Set trainable to be false when variable is to be synced on read. 
trainable = False elif trainable is None: trainable = True variable = self._add_variable_with_custom_getter( name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization, aggregation=aggregation) backend.track_variable(variable) return variable def _init_set_name(self, name, zero_based=True): if not name: self._name = backend.unique_object_name( generic_utils.to_snake_case(self.__class__.__name__), zero_based=zero_based) else: self._name = name def _assert_valid_dtypes(self, tensors): """Asserts tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type. """ valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise ValueError("Invalid type {} for {}, expected: {}.".format( dtype, t.name, [v for v in valid_dtypes])) def _valid_dtypes(self): """Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients. """ return _DEFAULT_VALID_DTYPES def _call_if_callable(self, param): """Call the function if param is callable.""" return param() if callable(param) else param def _resource_apply_dense(self, grad, handle, apply_state): """Add ops to apply dense gradients to the variable `handle`. Args: grad: a `Tensor` representing the gradient. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. apply_state: A dict which is used across multiple apply calls. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError("`_resource_apply_dense` must be implemented in " "subclasses.") def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices, **kwargs): """Add ops to apply sparse gradients to `handle`, with repeated indices. 
Optimizers which override this method must deal with repeated indices. See the docstring of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing `grad` and `indices` and passing them on to `_resource_apply_sparse`. Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. **kwargs: May optionally contain `apply_state` Returns: An `Operation` which updates the value of the variable. """ summed_grad, unique_indices = _deduplicate_indexed_slices( values=grad, indices=indices) return self._resource_apply_sparse(summed_grad, handle, unique_indices, **kwargs) def _resource_apply_sparse(self, grad, handle, indices, apply_state): """Add ops to apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices are unique. apply_state: A dict which is used across multiple apply calls. Returns: An `Operation` which updates the value of the variable. 
""" raise NotImplementedError("`_resource_apply_sparse` Must be implemented in " "subclasses.") def _resource_scatter_add(self, x, i, v): with tf.control_dependencies([ tf.raw_ops.ResourceScatterAdd( resource=x.handle, indices=i, updates=v) ]): return x.value() def _resource_scatter_update(self, x, i, v): with tf.control_dependencies( [tf.raw_ops.ResourceScatterUpdate( resource=x.handle, indices=i, updates=v)]): return x.value() @property @layer_utils.cached_per_instance def _dense_apply_args(self): return tf_inspect.getfullargspec(self._resource_apply_dense).args @property @layer_utils.cached_per_instance def _sparse_apply_args(self): return tf_inspect.getfullargspec(self._resource_apply_sparse).args # --------------- # For implementing the trackable interface # --------------- def _restore_slot_variable(self, slot_name, variable, slot_variable): """Restore a newly created slot variable's value.""" variable_key = _var_key(variable) deferred_restorations = self._deferred_slot_restorations.get( slot_name, {}).pop(variable_key, []) # Iterate over restores, highest restore UID first to minimize the number # of assignments. deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True) for checkpoint_position in deferred_restorations: checkpoint_position.restore(slot_variable) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable): """Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. 
This method is nonetheless important when graph building for the case when a slot variable has already been created but `variable` has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be restored. slot_name: The name of this `Optimizer`'s slot to restore into. variable: The variable object this slot is being created for. """ variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if (slot_variable is None and tf.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable creation if there is an active variable creator # scope. Generally we'd like to eagerly create/restore slot variables # when possible, but this may mean that scopes intended to catch # `variable` also catch its eagerly created slot variable # unintentionally (specifically make_template would add a dependency on # a slot variable if not for this case). Deferring is mostly harmless # (aside from double initialization), and makes variable creator scopes # behave the same way they do when graph building. # # One notable case is with distribution strategy, which uses variable # creator scope but always desires the `variable` and the slot to use # the same scope, thus we can safely eagerly create/restore slot # variables. 
and (not tf.compat.v1.get_default_graph()._variable_creator_stack or # pylint: disable=protected-access self._distribution_strategy)): initializer = tf.__internal__.tracking.CheckpointInitialValueCallable( checkpoint_position=slot_variable_position) slot_variable = self.add_slot( var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape()) # Slot variables are not owned by any one object (because we don't want to # save the slot variable if the optimizer is saved without the non-slot # variable, or if the non-slot variable is saved without the optimizer; # it's a dependency hypergraph with edges of the form (optimizer, non-slot # variable, variable)). So we don't _track_ slot variables anywhere, and # instead special-case this dependency and otherwise pretend it's a normal # graph. if slot_variable is not None: # If we've either made this slot variable, or if we've pulled out an # existing slot variable, we should restore it. slot_variable_position.restore(slot_variable) else: # We didn't make the slot variable. Defer restoring until it gets created # normally. We keep a list rather than the one with the highest restore # UID in case slot variables have their own dependencies, in which case # those could differ between restores. self._deferred_slot_restorations.setdefault( slot_name, {}).setdefault(variable_key, []).append( slot_variable_position) @contextlib.contextmanager def _distribution_strategy_scope(self): """Returns the `tf.distribute.Strategy` this optimizer was created under.""" if self._distribution_strategy and not tf.distribute.has_strategy(): with self._distribution_strategy.scope(): yield self._distribution_strategy.scope() else: yield def _var_key(var): """Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. 
Args: var: the variable. Returns: the unique name of the variable. """ # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() if var._in_graph_mode: return var._shared_name return var._unique_id def _get_slot_key_from_var(var, slot_name): """Get the slot key for the variable: var_name/slot_name.""" name = _var_key(var) return name + "/" + slot_name class RestoredOptimizer(OptimizerV2): """A non-functional Optimizer implementation for checkpoint compatibility. Holds slot variables and hyperparameters when an optimizer is restored from a SavedModel. These variables may be referenced in functions along with ops created by the original optimizer, but currently we do not support using the optimizer object iself (e.g. through `apply_gradients`). """ # TODO(allenl): Make the restored optimizer functional by tracing its apply # methods. def __init__(self): super(RestoredOptimizer, self).__init__("RestoredOptimizer") self._hypers_created = True def get_config(self): # TODO(allenl): Save and restore the Optimizer's config raise NotImplementedError( "Restoring functional Optimizers from SavedModels is not currently " "supported. Please file a feature request if this limitation bothers " "you.") tf.__internal__.saved_model.load.register_revived_type( "optimizer", lambda obj: isinstance(obj, OptimizerV2), versions=[tf.__internal__.saved_model.load.VersionedTypeRegistration( object_factory=lambda proto: RestoredOptimizer(), version=2, min_producer_version=1, min_consumer_version=1, setter=RestoredOptimizer._set_hyper # pylint: disable=protected-access )])
58,565
38.358871
112
py
keras
keras-master/keras/optimizer_v2/adam_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adam.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import combinations from keras import optimizer_v1 from keras.optimizer_v2 import adam from keras.optimizer_v2 import learning_rate_schedule def adam_update_numpy(param, g_t, t, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7): lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1)) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon) return param_t, m_t, v_t def adam_update_numpy_amsgrad(param, g_t, t, m, v, vhat, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7): lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1)) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t vhat_t = np.maximum(vhat, v_t) param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon) return param_t, m_t, v_t, vhat_t def adam_sparse_update_numpy_amsgrad(param, indices, g_t, t, m, v, vhat, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7): m_t, v_t, vhat_t, param_t = (np.copy(m), np.copy(v), np.copy(vhat), np.copy(param)) lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1)) m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t v_t_slice = beta2 * v[indices] + (1 - beta2) 
* g_t * g_t m_t[indices] = m_t_slice v_t[indices] = v_t_slice v_hat_t = np.maximum(vhat_t, v_t) v_hat_t_slice = v_hat_t[indices] param_t_slice = param[indices] - ( lr_t * (m_t_slice / (np.sqrt(v_hat_t_slice) + epsilon))) param_t[indices] = param_t_slice return param_t, m_t, v_t, vhat_t def get_beta_accumulators(opt, dtype): local_step = tf.cast(opt.iterations + 1, dtype) beta_1_t = tf.cast(opt._get_hyper("beta_1"), dtype) beta_1_power = tf.pow(beta_1_t, local_step) beta_2_t = tf.cast(opt._get_hyper("beta_2"), dtype) beta_2_power = tf.pow(beta_2_t, local_step) return (beta_1_power, beta_2_power) class AdamOptimizerTest(tf.test.TestCase, parameterized.TestCase): def testSparse(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0_np_indices = np.array([0, 2], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np[grads0_np_indices]), tf.constant(grads0_np_indices), tf.constant([3])) grads1_np_indices = np.array([0, 2], dtype=np.int32) grads1 = tf.IndexedSlices( tf.constant(grads1_np[grads1_np_indices]), tf.constant(grads1_np_indices), tf.constant([3])) opt = adam.Adam() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1)) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adam for t in range(3): 
self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSparseDevicePlacement(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for index_dtype in [tf.int32, tf.int64]: with tf.Graph().as_default(), self.cached_session( force_gpu=tf.test.is_gpu_available()): # If a GPU is available, tests that all optimizer ops can be placed on # it (i.e. they have GPU kernels). var = tf.Variable([[1.0], [2.0]]) indices = tf.constant([0, 1], dtype=index_dtype) g_sum = lambda: tf.reduce_sum(tf.gather(var, indices)) # pylint: disable=cell-var-from-loop optimizer = adam.Adam(3.0) minimize_op = optimizer.minimize(g_sum, var_list=[var]) self.evaluate(tf.compat.v1.global_variables_initializer()) minimize_op.run() def testSparseRepeatedIndices(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): repeated_index_update_var = tf.Variable( [[1.0], [2.0]], dtype=dtype) aggregated_update_var = tf.Variable( [[1.0], [2.0]], dtype=dtype) grad_repeated_index = tf.IndexedSlices( tf.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), tf.constant([1, 1]), tf.constant([2, 1])) grad_aggregated = tf.IndexedSlices( tf.constant( [0.2], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) repeated_update = adam.Adam().apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adam.Adam().apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) for _ in range(3): repeated_update.run() aggregated_update.run() self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) def doTestBasic(self, use_callable_params=False): for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = lambda: 0.001 beta1 = lambda: 0.9 beta2 = lambda: 0.999 epsilon = lambda: 1e-8 if not use_callable_params: learning_rate = learning_rate() beta1 = beta1() beta2 = beta2() epsilon = epsilon() opt = adam.Adam(learning_rate=learning_rate) if not tf.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if not tf.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testResourceBasic(self): self.doTestBasic() @combinations.generate(combinations.combine(mode=["eager"])) def testBasicCallableParams(self): self.doTestBasic(use_callable_params=True) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicWithAmsgrad(self): for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with self.cached_session(): # Initialize variables for numpy 
implementation. m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adam.Adam(amsgrad=True) if not tf.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if not tf.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad( var0_np, grads0_np, t, m0, v0, v0hat) var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad( var1_np, grads1_np, t, m1, v1, v1hat) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testSparseWithAmsgrad(self): # dtypes.half does not work on gpu + eager. 
for dtype in [tf.float32, tf.float64]: with self.cached_session(): m0 = np.array([[0.0], [0.0]]) v0 = np.array([[0.0], [0.0]]) v0hat = np.array([[0.0], [0.0]]) indices_np = np.array([1]) indices = tf.constant(indices_np, dtype=tf.int32) var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype) repeated_index_update_var = tf.Variable(var0_np, dtype=dtype) aggregated_update_var = tf.Variable(var0_np, dtype=dtype) grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype) grad_repeated_index = tf.IndexedSlices( tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype), tf.constant([1, 1]), tf.constant([2, 1])) grad_aggregated = tf.IndexedSlices(grads0_np, indices, tf.constant([2, 1])) opt_repeated = adam.Adam(amsgrad=True) opt_aggregated = adam.Adam(amsgrad=True) if not tf.executing_eagerly(): repeated_update = opt_repeated.apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = opt_aggregated.apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) for t in range(3): if not tf.executing_eagerly(): self.evaluate(repeated_update) self.evaluate(aggregated_update) else: opt_repeated.apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) opt_aggregated.apply_gradients( [(grad_aggregated, aggregated_update_var)]) var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad( var0_np, indices_np, grads0_np, t, m0, v0, v0hat) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(aggregated_update_var)) self.assertAllCloseAccordingToType( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) def testBasicWithLearningRateDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.001 beta_1 = 0.9 beta_2 = 0.999 epsilon = 1e-7 decay = 0.5 opt = adam.Adam( learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): self.evaluate(update) lr_np = learning_rate / (1 + decay * t) var0_np, m0, v0 = adam_update_numpy( var0_np, grads0_np, t, m0, v0, lr=lr_np) var1_np, m1, v1 = adam_update_numpy( var1_np, grads1_np, t, m1, v1, lr=lr_np) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testBasicWithLearningRateInverseTimeDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.001 decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) beta_1 = 0.9 beta_2 = 0.999 epsilon = 1e-7 opt = adam.Adam( learning_rate=lr_schedule, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): self.evaluate(update) lr_np = learning_rate / (1 + decay * t) var0_np, m0, v0 = adam_update_numpy( var0_np, grads0_np, t, m0, v0, lr=lr_np) var1_np, m1, v1 = adam_update_numpy( var1_np, grads1_np, t, m1, v1, lr=lr_np) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testTensorLearningRate(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adam.Adam(tf.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adam for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSharing(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adam.Adam() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of intertwined Adam1 and Adam2. for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if t % 2 == 0: update1.run() else: update2.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["eager"])) def testSlotsUniqueEager(self): v1 = tf.Variable(1.) v2 = tf.Variable(1.) opt = adam.Adam(1.) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. 
self.assertLen(set(v.ref() for v in opt.variables()), 5) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) def testSetWeightsFromV1AdamWithoutMinimize(self): keras_v1_adam = optimizer_v1.Adam() keras_v2_adam = adam.Adam() keras_v2_adam.set_weights(keras_v1_adam.get_weights()) keras_v1_iteration = keras_v1_adam.iterations keras_v2_iteration = keras_v2_adam.iterations self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertEqual( self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration)) def testConstructAdamWithLR(self): opt = adam.Adam(lr=1.0) opt_2 = adam.Adam(learning_rate=0.1, lr=1.0) opt_3 = adam.Adam(learning_rate=0.1) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) class NonFusedAdamOptimizerTest(tf.test.TestCase, parameterized.TestCase): def testSparse(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0_np_indices = np.array([0, 2], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np[grads0_np_indices]), tf.constant(grads0_np_indices), tf.constant([3])) grads1_np_indices = np.array([0, 2], dtype=np.int32) grads1 = tf.IndexedSlices( tf.constant(grads1_np[grads1_np_indices]), tf.constant(grads1_np_indices), tf.constant([3])) opt = adam.NonFusedAdam() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1)) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of NonFusedAdam for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSparseDevicePlacement(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for index_dtype in [tf.int32, tf.int64]: with tf.Graph().as_default(), self.cached_session( force_gpu=tf.test.is_gpu_available()): # If a GPU is available, tests that all optimizer ops can be placed on # it (i.e. they have GPU kernels). 
var = tf.Variable([[1.0], [2.0]]) indices = tf.constant([0, 1], dtype=index_dtype) g_sum = lambda: tf.reduce_sum(tf.gather(var, indices)) # pylint: disable=cell-var-from-loop optimizer = adam.NonFusedAdam(3.0) minimize_op = optimizer.minimize(g_sum, var_list=[var]) self.evaluate(tf.compat.v1.global_variables_initializer()) minimize_op.run() def testSparseRepeatedIndices(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): repeated_index_update_var = tf.Variable( [[1.0], [2.0]], dtype=dtype) aggregated_update_var = tf.Variable( [[1.0], [2.0]], dtype=dtype) grad_repeated_index = tf.IndexedSlices( tf.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), tf.constant([1, 1]), tf.constant([2, 1])) grad_aggregated = tf.IndexedSlices( tf.constant( [0.2], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) repeated_update = adam.NonFusedAdam().apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adam.NonFusedAdam().apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) for _ in range(3): repeated_update.run() aggregated_update.run() self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) def doTestBasic(self, use_callable_params=False): for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = lambda: 0.001 beta1 = lambda: 0.9 beta2 = lambda: 0.999 epsilon = lambda: 1e-8 if not use_callable_params: learning_rate = learning_rate() beta1 = beta1() beta2 = beta2() epsilon = epsilon() opt = adam.NonFusedAdam(learning_rate=learning_rate) if not tf.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of NonFusedAdam for t in range(3): beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if not tf.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4) self.assertAllCloseAccordingToType( var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testResourceBasic(self): self.doTestBasic() @combinations.generate(combinations.combine(mode=["eager"])) def testBasicCallableParams(self): self.doTestBasic(use_callable_params=True) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicWithAmsgrad(self): for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): 
with self.cached_session(): # Initialize variables for numpy implementation. m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adam.NonFusedAdam(amsgrad=True) if not tf.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of NonFusedAdam for t in range(3): beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if not tf.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad( var0_np, grads0_np, t, m0, v0, v0hat) var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad( var1_np, grads1_np, t, m1, v1, v1hat) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4) self.assertAllCloseAccordingToType( var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testSparseWithAmsgrad(self): # dtypes.half does not work on gpu + eager. 
for dtype in [tf.float32, tf.float64]: with self.cached_session(): m0 = np.array([[0.0], [0.0]]) v0 = np.array([[0.0], [0.0]]) v0hat = np.array([[0.0], [0.0]]) indices_np = np.array([1]) indices = tf.constant(indices_np, dtype=tf.int32) var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype) repeated_index_update_var = tf.Variable(var0_np, dtype=dtype) aggregated_update_var = tf.Variable(var0_np, dtype=dtype) grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype) grad_repeated_index = tf.IndexedSlices( tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype), tf.constant([1, 1]), tf.constant([2, 1])) grad_aggregated = tf.IndexedSlices(grads0_np, indices, tf.constant([2, 1])) opt_repeated = adam.NonFusedAdam(amsgrad=True) opt_aggregated = adam.NonFusedAdam(amsgrad=True) if not tf.executing_eagerly(): repeated_update = opt_repeated.apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = opt_aggregated.apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) for t in range(3): if not tf.executing_eagerly(): self.evaluate(repeated_update) self.evaluate(aggregated_update) else: opt_repeated.apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) opt_aggregated.apply_gradients( [(grad_aggregated, aggregated_update_var)]) var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad( var0_np, indices_np, grads0_np, t, m0, v0, v0hat) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(aggregated_update_var)) self.assertAllCloseAccordingToType( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) def testBasicWithLearningRateDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.001 beta_1 = 0.9 beta_2 = 0.999 epsilon = 1e-7 decay = 0.5 opt = adam.NonFusedAdam( learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of NonFusedAdam for t in range(3): self.evaluate(update) lr_np = learning_rate / (1 + decay * t) var0_np, m0, v0 = adam_update_numpy( var0_np, grads0_np, t, m0, v0, lr=lr_np) var1_np, m1, v1 = adam_update_numpy( var1_np, grads1_np, t, m1, v1, lr=lr_np) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testBasicWithLearningRateInverseTimeDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.001 decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) beta_1 = 0.9 beta_2 = 0.999 epsilon = 1e-7 opt = adam.NonFusedAdam( learning_rate=lr_schedule, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 3 steps of NonFusedAdam for t in range(3): self.evaluate(update) lr_np = learning_rate / (1 + decay * t) var0_np, m0, v0 = adam_update_numpy( var0_np, grads0_np, t, m0, v0, lr=lr_np) var1_np, m1, v1 = adam_update_numpy( var1_np, grads1_np, t, m1, v1, lr=lr_np) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testTensorLearningRate(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adam.NonFusedAdam(tf.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of NonFusedAdam for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSharing(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adam.NonFusedAdam() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of intertwined NonFusedAdam1 and NonFusedAdam2. for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if t % 2 == 0: update1.run() else: update2.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) if __name__ == "__main__": tf.test.main()
42,782
42.835041
100
py
keras
keras-master/keras/optimizer_v2/adadelta_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adadelta Optimizer.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import combinations from keras.optimizer_v2 import adadelta _DATA_TYPES = [ tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128 ] class AdadeltaOptimizerTest(tf.test.TestCase, parameterized.TestCase): def doTestBasic(self, use_resource=False, use_callable_params=False): num_updates = 4 # number of ADADELTA steps to perform for dtype in _DATA_TYPES: for grad in [0.2, 0.1, 0.01]: for lr in [1.0, 0.5, 0.1]: var0_init = [1.0, 2.0] var1_init = [3.0, 4.0] if use_resource: var0 = tf.Variable(var0_init, dtype=dtype) var1 = tf.Variable(var1_init, dtype=dtype) else: var0 = tf.Variable(var0_init, dtype=dtype) var1 = tf.Variable(var1_init, dtype=dtype) grads = tf.constant([grad, grad], dtype=dtype) accum = 0.0 accum_update = 0.0 # ADADELTA gradient optimizer rho = 0.95 epsilon = 1e-8 if use_callable_params: adadelta_opt = adadelta.Adadelta( learning_rate=lambda: lr, # pylint: disable=cell-var-from-loop rho=lambda: rho, # pylint: disable=cell-var-from-loop epsilon=epsilon) # pylint: disable=cell-var-from-loop else: adadelta_opt = adadelta.Adadelta( learning_rate=lr, rho=rho, epsilon=epsilon) if not tf.executing_eagerly(): adadelta_update = 
adadelta_opt.apply_gradients( zip([grads, grads], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Assign slots slot = [None] * 2 slot_update = [None] * 2 slot[0] = adadelta_opt.get_slot(var0, "accum_grad") self.assertEqual(slot[0].shape, var0.shape) slot_update[0] = adadelta_opt.get_slot(var0, "accum_var") self.assertEqual(slot_update[0].shape, var0.shape) slot[1] = adadelta_opt.get_slot(var1, "accum_grad") self.assertEqual(slot[1].shape, var1.shape) slot_update[1] = adadelta_opt.get_slot(var1, "accum_var") self.assertEqual(slot_update[1].shape, var1.shape) # Fetch params to validate initial values self.assertAllClose(var0_init, self.evaluate(var0)) self.assertAllClose(var1_init, self.evaluate(var1)) update = [None] * num_updates tot_update = 0 for step in range(num_updates): # Run adadelta update for comparison if not tf.executing_eagerly(): self.evaluate(adadelta_update) else: adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1])) # Perform initial update without previous accum values accum = accum * rho + (grad**2) * (1 - rho) update[step] = ( np.sqrt(accum_update + epsilon) * (1. 
/ np.sqrt(accum + epsilon)) * grad) accum_update = ( accum_update * rho + (update[step]**2) * (1.0 - rho)) tot_update += update[step] * lr if not tf.executing_eagerly(): # Check that the accumulators have been updated # TODO(lxuechen): This is hard to test in eager mode for slot_idx in range(2): self.assertAllCloseAccordingToType( np.array([accum, accum], dtype=dtype.as_numpy_dtype(0)), self.evaluate(slot[slot_idx]), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [accum_update, accum_update], dtype=dtype.as_numpy_dtype(0)), self.evaluate(slot_update[slot_idx]), rtol=1e-5) # Check that the parameters have been updated self.assertAllCloseAccordingToType( np.array( [var0_init[0] - tot_update, var0_init[1] - tot_update], dtype=dtype.as_numpy_dtype(0)), self.evaluate(var0), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [var1_init[0] - tot_update, var1_init[1] - tot_update], dtype=dtype.as_numpy_dtype(0)), self.evaluate(var1), rtol=1e-5) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testResourceBasic(self): self.doTestBasic(use_resource=True) @combinations.generate(combinations.combine(mode=["eager"])) def testBasicCallableParams(self): self.doTestBasic(use_resource=True, use_callable_params=True) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize( loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0)) def testConstructAdadeltaWithLR(self): opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.) opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0) opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) def testConstructAdadeltaWithEpsilonValues(self): opt = adadelta.Adadelta(epsilon=None) self.assertEqual(opt.epsilon, 1e-7) opt = adadelta.Adadelta(epsilon=1e-8) self.assertEqual(opt.epsilon, 1e-8) if __name__ == "__main__": tf.test.main()
7,408
38.620321
114
py
keras
keras-master/keras/optimizer_v2/optimizer_v2_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for OptimizerV2.""" import tensorflow.compat.v2 as tf import collections from absl.testing import parameterized import numpy as np import keras from tensorflow.python.framework import test_util from keras import backend from keras import callbacks from keras import combinations from keras import keras_parameterized from keras import losses from keras import optimizer_v1 from keras import testing_utils from keras.engine import input_layer from keras.engine import sequential from keras.engine import training from keras.layers import core from keras.optimizer_v2 import adadelta from keras.optimizer_v2 import adagrad from keras.optimizer_v2 import adam from keras.optimizer_v2 import adamax from keras.optimizer_v2 import ftrl from keras.optimizer_v2 import gradient_descent from keras.optimizer_v2 import learning_rate_schedule from keras.optimizer_v2 import nadam from keras.optimizer_v2 import optimizer_v2 from keras.optimizer_v2 import rmsprop from keras.utils import np_utils _DATA_TYPES = [tf.half, tf.float32, tf.float64] # TODO(b/141710709): complex support in NVCC and ROCM. 
if (not test_util.IsBuiltWithNvcc() and not tf.test.is_built_with_rocm()): _DATA_TYPES += [tf.complex64, tf.complex128] class OptimizerTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testBasic(self): for dtype in _DATA_TYPES: with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(3.0) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, var_list=[var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params self.assertAllClose([-14., -13.], self.evaluate(var0)) self.assertAllClose([-6., -5.], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testAdaptiveLearningRate(self): for dtype in _DATA_TYPES: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) def loss(): return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(1.0) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, [var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params # var0 = [1., 2.] - 1.0 * [5, 5] self.assertAllClose([-4., -3.], self.evaluate(var0)) # var1 = [3., 4.] 
- 1.0 * [3, 3] self.assertAllClose([0., 1.], self.evaluate(var1)) sgd.learning_rate = 0.5 if tf.executing_eagerly(): sgd.minimize(loss, [var0, var1]) else: self.evaluate(opt_op) # Validate updated params # var0 = [-4., -3.] - 0.5 * [5, 5] self.assertAllClose([-6.5, -5.5], self.evaluate(var0)) # var1 = [0., 1.] - 0.5 * [3, 3] self.assertAllClose([-1.5, -0.5], self.evaluate(var1)) sgd.learning_rate = learning_rate_schedule.InverseTimeDecay( 0.5, decay_steps=1.0, decay_rate=0.5) if tf.executing_eagerly(): sgd.minimize(loss, [var0, var1]) else: self.evaluate(opt_op) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testPrecomputedGradient(self): for dtype in _DATA_TYPES: with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop grad_loss = tf.constant([42, -42], dtype=dtype) sgd = gradient_descent.SGD(3.0) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)], self.evaluate(var0)) self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoGradients(self): for dtype in _DATA_TYPES: with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop sgd_op = gradient_descent.SGD(3.0) with self.assertRaisesRegex(ValueError, 'No gradients'): # var1 has no gradient 
sgd_op.minimize(loss, var_list=[var1]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoGradientsForAnyVariables_Minimize(self): for dtype in _DATA_TYPES: with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) loss = lambda: tf.constant(5.0) sgd_op = gradient_descent.SGD(3.0) with self.assertRaisesRegex(ValueError, 'No gradients provided for any variable'): sgd_op.minimize(loss, var_list=[var0, var1]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoGradientsForAnyVariables_ApplyGradients(self): for dtype in _DATA_TYPES: with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) sgd_op = gradient_descent.SGD(3.0) with self.assertRaisesRegex(ValueError, 'No gradients provided for any variable'): sgd_op.apply_gradients([(None, var0), (None, var1)]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testGradientsAsVariables(self): for i, dtype in enumerate(_DATA_TYPES): with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(3.0) grads_and_vars = sgd._compute_gradients(loss, [var0, var1]) # Convert gradients to tf.Variables converted_grads = [ tf.Variable( tf.zeros([2], dtype), name='c_%d_%d' % (i, j)) for j, gv in enumerate(grads_and_vars) ] convert_ops = [ tf.compat.v1.assign(converted_grads[j], gv[0]) for j, gv in enumerate(grads_and_vars) ] # Run convert_ops to achieve the gradients converting self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(convert_ops) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer converted_grads_and_vars = 
list(zip(converted_grads, [var0, var1])) opt_op = sgd.apply_gradients(converted_grads_and_vars) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(convert_ops) self.evaluate(opt_op) # Validate updated params self.assertAllClose([-14., -13.], self.evaluate(var0)) self.assertAllClose([-6., -5.], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testComputeGradientsWithTensors(self): with testing_utils.use_gpu(): x = tf.convert_to_tensor(1.0) def f(): return x * x sgd = gradient_descent.SGD(3.0) grads_and_vars = sgd._compute_gradients(f, [x]) self.assertLen(grads_and_vars, 1) grad, x_as_var = grads_and_vars[0] self.assertIs(x, x_as_var) self.assertEqual(2.0, self.evaluate(grad)) with self.assertRaises(NotImplementedError): sgd.apply_gradients(grads_and_vars) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testConstraint(self): constraint_01 = lambda x: tf.clip_by_value(x, -0.1, 0.) constraint_0 = lambda x: tf.clip_by_value(x, 0., 1.) 
with testing_utils.use_gpu(): var0 = tf.Variable([1.0, 2.0], constraint=constraint_01) var1 = tf.Variable([3.0, 4.0], constraint=constraint_0) loss = lambda: 5 * var0 + 3 * var1 sgd = gradient_descent.SGD(3.0) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, var_list=[var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params self.assertAllClose([-0.1, -0.1], self.evaluate(var0)) self.assertAllClose([0., 0.], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testIterationWithoutMinimize(self): with testing_utils.use_gpu(): sgd = gradient_descent.SGD(3.0) self.evaluate(sgd.iterations.initializer) self.assertEqual(0, self.evaluate(sgd.iterations)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testConfig(self): with testing_utils.use_gpu(): opt = gradient_descent.SGD(learning_rate=1.0) config = opt.get_config() opt2 = gradient_descent.SGD.from_config(config) lr = opt._get_hyper('learning_rate') lr2 = opt2._get_hyper('learning_rate') self.evaluate(tf.compat.v1.global_variables_initializer()) # assert both are equal float values. self.assertEqual(self.evaluate(lr), self.evaluate(lr2)) var0 = tf.Variable([[1.0], [2.0]], dtype=tf.float32) loss = lambda: 3 * var0 # learning rate variable created when calling minimize. 
opt.minimize(loss, [var0]) opt3 = gradient_descent.SGD.from_config(config) lr3 = opt3._get_hyper('learning_rate') self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertEqual(self.evaluate(lr), self.evaluate(lr3)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testConfigWithLearningRateDecay(self): with testing_utils.use_gpu(): var0 = tf.Variable([[1.0], [2.0]], dtype=tf.float32) for decay_schedule in [ learning_rate_schedule.InverseTimeDecay( 0.5, decay_steps=1.0, decay_rate=0.1), learning_rate_schedule.PiecewiseConstantDecay( [5], [1., .5]) ]: step = 10 opt = gradient_descent.SGD(decay_schedule) config = opt.get_config() opt2 = gradient_descent.SGD.from_config(config) # assert both are equal float values. self.assertAllEqual( decay_schedule(step), opt._get_hyper('learning_rate')(step)) self.assertAllEqual( decay_schedule(step), opt2._get_hyper('learning_rate')(step)) loss = lambda: 3 * var0 # learning rate variable is created when calling minimize. 
opt.minimize(loss, [var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) config = opt.get_config() opt3 = gradient_descent.SGD.from_config(config) self.assertAllEqual( self.evaluate(opt._get_hyper('learning_rate')(step)), opt3._get_hyper('learning_rate')(step)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testGradClipValue(self): with testing_utils.use_gpu(): var = tf.Variable([1.0, 2.0]) loss = lambda: 3 * var opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0) opt_op = opt.minimize(loss, [var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertAllClose([0., 1.], self.evaluate(var)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testGradClipNorm(self): with testing_utils.use_gpu(): var = tf.Variable([1.0]) loss = lambda: 3 * var opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0) opt_op = opt.minimize(loss, [var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertAllClose([0.], self.evaluate(var)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testGradGlobalClipNorm(self): with testing_utils.use_gpu(): # l2 norm is 5.0 var1 = tf.Variable([1.0]) var2 = tf.Variable([2.0]) loss = lambda: 3 * var1 + 4 * var2 opt = gradient_descent.SGD(learning_rate=1.0, global_clipnorm=2.0) opt_op = opt.minimize(loss, [var1, var2]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # grad1 = 3.0 * 2.0 / 5.0 = 1.2 self.assertAllClose([-.2], self.evaluate(var1)) # grad2 = 4.0 * 2.0 / 5.0 = 1.6 self.assertAllClose([.4], self.evaluate(var2)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInvalidClipNorm(self): with self.assertRaisesRegex(ValueError, '>= 0'): gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0) @combinations.generate( combinations.combine( mode=['graph', 'eager'], clip_type=['clipnorm', 'global_clipnorm', 
'clipvalue'])) def testConfigWithCliping(self, clip_type): opt = gradient_descent.SGD(learning_rate=1.0, **{clip_type: 2.0}) config = opt.get_config() opt = gradient_descent.SGD.from_config(config) self.assertEqual(getattr(opt, clip_type), 2.0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInvalidKwargs(self): with self.assertRaisesRegex(TypeError, 'Unexpected keyword argument'): gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testWeights(self): with testing_utils.use_gpu(): opt1 = adam.Adam(learning_rate=1.0) var1 = tf.Variable([1.0, 2.0], dtype=tf.float32) loss1 = lambda: 3 * var1 opt_op_1 = opt1.minimize(loss1, [var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) config = opt1.get_config() opt2 = adam.Adam.from_config(config) var2 = tf.Variable([1.0, 2.0], dtype=tf.float32) loss2 = lambda: 3 * var2 opt_op_2 = opt2.minimize(loss2, [var2]) weights = opt1.get_weights() # Assert set_weights and both variables get updated to same value. self.evaluate(tf.compat.v1.global_variables_initializer()) opt2.set_weights(weights) self.evaluate([opt_op_1, opt_op_2]) self.assertAllClose(self.evaluate(var1), self.evaluate(var2)) self.assertEqual(1, self.evaluate(opt1.iterations)) self.assertEqual(1, self.evaluate(opt2.iterations)) var3 = tf.Variable([1.0, 2.0, 3.0], dtype=tf.float32) var4 = tf.Variable([4.0, 5.0, 6.0], dtype=tf.float32) loss3 = lambda: 3 * var3 + 5 * var4 opt_op_3 = opt1.minimize(loss3, [var3, var4]) # Assert set_weights with ValueError since weight list does not match. self.evaluate(tf.compat.v1.global_variables_initializer()) weights = opt1.get_weights() with self.assertRaisesRegex(ValueError, 'but the optimizer was'): opt2.set_weights(weights) # Assert set_weights and variables get updated to same value. 
var5 = tf.Variable([1.0, 2.0, 3.0], dtype=tf.float32) var6 = tf.Variable([4.0, 5.0, 6.0], dtype=tf.float32) loss4 = lambda: 3 * var5 + 5 * var6 opt_op_4 = opt2.minimize(loss4, [var5, var6]) self.evaluate(tf.compat.v1.global_variables_initializer()) opt2.set_weights(weights) self.evaluate([opt_op_3, opt_op_4]) self.assertAllClose( self.evaluate([var3, var4]), self.evaluate([var5, var6])) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testGettingHyperParameters(self): with self.test_session(): opt = adam.Adam(learning_rate=1.0) var = tf.Variable([1.0, 2.0], dtype=tf.float32) loss = lambda: 3 * var opt_op = opt.minimize(loss, [var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) lr = self.evaluate(opt.lr) self.assertEqual(1.0, lr) opt.lr = 2.0 lr = self.evaluate(opt.lr) self.assertEqual(2.0, lr) self.evaluate(opt.lr.assign(3.0)) lr = self.evaluate(opt.lr) self.assertEqual(3.0, lr) with self.assertRaises(AttributeError): opt.not_an_attr += 3 @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testGettingHyperParametersWithLrInConstructor(self): with self.test_session(): opt = gradient_descent.SGD(lr=3.0) var = tf.Variable([1.0, 2.0], dtype=tf.float32) loss = lambda: 3 * var opt_op = opt.minimize(loss, [var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt.learning_rate, tf.Variable) lr = self.evaluate(opt.lr) self.assertEqual(3.0, lr) opt.lr = 2.0 lr = self.evaluate(opt.lr) self.assertEqual(2.0, lr) self.evaluate(opt.lr.assign(4.0)) lr = self.evaluate(opt.lr) self.assertEqual(4.0, lr) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testDir(self): opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.1) dir_result = set(dir(opt)) self.assertIn('learning_rate', dir_result) # Hyperparameter self.assertIn('lr', dir_result) # Hyperparameter 
self.assertIn('momentum', dir_result) # Hyperparameter self.assertIn('nesterov', dir_result) # Attribute self.assertIn('minimize', dir_result) # Attribute @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testOptimizerWithKerasModel(self): a = input_layer.Input(shape=(3,), name='input_a') b = input_layer.Input(shape=(3,), name='input_b') dense = core.Dense(4, name='dense') c = dense(a) d = dense(b) e = core.Dropout(0.5, name='dropout')(c) model = training.Model([a, b], [d, e]) optimizer = gradient_descent.SGD(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss, metrics=['mae']) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) model.fit([input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testOptimizerWithCallbacks(self): np.random.seed(1331) input_np = np.random.random((10, 3)) output_np = np.random.random((10, 4)) a = input_layer.Input(shape=(3,), name='input_a') model = sequential.Sequential() model.add(core.Dense(4, kernel_initializer='zeros', name='dense')) model.add(core.Dropout(0.5, name='dropout')) model(a) optimizer = gradient_descent.SGD(learning_rate=0.1) model.compile(optimizer, loss='mse', metrics=['mae']) # This does not reduce the LR after the first epoch (due to low delta). cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5) ] model.fit( input_np, output_np, batch_size=10, validation_data=(input_np, output_np), callbacks=cbks, epochs=2, verbose=0) self.assertAllClose( float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4) # This should reduce the LR after the first epoch (due to high delta). 
cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5) ] model.fit( input_np, output_np, batch_size=10, validation_data=(input_np, output_np), callbacks=cbks, epochs=2, verbose=2) self.assertAllClose( float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4) def testOptimizerSetIterations(self): global_step = tf.compat.v1.train.get_or_create_global_step() opt = adam.Adam(learning_rate=1.0) opt.iterations = global_step var = tf.Variable([1.0, 2.0], dtype=tf.float32) self.evaluate(tf.compat.v1.global_variables_initializer()) init_step_value = self.evaluate(global_step) loss = lambda: 3 * var opt_op = opt.minimize(loss, [var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) new_step_value = self.evaluate(global_step) self.assertEqual(new_step_value, init_step_value + 1) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testOptimizerWithCallableVarList(self): train_samples = 20 input_dim = 1 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = np_utils.to_categorical(y) num_hidden = 1 model = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes) opt = adam.Adam() loss = lambda: losses.mean_squared_error(model(x), y) var_list = lambda: model.trainable_weights with self.assertRaisesRegex( ValueError, 'Weights for model .* have not yet been created'): var_list() train_op = opt.minimize(loss, var_list) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertEqual( [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm'))) self.evaluate(train_op) self.assertNotEqual( [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm'))) self.assertLen(var_list(), 4) def testVarKey(self): with tf.compat.v1.get_default_graph().as_default(): a = tf.Variable([1., 2.], name='var') b = 
tf.Variable([1.], name='var') self.assertTrue(a._in_graph_mode) self.assertTrue(b._in_graph_mode) var_key = optimizer_v2._var_key(a) self.assertEqual('var', var_key) var_key = optimizer_v2._var_key(b) self.assertEqual('var_1', var_key) def testVarName(self): with tf.compat.v1.get_default_graph().as_default(): var = tf.Variable([1., 2.], name='var') loss = var + 1. opt = adam.Adam() opt.get_updates(loss, [var]) opt_vars = opt.variables() self.assertLen(opt_vars, 3) self.assertEqual('Adam/iter:0', opt_vars[0].name) self.assertEqual('Adam/var/m:0', opt_vars[1].name) var_2 = tf.Variable([1., 2.], name='var_2') loss = var_2 + 1. with backend.name_scope('outter'): opt.get_updates(loss, [var_2]) opt_vars = opt.variables() self.assertLen(opt_vars, 5) self.assertEqual('outter/Adam/var_2/m:0', opt_vars[3].name) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testEmptyVarList(self): opt = gradient_descent.SGD(1.) opt.minimize(lambda: tf.constant(1.), []) opt.apply_gradients([]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testAggregationTrue(self): # Test that experimental_aggregate_gradients=True works without distributed # strategy. var = tf.Variable([1., 2.]) opt = gradient_descent.SGD(3.0) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose([1., 2.], self.evaluate(var)) opt_op = opt.apply_gradients([([0.1, 0.1], var)], experimental_aggregate_gradients=True) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertAllClose([0.7, 1.7], self.evaluate(var)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testAggregationFalse(self): # Test that experimental_aggregate_gradients=False works without distributed # strategy. 
var = tf.Variable([1., 2.]) opt = gradient_descent.SGD(3.0) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose([1., 2.], self.evaluate(var)) opt_op = opt.apply_gradients([([0.1, 0.1], var)], experimental_aggregate_gradients=False) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertAllClose([0.7, 1.7], self.evaluate(var)) @combinations.generate(combinations.combine(mode=['eager'])) def testRestoringIterationsWithoutAnOptimizer(self): opt = gradient_descent.SGD(3.0) opt.iterations.assign(5) checkpoint = tf.train.Checkpoint(optimizer=opt) path = checkpoint.save(self.get_temp_dir()) # Following verifies that the `iterations` can be restored with the absence # of an `Optimizer` object (using a `Checkpoint` as a placeholder). iterations_var = tf.Variable(0, dtype=tf.int64) optimizer_checkpoint = tf.train.Checkpoint(iter=iterations_var) checkpoint_to_restore = tf.train.Checkpoint( optimizer=optimizer_checkpoint) checkpoint_to_restore.restore(path) self.assertEqual(5, self.evaluate(iterations_var)) @combinations.generate(combinations.combine(mode=['eager'])) def testSlotWithNonstandardShapeRestoresBasedOnCheckpoint(self): # First create an optimizer and a slot variable with a non-standard shape. x = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float32) slot_shape = [2, 1] optimizer_1 = optimizer_v2.OptimizerV2(name='test') optimizer_1.add_slot(x, 'test_slot', 'ones', shape=slot_shape) # Then save the variable and optimizer to a checkpoint. 
checkpoint_1 = tf.train.Checkpoint(var=x, optimizer=optimizer_1) checkpoint_path = checkpoint_1.save(self.get_temp_dir()) # Create a new optimizer and call restore on it (and x) optimizer_2 = optimizer_v2.OptimizerV2(name='test') checkpoint_2 = tf.train.Checkpoint(var=x, optimizer=optimizer_2) checkpoint_2.restore(checkpoint_path) self.assertEqual(slot_shape, optimizer_2.get_slot(x, 'test_slot').shape.as_list()) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_gradient_aggregator(self): def gradient_aggregator(grads_and_vars): # Simulate an all-reduce where the other replica has zeros for gradients, # by dividing each gradient by 2. grads = [g for g, _ in grads_and_vars] vars = [v for _, v in grads_and_vars] # pylint: disable=redefined-builtin all_reduced_grads = [g / 2 for g in grads] return list(zip(all_reduced_grads, vars)) var = tf.Variable(2.0) sgd = gradient_descent.SGD(1.0, gradient_aggregator=gradient_aggregator) loss = lambda: 2 * var opt_op = sgd.minimize(loss, var_list=[var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertEqual(self.evaluate(var), 1.0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_override_aggregate_gradients(self): class MyOptimizer(gradient_descent.SGD): def _aggregate_gradients(self, grads_and_vars): # Simulate an all-reduce where the other replica has zeros for # gradients, by dividing each gradient by 2. 
grads = [g for g, _ in grads_and_vars] vars = [v for _, v in grads_and_vars] # pylint: disable=redefined-builtin all_reduced_grads = [g / 2 for g in grads] return list(zip(all_reduced_grads, vars)) var = tf.Variable(2.0) sgd = MyOptimizer(1.0) loss = lambda: 2 * var opt_op = sgd.minimize(loss, var_list=[var]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) self.assertEqual(self.evaluate(var), 1.0) @keras_parameterized.run_all_keras_modes class OptimizersCompatibilityTest(keras_parameterized.TestCase): def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') np.random.seed(1331) with testing_utils.use_gpu(): train_samples = 20 input_dim = 3 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = np_utils.to_categorical(y) num_hidden = 5 model_v1 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_v1.compile( opt_v1, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) model_v1.fit(x, y, batch_size=5, epochs=1) model_v2 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_v2.set_weights(model_v1.get_weights()) model_v2.compile( opt_v2, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) if not tf.compat.v1.executing_eagerly_outside_functions(): model_v2._make_train_function() if test_weights: opt_v2.set_weights(opt_v1.get_weights()) hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False) hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False) self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(), rtol=1e-5, atol=1e-5) self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'], 
rtol=1e-5, atol=1e-5) def testAdadeltaCompatibility(self): opt_v1 = optimizer_v1.Adadelta(lr=0.01) opt_v2 = adadelta.Adadelta(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2) def testAdagradCompatibility(self): opt_v1 = optimizer_v1.Adagrad(lr=0.01) opt_v2 = adagrad.Adagrad(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2) def testAdamCompatibility(self): opt_v1 = optimizer_v1.Adam() opt_v2 = adam.Adam() self._testOptimizersCompatibility(opt_v1, opt_v2) def testAdamaxCompatibility(self): opt_v1 = optimizer_v1.Adamax(lr=0.01) opt_v2 = adamax.Adamax(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2) def testNadamCompatibility(self): opt_v1 = optimizer_v1.Nadam(lr=0.001) opt_v2 = nadam.Nadam(learning_rate=0.001) self._testOptimizersCompatibility(opt_v1, opt_v2) def testMomentumCompatibility(self): opt_v1 = optimizer_v1.SGD(lr=0.01, momentum=0.9) opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9) self._testOptimizersCompatibility(opt_v1, opt_v2) def testRMSpropCompatibility(self): opt_v1 = optimizer_v1.RMSprop() opt_v2 = rmsprop.RMSprop() self._testOptimizersCompatibility(opt_v1, opt_v2) def testSGDCompatibility(self): opt_v1 = optimizer_v1.SGD(lr=0.01) opt_v2 = gradient_descent.SGD(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2, False) def testNumericEquivalenceForNesterovMomentum(self): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') np.random.seed(1331) with testing_utils.use_gpu(): train_samples = 20 input_dim = 3 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = np_utils.to_categorical(y) num_hidden = 5 model_k_v1 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, 
input_dim=input_dim) model_k_v2.set_weights(model_k_v1.get_weights()) model_tf = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_tf.set_weights(model_k_v2.get_weights()) opt_k_v1 = optimizer_v1.SGD(momentum=0.9, nesterov=True) opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True) opt_tf = tf.compat.v1.train.MomentumOptimizer( learning_rate=0.01, momentum=0.9, use_nesterov=True) model_k_v1.compile( opt_k_v1, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) model_k_v2.compile( opt_k_v2, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) model_tf.compile( opt_tf, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False) hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False) hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False) self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights()) self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights()) self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights()) self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss']) self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss']) def testNumericEquivalenceForAmsgrad(self): if tf.executing_eagerly(): self.skipTest( 'v1 optimizer does not run in eager mode') np.random.seed(1331) with testing_utils.use_gpu(): train_samples = 20 input_dim = 3 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = np_utils.to_categorical(y) num_hidden = 5 model_k_v1 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2 = testing_utils.get_small_sequential_mlp( 
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2.set_weights(model_k_v1.get_weights()) opt_k_v1 = optimizer_v1.Adam(amsgrad=True) opt_k_v2 = adam.Adam(amsgrad=True) model_k_v1.compile( opt_k_v1, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) model_k_v2.compile( opt_k_v2, loss='categorical_crossentropy', metrics=[], run_eagerly=testing_utils.should_run_eagerly()) hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False) hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False) self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights()) self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights()) self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss']) # Note: These tests are kept in a separate class to avoid bugs in some # distributions of Python that break AutoGraph which is used by tf.function. @combinations.generate(combinations.combine(mode=['eager'])) class OptimizerWithFunctionTest(tf.test.TestCase, parameterized.TestCase): def testBasic(self): var = tf.Variable([1.0, 2.0], dtype=tf.float32) loss = lambda: 3 * var opt = adam.Adam(learning_rate=1.0) @tf.function def fn(): opt.minimize(loss, [var]) return var self.assertAllClose([0., 1.], fn(), atol=1e-4) self.assertAllClose([-1, 0.], fn(), atol=1e-4) def testBasicWithConstantDecay(self): var = tf.Variable([1.0, 2.0], dtype=tf.float32) loss = lambda: 3 * var opt = adam.Adam(learning_rate=1.0) @tf.function def fn(): opt.minimize(loss, [var]) return var self.assertAllClose([0., 1.], fn(), atol=1e-4) self.assertAllClose([-1, 0.], fn(), atol=1e-4) def testVarKeyWithVarCreatedInEager(self): a = tf.Variable([1., 2.], name='var') b = tf.Variable([1.], name='var') @test_util.also_run_as_tf_function def var_key_test(): self.assertFalse(a._in_graph_mode) self.assertFalse(b._in_graph_mode) var_key_a = optimizer_v2._var_key(a) self.assertStartsWith(var_key_a, 'var_') var_key_b = 
optimizer_v2._var_key(b) self.assertStartsWith(var_key_b, 'var_') self.assertNotEqual(var_key_a, var_key_b) var_key_test() def testLearningRateDecayUsedInTwoFunctions(self): a = tf.Variable([1., 2.], name='var') b = tf.Variable([1.], name='var') learning_rate_decay = learning_rate_schedule.InverseTimeDecay( 0.5, decay_steps=1.0, decay_rate=0.5) opt = adam.Adam(learning_rate=learning_rate_decay) loss_a = lambda: 3 * a loss_b = lambda: 2 * b @tf.function def fn_a(): opt.minimize(loss_a, [a]) return a @tf.function def fn_b(): opt.minimize(loss_b, [b]) return b fn_a() fn_b() _NUM_LEARNERS = 50 APPLY_SCOPE = 'debug_apply' ALLOWLIST = [ # optimizer_v2._deduplicate_indexed_slices contains an indexed slice: # array_ops.shape(unique_indices)[0] # which winds up expanding to [0:1:1] thereby creating three constants # to represent the indices. ('embeddings/strided_slice/stack', 'Const'), ] def get_inputs(op): op_inputs = list(op.inputs) + op.control_inputs names = [i.name for i in op_inputs] op_inputs = [getattr(i, 'op', i) for i in op_inputs] return op_inputs, names def strip_name(node): if 'Placeholder' in node.op: return node.name = '' def topological_sort(graph): graph_ops = graph.get_operations() sources = [] result = [] inputs = {} outputs = collections.defaultdict(set) for op in graph_ops: op_inputs = get_inputs(op)[0] if not op_inputs: sources.append(op) inputs[op] = set(op_inputs) for i in op_inputs: outputs[i].add(op) while sources: op = sources.pop() for op_output in outputs[op]: inputs[op_output].remove(op) if not inputs[op_output]: sources.append(op_output) result.append(op) # Check correctness. if len(result) != len(graph_ops): raise ValueError('Sort result has {} ops, source graph has {}.' .format(len(result), len(graph_ops))) sort_check_seen = set() for op in result: sort_check_seen.add(op) for i in get_inputs(op)[0]: assert i in sort_check_seen return result def identify_redundant_ops(graph): """Implements basic common subexpression elimination. 
This is not intended to replicate the graph semantics of TensorFlow Graphs (for instance it does not handle stateful op ordering), nor is it intended to replace the common subexpression elimination Grappler pass. Rather, it provides a high level sanity check that clearly redundant ops are not being created. Args: graph: The graph to be analyzed. Returns: A count of the duplicate ops and a description of the structure of each. """ sorted_ops = topological_sort(graph) duplicates = collections.defaultdict(list) unified_node_defs = {} name_map = {} for op in sorted_ops: input_names = [] for op_input, name in zip(*get_inputs(op)): input_def = op_input.node_def # Operations can have multiple outputs. We track which is used to prevent # overzealous elimination. input_def.name = name input_def.input[:] = [name_map.get(i, i) for i in input_def.input] strip_name(input_def) # NodeDef.SerializeToString() does not provide identical serialized # representations for identical NodeDefs, so we instead use string # representation as a dict key. key = repr(input_def) if key in unified_node_defs: input_names.append(unified_node_defs[key]) else: unified_node_defs[key] = op_input.name input_names.append(name) node_def = op.node_def node_def.input[:] = input_names strip_name(node_def) key = repr(node_def) duplicates[key].append(op) name_map[op.name] = duplicates[key][0].name num_duplicates = 0 duplicate_types = [] for standard_def, op_defs in duplicates.items(): # We are only interested in testing the apply method of the optimizer op_defs = [i for i in op_defs if APPLY_SCOPE in i.name] # We only check for per-apply redundant ops. if len(op_defs) < _NUM_LEARNERS: continue # Certain ops are simply not worth eliminating, and are instead simply # ignored. 
name, op_type = op_defs[0].name, op_defs[0].type if any(allowlisted_scope in name and op_type == allowlisted_type for allowlisted_scope, allowlisted_type in ALLOWLIST): continue num_duplicates += len(op_defs) traceback = [] for level in op_defs[0].traceback: traceback.append(' {} {}:{}'.format(level[0], level[2], level[1])) duplicate_types.append( '# Example name: {}\n# Op creation stack:\n{}\n{}'.format( op_defs[0].name, '\n'.join(traceback), standard_def)) return num_duplicates, duplicate_types def make_model(): r"""Constructs a simple ensemble of weak learners model. --------- --------- --------- --------- | Input | | Input | ... | Input | | Input | --------- --------- --------- --------- | | | | V V V V --------- --------- --------- --------- | Embed | | Embed | ... | Embed | | Embed | --------- --------- --------- --------- | | | | V V V V --------- --------- --------- --------- | Dense | | Dense | ... | Dense | | Dense | --------- --------- --------- --------- \ | | / \ | | / --------------------------------------------- | --------- | Dense | --------- This topology is chosen because it exercises both dense and sparse update paths. Returns: A model for testing optimizer coefficient reuse. 
""" inputs = [] intermediates = [] for _ in range(_NUM_LEARNERS): inp = keras.layers.Input(shape=(1,), dtype=tf.int32) layer = keras.layers.Embedding(1, 4)(inp) layer = keras.layers.Dense(1)(layer) inputs.append(inp) intermediates.append(layer) layer = keras.layers.Concatenate(axis=-1)(intermediates) layer = keras.layers.Dense(1)(layer) return keras.models.Model(inputs, layer) COEFFICIENT_PARAMS = ( ('Adadelta', adadelta.Adadelta, None), ('Adagrad', adagrad.Adagrad, None), ('Adam', adam.Adam, None), ('Adam_amdgrad', adam.Adam, dict(amsgrad=True)), ('Adamax', adamax.Adamax, None), ('Ftrl', ftrl.Ftrl, None), ('Ftrl_l2_shrinkage', ftrl.Ftrl, dict(l2_shrinkage_regularization_strength=0.1)), ('SGD', gradient_descent.SGD, None), ('SGD_momentum', gradient_descent.SGD, dict(momentum=0.5)), ('Nadam', nadam.Nadam, None), ('RMSprop', rmsprop.RMSprop, None), ('RMSprop_centered', rmsprop.RMSprop, dict(centered=True)), ('RMSprop_momentum', rmsprop.RMSprop, dict(momentum=0.5)), ('RMSprop_momentum_centered', rmsprop.RMSprop, dict(momentum=0.5, centered=True)), ) class OptimizerCoefficientTest(keras_parameterized.TestCase): @parameterized.named_parameters(*COEFFICIENT_PARAMS) def test_duplicate_ops(self, optimizer_class, init_kwargs=None): init_kwargs = init_kwargs or {} optimizer = optimizer_class(**init_kwargs) graph = tf.Graph() with graph.as_default(): model = make_model() trainable_variables = model.trainable_variables grads = optimizer.get_gradients(model.outputs[0], trainable_variables) with backend.name_scope(APPLY_SCOPE): optimizer.apply_gradients(zip(grads, trainable_variables)) num_duplicates, duplicate_types = identify_redundant_ops(graph) if num_duplicates: # Avoid spamming logs. 
if len(duplicate_types) > 3: duplicate_types = duplicate_types[:3] + ['...'] num_total = len(graph.get_operations()) raise ValueError('{} of {} ({:.1f}%) ops were duplicates:\n\n{}'.format( num_duplicates, num_total, num_duplicates / num_total * 100, '\n'.join(duplicate_types))) @parameterized.named_parameters(*COEFFICIENT_PARAMS) def test_subclass_compat(self, optimizer_class, init_kwargs=None): """Ensure that subclassed optimizers without apply_state still work.""" class SubclassedOptimizer(optimizer_class): def _resource_apply_dense(self, grad, var): # pylint: disable=useless-super-delegation return super(SubclassedOptimizer, self)._resource_apply_dense(grad, var) def _resource_apply_sparse(self, grad, var, indices): # pylint: disable=useless-super-delegation return super(SubclassedOptimizer, self)._resource_apply_sparse( grad, var, indices) init_kwargs = init_kwargs or {} optimizer = SubclassedOptimizer(**init_kwargs) graph = tf.Graph() with graph.as_default(): model = make_model() trainable_variables = model.trainable_variables grads = optimizer.get_gradients(model.outputs[0], trainable_variables) with backend.name_scope(APPLY_SCOPE): optimizer.apply_gradients(zip(grads, trainable_variables)) if __name__ == '__main__': tf.test.main()
48,254
36.847059
103
py
keras
keras-master/keras/optimizer_v2/rmsprop_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for rmsprop.""" import tensorflow.compat.v2 as tf import copy import itertools import math from absl.testing import parameterized import numpy as np from tensorflow.python.framework import test_util from keras import combinations from keras import testing_utils from keras.optimizer_v2 import learning_rate_schedule from keras.optimizer_v2 import rmsprop _DATA_TYPES = [ tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128 ] _TEST_PARAM_VALUES = [ # learning_rate, rho, momentum, epsilon, centered [0.05, 0.9, 0.0, 1e-3, True], [0.05, 0.9, 0.0, 1e-3, False], [0.1, 0.9, 0.0, 1e-3, True], [0.01, 0.9, 0.0, 1e-5, True], [0.01, 0.9, 0.9, 1e-5, True], ] _TESTPARAMS = [ [data_type] + values for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES) ] class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase): def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum, epsilon, centered): rms_t = rms * rho + (1 - rho) * g * g if centered: mg_t = mg * rho + (1 - rho) * g denom_t = rms_t - mg_t * mg_t else: mg_t = mg denom_t = rms_t if momentum > 0.: mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon)) var_t = var - mom_t else: mom_t = mom var_t = var - lr * g / (np.sqrt(denom_t) + epsilon) return var_t, mg_t, rms_t, mom_t def 
_sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom, lr, rho, momentum, epsilon, centered): mg_t = copy.deepcopy(mg) rms_t = copy.deepcopy(rms) mom_t = copy.deepcopy(mom) var_t = copy.deepcopy(var) for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue if centered: mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex] else: denom_t = rms_t[gindex] if momentum > 0.: mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t + epsilon) var_t[gindex] = var[gindex] - mom_t[gindex] else: mom_t[gindex] = mom[gindex] var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon) return var_t, mg_t, rms_t, mom_t def testDense(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS: with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu(): # Initialize variables for numpy implementation. 
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, dtype=dtype) var1 = tf.Variable(var1_np, dtype=dtype) grads0 = tf.constant(grads0_np, dtype=dtype) grads1 = tf.constant(grads1_np, dtype=dtype) opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) if centered: mg0 = opt.get_slot(var0, "mg") mg1 = opt.get_slot(var1, "mg") else: mg0 = None mg1 = None if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of RMSprop for _ in range(1, 4): self.evaluate(update) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho, momentum, epsilon, centered) # Validate updated params if centered: self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0)) 
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testDenseWithLearningRateDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): var0_np = np.array([1.0, 2.0]) grads0_np = np.array([0.1, 0.2]) var1_np = np.array([3.0, 4.0]) grads1_np = np.array([0.01, 0.2]) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.01 rho = 0.9 momentum = 0.0 epsilon = 1e-7 centered = False decay = 0.5 opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered, decay=decay) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0]) mg1_np = np.array([0.0, 0.0]) rms0_np = np.array([0.0, 0.0]) rms1_np = np.array([0.0, 0.0]) mom0_np = np.array([0.0, 0.0]) mom1_np = np.array([0.0, 0.0]) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 4 steps of RMSprop for t in range(2): self.evaluate(update) lr = learning_rate / (1 + decay * t) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum, 
epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum, epsilon, centered) # Validate updated params self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testDenseWithLearningRateInverseTimeDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): var0_np = np.array([1.0, 2.0]) grads0_np = np.array([0.1, 0.2]) var1_np = np.array([3.0, 4.0]) grads1_np = np.array([0.01, 0.2]) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.01 rho = 0.9 momentum = 0.0 epsilon = 1e-7 centered = False decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) opt = rmsprop.RMSprop( learning_rate=lr_schedule, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0]) mg1_np = np.array([0.0, 0.0]) rms0_np = np.array([0.0, 0.0]) rms1_np = np.array([0.0, 0.0]) mom0_np = np.array([0.0, 0.0]) mom1_np = np.array([0.0, 0.0]) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # 
Run 4 steps of RMSprop for t in range(2): self.evaluate(update) lr = learning_rate / (1 + decay * t) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum, epsilon, centered) # Validate updated params self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = rmsprop.RMSprop( learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0, centered=False).minimize( loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[0., 1.]], self.evaluate(var0), atol=0.01) def testMinimizeSparseResourceVariableCentered(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred # loss = lambda: pred * pred # pylint: disable=cell-var-from-loop sgd_op = rmsprop.RMSprop( learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0, centered=True).minimize( loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0), atol=0.01) def testSparse(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS: with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu(): # Initialize variables for numpy implementation. 
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0_np_indices = np.array([0], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np), tf.constant(grads0_np_indices), tf.constant([1])) grads1_np_indices = np.array([1], dtype=np.int32) grads1 = tf.IndexedSlices( tf.constant(grads1_np), tf.constant(grads1_np_indices), tf.constant([1])) opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) if centered: mg0 = opt.get_slot(var0, "mg") self.assertEqual(mg0 is not None, centered) mg1 = opt.get_slot(var1, "mg") self.assertEqual(mg1 is not None, centered) else: mg0 = None mg1 = None rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of RMSprop for _ in range(1, 4): self.evaluate(update) var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy( var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho, momentum, 
epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy( var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho, momentum, epsilon, centered) # Validate updated params if centered: self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0)) self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1)) self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["eager"])) def testCallableParams(self): for dtype in _DATA_TYPES: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) learning_rate = lambda: 2.0 rho = lambda: 0.9 momentum = lambda: 0.0 epsilon = 1.0 opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the rms accumulators where 1. So we should see a normal # update: v -= grad * learning_rate opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check the parameters. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)), 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)), 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) ]), self.evaluate(var1)) # Step 2: the root mean square accumulators contain the previous update. 
opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check the parameters. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) - (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)), 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) - (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) - (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)), 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) - (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)) ]), self.evaluate(var1)) def testConstructRMSpropWithLR(self): opt = rmsprop.RMSprop(lr=1.0) opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0) opt_3 = rmsprop.RMSprop(learning_rate=0.1) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) @combinations.generate(combinations.combine(mode=["eager"])) def testSlotsUniqueEager(self): v1 = tf.Variable(1.) v2 = tf.Variable(1.) opt = rmsprop.RMSprop(1., momentum=0., centered=False) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and one unique slot variable for v1 and v2. self.assertLen(set({id(v) for v in opt.variables()}), 3) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. 
self.assertLen(set({id(v) for v in opt.variables()}), 5) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and three unique slot variables for v1 and v2 self.assertLen(set({id(v) for v in opt.variables()}), 7) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) @combinations.generate(combinations.combine(mode=["eager"])) def testMomentumProperValue(self): with self.assertRaisesRegex(ValueError, r"`momentum` must be between \[0, 1\]. " r"Received: momentum=2.5 \(of type <class " r"\'float\'>\)."): rmsprop.RMSprop(1., momentum=2.5, centered=False) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class SlotColocationTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters([True, False]) @test_util.run_gpu_only def testRunMinimizeOnGPUForCPUVariables(self, use_resource): with tf.device("/device:CPU:0"): if use_resource: var0 = tf.Variable([1.0, 2.0], dtype=tf.float32) var1 = tf.Variable([3.0, 4.0], dtype=tf.float32) else: var0 = tf.Variable([1.0, 2.0], dtype=tf.float32) var1 = tf.Variable([3.0, 4.0], dtype=tf.float32) def loss(): return 5 * var0 + 3 * var1 opt = rmsprop.RMSprop( learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0) # Fetch params to validate initial values self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step through optimizer on GPU. # Slot variables are created the first time optimizer is used on some # variable. This tests that slot variables will be colocated with the base # variable. with tf.device("/device:GPU:0"): # Note that for eager execution, minimize expects a function instead of a # Tensor. 
opt_op = opt.minimize(loss, [var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params, All variables should have decreased. self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)), msg="updated variables: %s" % self.evaluate(var0)) self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)), msg="updated variables: %s" % self.evaluate(var1)) if __name__ == "__main__": tf.test.main()
23,974
39.635593
114
py
keras
keras-master/keras/optimizer_v2/legacy_learning_rate_decay_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for learning rate decay.""" import tensorflow.compat.v2 as tf import math from keras import combinations from keras import keras_parameterized from keras.optimizer_v2 import legacy_learning_rate_decay as learning_rate_decay @combinations.generate(combinations.combine(mode=["graph", "eager"])) class LRDecayTest(keras_parameterized.TestCase): def testContinuous(self): self.evaluate(tf.compat.v1.global_variables_initializer()) step = 5 decayed_lr = tf.compat.v1.train.exponential_decay(0.05, step, 10, 0.96) expected = .05 * 0.96**(5.0 / 10.0) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testStaircase(self): if tf.executing_eagerly(): step = tf.Variable(0) self.evaluate(tf.compat.v1.global_variables_initializer()) decayed_lr = tf.compat.v1.train.exponential_decay( .1, step, 3, 0.96, staircase=True) # No change to learning rate due to staircase expected = .1 self.evaluate(step.assign(1)) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) expected = .1 self.evaluate(step.assign(2)) self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6) # Decayed learning rate expected = .1 * 0.96 ** (100 // 3) self.evaluate(step.assign(100)) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testVariables(self): step = tf.Variable(1) decayed_lr = 
tf.compat.v1.train.exponential_decay( .1, step, 3, 0.96, staircase=True) self.evaluate(tf.compat.v1.global_variables_initializer()) # No change to learning rate assign_1 = step.assign(1) if not tf.executing_eagerly(): self.evaluate(assign_1.op) self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6) assign_2 = step.assign(2) if not tf.executing_eagerly(): self.evaluate(assign_2.op) self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6) # Decayed learning rate assign_100 = step.assign(100) if not tf.executing_eagerly(): self.evaluate(assign_100.op) expected = .1 * 0.96**(100 // 3) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testPiecewiseConstant(self): x = tf.Variable(-999) decayed_lr = tf.compat.v1.train.piecewise_constant( x, [100, 110, 120], [1.0, 0.1, 0.01, 0.001]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6) self.evaluate(x.assign(100)) self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6) self.evaluate(x.assign(105)) self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6) self.evaluate(x.assign(110)) self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6) self.evaluate(x.assign(120)) self.assertAllClose(self.evaluate(decayed_lr), 0.01, 1e-6) self.evaluate(x.assign(999)) self.assertAllClose(self.evaluate(decayed_lr), 0.001, 1e-6) def testPiecewiseConstantEdgeCases(self): x_int = tf.Variable(0, dtype=tf.int32) boundaries, values = [-1.0, 1.0], [1, 2, 3] with self.assertRaises(ValueError): decayed_lr = tf.compat.v1.train.piecewise_constant( x_int, boundaries, values) if tf.executing_eagerly(): decayed_lr() x = tf.Variable(0.0) boundaries, values = [-1.0, 1.0], [1.0, 2, 3] with self.assertRaises(ValueError): decayed_lr = tf.compat.v1.train.piecewise_constant( x, boundaries, values) if tf.executing_eagerly(): decayed_lr() # Test that ref types are valid. 
if not tf.executing_eagerly(): x = tf.compat.v1.Variable(0.0, use_resource=False) x_ref = x.op.outputs[0] # float32_ref tensor should be accepted boundaries, values = [1.0, 2.0], [1, 2, 3] tf.compat.v1.train.piecewise_constant(x_ref, boundaries, values) # Test casting boundaries from int32 to int64. x_int64 = tf.Variable(0, dtype=tf.int64) boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7] decayed_lr = tf.compat.v1.train.piecewise_constant( x_int64, boundaries, values) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6) self.evaluate(x_int64.assign(1)) self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6) self.evaluate(x_int64.assign(2)) self.assertAllClose(self.evaluate(decayed_lr), 0.5, 1e-6) self.evaluate(x_int64.assign(3)) self.assertAllClose(self.evaluate(decayed_lr), 0.6, 1e-6) self.evaluate(x_int64.assign(4)) self.assertAllClose(self.evaluate(decayed_lr), 0.7, 1e-6) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class LinearDecayTest(keras_parameterized.TestCase): def testHalfWay(self): step = 5 lr = 0.05 end_lr = 0.0 decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr) expected = lr * 0.5 self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testEnd(self): step = 10 lr = 0.05 end_lr = 0.001 decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testHalfWayWithEnd(self): step = 5 lr = 0.05 end_lr = 0.001 decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr) expected = (lr + end_lr) * 0.5 self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testBeyondEnd(self): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testBeyondEndWithCycle(self): step = 15 lr = 0.05 end_lr = 
0.001 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, 10, end_lr, cycle=True) expected = (lr - end_lr) * 0.25 + end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class SqrtDecayTest(keras_parameterized.TestCase): def testHalfWay(self): step = 5 lr = 0.05 end_lr = 0.0 power = 0.5 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, 10, end_lr, power=power) expected = lr * 0.5**power self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testEnd(self): step = 10 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, 10, end_lr, power=power) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testHalfWayWithEnd(self): step = 5 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, 10, end_lr, power=power) expected = (lr - end_lr) * 0.5**power + end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testBeyondEnd(self): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, 10, end_lr, power=power) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testBeyondEndWithCycle(self): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, 10, end_lr, power=power, cycle=True) expected = (lr - end_lr) * 0.25**power + end_lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class PolynomialDecayTest(keras_parameterized.TestCase): def testBeginWithCycle(self): lr = 0.001 decay_steps = 10 step = 0 decayed_lr = tf.compat.v1.train.polynomial_decay( lr, step, decay_steps, cycle=True) expected = lr self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) 
@combinations.generate(combinations.combine(mode=["graph", "eager"])) class ExponentialDecayTest(keras_parameterized.TestCase): def testDecay(self): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = tf.compat.v1.train.natural_exp_decay(initial_lr, step, k, decay_rate) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr * math.exp(-i / k * decay_rate) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) self.evaluate(step.assign_add(1)) def testStaircase(self): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = tf.compat.v1.train.natural_exp_decay( initial_lr, step, k, decay_rate, staircase=True) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr * math.exp(-decay_rate * (i // k)) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) self.evaluate(step.assign_add(1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class InverseDecayTest(keras_parameterized.TestCase): def testDecay(self): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = tf.compat.v1.train.inverse_time_decay(initial_lr, step, k, decay_rate) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + i / k * decay_rate) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) self.evaluate(step.assign_add(1)) def testStaircase(self): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = tf.compat.v1.train.inverse_time_decay( initial_lr, step, k, decay_rate, staircase=True) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + decay_rate * (i // k)) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) self.evaluate(step.assign_add(1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class 
CosineDecayTest(keras_parameterized.TestCase): def np_cosine_decay(self, step, decay_steps, alpha=0.0): step = min(step, decay_steps) completed_fraction = step / decay_steps decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha def testDecay(self): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.cosine_decay(initial_lr, step, num_training_steps) expected = self.np_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testAlpha(self): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.cosine_decay(initial_lr, step, num_training_steps, alpha) expected = self.np_cosine_decay(step, num_training_steps, alpha) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class CosineDecayRestartsTest(keras_parameterized.TestCase): def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0): fac = 1.0 while step >= decay_steps: step -= decay_steps decay_steps *= t_mul fac *= m_mul completed_fraction = step / decay_steps decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha def testDecay(self): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.cosine_decay_restarts( initial_lr, step, num_training_steps) expected = self.np_cosine_decay_restarts(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testAlpha(self): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.cosine_decay_restarts( initial_lr, step, num_training_steps, alpha=alpha) expected = self.np_cosine_decay_restarts( step, num_training_steps, alpha=alpha) 
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testMMul(self): num_training_steps = 1000 initial_lr = 1.0 m_mul = 0.9 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.cosine_decay_restarts( initial_lr, step, num_training_steps, m_mul=m_mul) expected = self.np_cosine_decay_restarts( step, num_training_steps, m_mul=m_mul) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testTMul(self): num_training_steps = 1000 initial_lr = 1.0 t_mul = 1.0 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.cosine_decay_restarts( initial_lr, step, num_training_steps, t_mul=t_mul) expected = self.np_cosine_decay_restarts( step, num_training_steps, t_mul=t_mul) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class LinearCosineDecayTest(keras_parameterized.TestCase): def np_linear_cosine_decay(self, step, decay_steps, alpha=0.0, beta=0.001, num_periods=0.5): step = min(step, decay_steps) linear_decayed = float(decay_steps - step) / decay_steps fraction = 2.0 * num_periods * step / float(decay_steps) cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction)) return (alpha + linear_decayed) * cosine_decayed + beta def testDefaultDecay(self): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.linear_cosine_decay( initial_lr, step, num_training_steps) expected = self.np_linear_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) def testNonDefaultDecay(self): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = tf.compat.v1.train.linear_cosine_decay( initial_lr, step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5) expected = self.np_linear_cosine_decay( step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5) self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6) 
@combinations.generate(combinations.combine(mode=["graph", "eager"])) class NoisyLinearCosineDecayTest(keras_parameterized.TestCase): def testDefaultNoisyLinearCosine(self): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): # No numerical check because of noise decayed_lr = tf.compat.v1.train.noisy_linear_cosine_decay( initial_lr, step, num_training_steps) # Cannot be deterministically tested self.evaluate(decayed_lr) def testNonDefaultNoisyLinearCosine(self): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): # No numerical check because of noise decayed_lr = tf.compat.v1.train.noisy_linear_cosine_decay( initial_lr, step, num_training_steps, initial_variance=0.5, variance_decay=0.1, alpha=0.1, beta=1e-4, num_periods=5) # Cannot be deterministically tested self.evaluate(decayed_lr) if __name__ == "__main__": tf.test.main()
16,812
34.395789
80
py
keras
keras-master/keras/optimizer_v2/learning_rate_schedule.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""

import tensorflow.compat.v2 as tf

import abc
import math

from keras.utils import generic_utils
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule:
  """The learning rate schedule base class.

  You can use a learning rate schedule to modulate how the learning rate
  of your optimizer changes over time.

  Several built-in learning rate schedules are available, such as
  `tf.keras.optimizers.schedules.ExponentialDecay` or
  `tf.keras.optimizers.schedules.PiecewiseConstantDecay`:

  ```python
  lr_schedule = keras.optimizers.schedules.ExponentialDecay(
      initial_learning_rate=1e-2,
      decay_steps=10000,
      decay_rate=0.9)
  optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
  ```

  A `LearningRateSchedule` instance can be passed in as the `learning_rate`
  argument of any optimizer.

  To implement your own schedule object, you should implement the `__call__`
  method, which takes a `step` argument (scalar integer tensor, the
  current training step count).
  Like for any other Keras object, you can also optionally
  make your object serializable by implementing the `get_config`
  and `from_config` methods.

  Example:

  ```python
  class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):

    def __init__(self, initial_learning_rate):
      self.initial_learning_rate = initial_learning_rate

    def __call__(self, step):
      return self.initial_learning_rate / (step + 1)

  optimizer = tf.keras.optimizers.SGD(learning_rate=MyLRSchedule(0.1))
  ```
  """

  @abc.abstractmethod
  def __call__(self, step):
    raise NotImplementedError("Learning rate schedule must override __call__")

  @abc.abstractmethod
  def get_config(self):
    raise NotImplementedError("Learning rate schedule must override get_config")

  @classmethod
  def from_config(cls, config):
    """Instantiates a `LearningRateSchedule` from its config.

    Args:
      config: Output of `get_config()`.

    Returns:
      A `LearningRateSchedule` instance.
    """
    return cls(**config)


@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses an exponential decay schedule.

  When training a model, it is often useful to lower the learning rate as
  the training progresses. This schedule applies an exponential decay function
  to an optimizer step, given a provided initial learning rate.

  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  It is computed as:

  ```python
  def decayed_learning_rate(step):
    return initial_learning_rate * decay_rate ^ (step / decay_steps)
  ```

  If the argument `staircase` is `True`, then `step / decay_steps` is
  an integer division and the decayed learning rate follows a
  staircase function.

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate.

  Example: When fitting a Keras model, decay every 100000 steps with a base
  of 0.96:

  ```python
  initial_learning_rate = 0.1
  lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
      initial_learning_rate,
      decay_steps=100000,
      decay_rate=0.96,
      staircase=True)

  model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

  model.fit(data, labels, epochs=5)
  ```

  The learning rate schedule is also serializable and deserializable using
  `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      decay_rate,
      staircase=False,
      name=None):
    """Applies exponential decay to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Must be positive. See the decay computation above.
      decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
        The decay rate.
      staircase: Boolean. If `True` decay the learning rate at discrete
        intervals
      name: String. Optional name of the operation. Defaults to
        'ExponentialDecay'.
    """
    super(ExponentialDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.decay_rate = decay_rate
    self.staircase = staircase
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "ExponentialDecay") as name:
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = tf.cast(self.decay_steps, dtype)
      decay_rate = tf.cast(self.decay_rate, dtype)

      global_step_recomp = tf.cast(step, dtype)
      p = global_step_recomp / decay_steps
      if self.staircase:
        p = tf.floor(p)
      return tf.multiply(
          initial_learning_rate, tf.pow(decay_rate, p), name=name)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "decay_rate": self.decay_rate,
        "staircase": self.staircase,
        "name": self.name
    }


@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a piecewise constant decay schedule.

  The function returns a 1-arg callable to compute the piecewise constant
  when passed the current optimizer step. This can be useful for changing the
  learning rate value across different invocations of optimizer functions.

  Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
    for the next 10000 steps, and 0.1 for any additional steps.

  ```python
  step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
      boundaries, values)

  # Later, whenever we perform an optimization step, we pass in the step.
  learning_rate = learning_rate_fn(step)
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate.

  The learning rate schedule is also serializable and deserializable using
  `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as the boundary tensors.

    The output of the 1-arg function that takes the `step`
    is `values[0]` when `step <= boundaries[0]`,
    `values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
    and values[-1] when `step > boundaries[-1]`.
  """

  def __init__(
      self,
      boundaries,
      values,
      name=None):
    """Piecewise constant from boundaries and interval values.

    Args:
      boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
        increasing entries, and with all elements having the same type as the
        optimizer step.
      values: A list of `Tensor`s or `float`s or `int`s that specifies the
        values for the intervals defined by `boundaries`. It should have one
        more element than `boundaries`, and all elements should have the same
        type.
      name: A string. Optional name of the operation. Defaults to
        'PiecewiseConstant'.

    Raises:
      ValueError: if the number of elements in the lists do not match.
    """
    super(PiecewiseConstantDecay, self).__init__()

    if len(boundaries) != len(values) - 1:
      raise ValueError(
          "The length of boundaries should be 1 less than the length of "
          f"values. Received: boundaries={boundaries} of length "
          f"{len(boundaries)}, and values={values} of length {len(values)}.")

    self.boundaries = boundaries
    self.values = values
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "PiecewiseConstant"):
      boundaries = tf.nest.map_structure(tf.convert_to_tensor,
                                         tf.nest.flatten(self.boundaries))
      values = tf.nest.map_structure(tf.convert_to_tensor,
                                     tf.nest.flatten(self.values))
      x_recomp = tf.convert_to_tensor(step)
      for i, b in enumerate(boundaries):
        if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
          # We cast the boundaries to have the same type as the step
          b = tf.cast(b, x_recomp.dtype.base_dtype)
          boundaries[i] = b
      pred_fn_pairs = []
      pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
      pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
      for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
        # Need to bind v here; can do this with lambda v=v: ...
        pred = (x_recomp > low) & (x_recomp <= high)
        pred_fn_pairs.append((pred, lambda v=v: v))

      # The default isn't needed here because our conditions are mutually
      # exclusive and exhaustive, but tf.case requires it.
      default = lambda: values[0]
      return tf.case(pred_fn_pairs, default, exclusive=True)

  def get_config(self):
    return {
        "boundaries": self.boundaries,
        "values": self.values,
        "name": self.name
    }


@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a polynomial decay schedule.

  It is commonly observed that a monotonically decreasing learning rate, whose
  degree of change is carefully chosen, results in a better performing model.
  This schedule applies a polynomial decay function to an optimizer step,
  given a provided `initial_learning_rate`, to reach an `end_learning_rate`
  in the given `decay_steps`.

  It requires a `step` value to compute the decayed learning rate. You
  can just pass a TensorFlow variable that you increment at each training
  step.

  The schedule is a 1-arg callable that produces a decayed learning rate
  when passed the current optimizer step. This can be useful for changing the
  learning rate value across different invocations of optimizer functions.
  It is computed as:

  ```python
  def decayed_learning_rate(step):
    step = min(step, decay_steps)
    return ((initial_learning_rate - end_learning_rate) *
            (1 - step / decay_steps) ^ (power)
           ) + end_learning_rate
  ```

  If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `step`.

  ```python
  def decayed_learning_rate(step):
    decay_steps = decay_steps * ceil(step / decay_steps)
    return ((initial_learning_rate - end_learning_rate) *
            (1 - step / decay_steps) ^ (power)
           ) + end_learning_rate
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate.

  Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
  sqrt (i.e. power=0.5):

  ```python
  ...
  starter_learning_rate = 0.1
  end_learning_rate = 0.01
  decay_steps = 10000
  learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
      starter_learning_rate,
      decay_steps,
      end_learning_rate,
      power=0.5)

  model.compile(optimizer=tf.keras.optimizers.SGD(
                    learning_rate=learning_rate_fn),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

  model.fit(data, labels, epochs=5)
  ```

  The learning rate schedule is also serializable and deserializable using
  `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      end_learning_rate=0.0001,
      power=1.0,
      cycle=False,
      name=None):
    """Applies a polynomial decay to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Must be positive. See the decay computation above.
      end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The minimal end learning rate.
      power: A scalar `float32` or `float64` `Tensor` or a Python number.
        The power of the polynomial. Defaults to linear, 1.0.
      cycle: A boolean, whether or not it should cycle beyond decay_steps.
      name: String. Optional name of the operation. Defaults to
        'PolynomialDecay'.
    """
    super(PolynomialDecay, self).__init__()

    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.end_learning_rate = end_learning_rate
    self.power = power
    self.cycle = cycle
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "PolynomialDecay") as name:
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      end_learning_rate = tf.cast(self.end_learning_rate, dtype)
      power = tf.cast(self.power, dtype)

      global_step_recomp = tf.cast(step, dtype)
      decay_steps_recomp = tf.cast(self.decay_steps, dtype)
      if self.cycle:
        # Find the first multiple of decay_steps that is bigger than
        # global_step. If global_step is zero set the multiplier to 1
        multiplier = tf.where(
            tf.equal(global_step_recomp, 0), 1.0,
            tf.math.ceil(global_step_recomp / self.decay_steps))
        decay_steps_recomp = tf.multiply(decay_steps_recomp, multiplier)
      else:
        # Make sure that the global_step used is not bigger than decay_steps.
        global_step_recomp = tf.minimum(global_step_recomp,
                                        decay_steps_recomp)

      p = tf.divide(global_step_recomp, decay_steps_recomp)
      return tf.add(
          tf.multiply(initial_learning_rate - end_learning_rate,
                      tf.pow(1 - p, power)),
          end_learning_rate,
          name=name)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "end_learning_rate": self.end_learning_rate,
        "power": self.power,
        "cycle": self.cycle,
        "name": self.name
    }


@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses an inverse time decay schedule.

  When training a model, it is often useful to lower the learning rate as
  the training progresses. This schedule applies the inverse decay function
  to an optimizer step, given a provided initial learning rate.
  It requires a `step` value to compute the decayed learning rate. You can
  just pass a TensorFlow variable that you increment at each training step.

  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  It is computed as:

  ```python
  def decayed_learning_rate(step):
    return initial_learning_rate / (1 + decay_rate * step / decay_step)
  ```

  or, if `staircase` is `True`, as:

  ```python
  def decayed_learning_rate(step):
    return initial_learning_rate /
           (1 + decay_rate * floor(step / decay_step))
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate.

  Example: Fit a Keras model when decaying 1/t with a rate of 0.5:

  ```python
  ...
  initial_learning_rate = 0.1
  decay_steps = 1.0
  decay_rate = 0.5
  learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
      initial_learning_rate, decay_steps, decay_rate)

  model.compile(optimizer=tf.keras.optimizers.SGD(
                    learning_rate=learning_rate_fn),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

  model.fit(data, labels, epochs=5)
  ```

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      decay_rate,
      staircase=False,
      name=None):
    """Applies inverse time decay to the initial learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The initial learning rate.
      decay_steps: How often to apply decay.
      decay_rate: A Python number. The decay rate.
      staircase: Whether to apply decay in a discrete staircase, as opposed
        to continuous, fashion.
      name: String. Optional name of the operation. Defaults to
        'InverseTimeDecay'.
    """
    super(InverseTimeDecay, self).__init__()

    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.decay_rate = decay_rate
    self.staircase = staircase
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "InverseTimeDecay") as name:
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = tf.cast(self.decay_steps, dtype)
      decay_rate = tf.cast(self.decay_rate, dtype)

      global_step_recomp = tf.cast(step, dtype)
      p = global_step_recomp / decay_steps
      if self.staircase:
        p = tf.floor(p)
      const = tf.cast(tf.constant(1), dtype)
      denom = tf.add(const, tf.multiply(decay_rate, p))
      return tf.divide(initial_learning_rate, denom, name=name)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "decay_rate": self.decay_rate,
        "staircase": self.staircase,
        "name": self.name
    }


@keras_export("keras.optimizers.schedules.CosineDecay",
              "keras.experimental.CosineDecay")
class CosineDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a cosine decay schedule.

  See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
  SGDR: Stochastic Gradient Descent with Warm Restarts.

  When training a model, it is often useful to lower the learning rate as
  the training progresses. This schedule applies a cosine decay function
  to an optimizer step, given a provided initial learning rate.
  It requires a `step` value to compute the decayed learning rate. You can
  just pass a TensorFlow variable that you increment at each training step.

  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  It is computed as:

  ```python
  def decayed_learning_rate(step):
    step = min(step, decay_steps)
    cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
    decayed = (1 - alpha) * cosine_decay + alpha
    return initial_learning_rate * decayed
  ```

  Example usage:
  ```python
  decay_steps = 1000
  lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
      initial_learning_rate, decay_steps)
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate. The learning rate schedule is also serializable and
  deserializable using `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      alpha=0.0,
      name=None):
    """Applies cosine decay to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      alpha: A scalar `float32` or `float64` Tensor or a Python number.
        Minimum learning rate value as a fraction of initial_learning_rate.
      name: String. Optional name of the operation. Defaults to
        'CosineDecay'.
    """
    super(CosineDecay, self).__init__()

    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.alpha = alpha
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "CosineDecay"):
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = tf.cast(self.decay_steps, dtype)

      global_step_recomp = tf.cast(step, dtype)
      global_step_recomp = tf.minimum(global_step_recomp, decay_steps)
      completed_fraction = global_step_recomp / decay_steps
      cosine_decayed = 0.5 * (1.0 + tf.cos(
          tf.constant(math.pi, dtype=dtype) * completed_fraction))

      decayed = (1 - self.alpha) * cosine_decayed + self.alpha
      return tf.multiply(initial_learning_rate, decayed)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "alpha": self.alpha,
        "name": self.name
    }


@keras_export("keras.optimizers.schedules.CosineDecayRestarts",
              "keras.experimental.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
  """A LearningRateSchedule that uses a cosine decay schedule with restarts.

  See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
  SGDR: Stochastic Gradient Descent with Warm Restarts.

  When training a model, it is often useful to lower the learning rate as
  the training progresses. This schedule applies a cosine decay function with
  restarts to an optimizer step, given a provided initial learning rate.
  It requires a `step` value to compute the decayed learning rate. You can
  just pass a TensorFlow variable that you increment at each training step.

  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.

  The learning rate multiplier first decays
  from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
  restart is performed. Each new warm restart runs for `t_mul` times more
  steps and with `m_mul` times smaller initial learning rate.

  Example usage:
  ```python
  first_decay_steps = 1000
  lr_decayed_fn = (
    tf.keras.optimizers.schedules.CosineDecayRestarts(
        initial_learning_rate,
        first_decay_steps))
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate. The learning rate schedule is also serializable and
  deserializable using `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      first_decay_steps,
      t_mul=2.0,
      m_mul=1.0,
      alpha=0.0,
      name=None):
    """Applies cosine decay with restarts to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a
        Python number. The initial learning rate.
      first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
        number. Number of steps to decay over.
      t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the number of iterations in the i-th period
      m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the initial learning rate of the i-th period:
      alpha: A scalar `float32` or `float64` Tensor or a Python number.
        Minimum learning rate value as a fraction of the
        initial_learning_rate.
      name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
    """
    super(CosineDecayRestarts, self).__init__()

    self.initial_learning_rate = initial_learning_rate
    self.first_decay_steps = first_decay_steps
    self._t_mul = t_mul
    self._m_mul = m_mul
    self.alpha = alpha
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "SGDRDecay") as name:
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      first_decay_steps = tf.cast(self.first_decay_steps, dtype)
      alpha = tf.cast(self.alpha, dtype)
      t_mul = tf.cast(self._t_mul, dtype)
      m_mul = tf.cast(self._m_mul, dtype)

      global_step_recomp = tf.cast(step, dtype)
      completed_fraction = global_step_recomp / first_decay_steps

      def compute_step(completed_fraction, geometric=False):
        """Helper for `cond` operation."""
        if geometric:
          i_restart = tf.floor(
              tf.math.log(1.0 - completed_fraction * (1.0 - t_mul)) /
              tf.math.log(t_mul))

          sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
          completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart

        else:
          i_restart = tf.floor(completed_fraction)
          completed_fraction -= i_restart

        return i_restart, completed_fraction

      i_restart, completed_fraction = tf.cond(
          tf.equal(t_mul, 1.0),
          lambda: compute_step(completed_fraction, geometric=False),
          lambda: compute_step(completed_fraction, geometric=True))

      m_fac = m_mul**i_restart
      cosine_decayed = 0.5 * m_fac * (1.0 + tf.cos(
          tf.constant(math.pi, dtype=dtype) * completed_fraction))
      decayed = (1 - alpha) * cosine_decayed + alpha

      return tf.multiply(initial_learning_rate, decayed, name=name)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "first_decay_steps": self.first_decay_steps,
        "t_mul": self._t_mul,
        "m_mul": self._m_mul,
        "alpha": self.alpha,
        "name": self.name
    }


# Note: this code is still used by V1 APIs.
class LinearCosineDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a linear cosine decay schedule.

  See [Bello et al., ICML2017] Neural Optimizer Search with RL.
  https://arxiv.org/abs/1709.07417

  For the idea of warm starts here controlled by `num_periods`,
  see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983

  Note that linear cosine decay is more aggressive than cosine decay and
  larger initial learning rates can typically be used.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This schedule applies a linear cosine decay
  function to an optimizer step, given a provided initial learning rate.
  It requires a `step` value to compute the decayed learning rate. You can
  just pass a TensorFlow variable that you increment at each training step.

  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  It is computed as:

  ```python
  def decayed_learning_rate(step):
    step = min(step, decay_steps)
    linear_decay = (decay_steps - step) / decay_steps
    cosine_decay = 0.5 * (
        1 + cos(pi * 2 * num_periods * step / decay_steps))
    decayed = (alpha + linear_decay) * cosine_decay + beta
    return initial_learning_rate * decayed
  ```

  Example usage:
  ```python
  decay_steps = 1000
  lr_decayed_fn = (
    tf.keras.experimental.LinearCosineDecay(
      initial_learning_rate, decay_steps))
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate. The learning rate schedule is also serializable and
  deserializable using `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      num_periods=0.5,
      alpha=0.0,
      beta=0.001,
      name=None):
    """Applies linear cosine decay to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      num_periods: Number of periods in the cosine part of the decay.
        See computation above.
      alpha: See computation above.
      beta: See computation above.
      name: String. Optional name of the operation. Defaults to
        'LinearCosineDecay'.
    """
    super(LinearCosineDecay, self).__init__()

    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.num_periods = num_periods
    self.alpha = alpha
    self.beta = beta
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "LinearCosineDecay") as name:
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = tf.cast(self.decay_steps, dtype)
      num_periods = tf.cast(self.num_periods, dtype)
      alpha = tf.cast(self.alpha, dtype)
      beta = tf.cast(self.beta, dtype)

      global_step_recomp = tf.cast(step, dtype)
      global_step_recomp = tf.minimum(global_step_recomp, decay_steps)
      linear_decayed = (decay_steps - global_step_recomp) / decay_steps
      completed_fraction = global_step_recomp / decay_steps
      fraction = 2.0 * num_periods * completed_fraction
      cosine_decayed = 0.5 * (
          1.0 + tf.cos(tf.constant(math.pi, dtype=dtype) * fraction))

      linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
      return tf.multiply(initial_learning_rate, linear_cosine_decayed,
                         name=name)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "num_periods": self.num_periods,
        "alpha": self.alpha,
        "beta": self.beta,
        "name": self.name
    }


# Note: this code is still used by V1 APIs.
class NoisyLinearCosineDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a noisy linear cosine decay schedule.

  See [Bello et al., ICML2017] Neural Optimizer Search with RL.
  https://arxiv.org/abs/1709.07417

  For the idea of warm starts here controlled by `num_periods`,
  see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983

  Note that linear cosine decay is more aggressive than cosine decay and
  larger initial learning rates can typically be used.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This schedule applies a noisy linear cosine decay
  function to an optimizer step, given a provided initial learning rate.
  It requires a `step` value to compute the decayed learning rate. You can
  just pass a TensorFlow variable that you increment at each training step.

  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  It is computed as:

  ```python
  def decayed_learning_rate(step):
    step = min(step, decay_steps)
    linear_decay = (decay_steps - step) / decay_steps
    cosine_decay = 0.5 * (
        1 + cos(pi * 2 * num_periods * step / decay_steps))
    decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
    return initial_learning_rate * decayed
  ```
  where eps_t is 0-centered gaussian noise with variance
  initial_variance / (1 + global_step) ** variance_decay

  Example usage:
  ```python
  decay_steps = 1000
  lr_decayed_fn = (
    tf.keras.experimental.NoisyLinearCosineDecay(
      initial_learning_rate, decay_steps))
  ```

  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate. The learning rate schedule is also serializable and
  deserializable using `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.

  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      initial_variance=1.0,
      variance_decay=0.55,
      num_periods=0.5,
      alpha=0.0,
      beta=0.001,
      name=None):
    """Applies noisy linear cosine decay to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      initial_variance: initial variance for the noise. See computation above.
      variance_decay: decay for the noise's variance. See computation above.
      num_periods: Number of periods in the cosine part of the decay.
        See computation above.
      alpha: See computation above.
      beta: See computation above.
      name: String. Optional name of the operation. Defaults to
        'NoisyLinearCosineDecay'.
    """
    super(NoisyLinearCosineDecay, self).__init__()

    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.initial_variance = initial_variance
    self.variance_decay = variance_decay
    self.num_periods = num_periods
    self.alpha = alpha
    self.beta = beta
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "NoisyLinearCosineDecay") as name:
      initial_learning_rate = tf.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = tf.cast(self.decay_steps, dtype)
      initial_variance = tf.cast(self.initial_variance, dtype)
      variance_decay = tf.cast(self.variance_decay, dtype)
      num_periods = tf.cast(self.num_periods, dtype)
      alpha = tf.cast(self.alpha, dtype)
      beta = tf.cast(self.beta, dtype)

      global_step_recomp = tf.cast(step, dtype)
      global_step_recomp = tf.minimum(global_step_recomp, decay_steps)
      linear_decayed = (decay_steps - global_step_recomp) / decay_steps
      variance = initial_variance / (
          tf.pow(1.0 + global_step_recomp, variance_decay))
      std = tf.sqrt(variance)
      noisy_linear_decayed = (
          linear_decayed + tf.random.normal(
              linear_decayed.shape, stddev=std))

      completed_fraction = global_step_recomp / decay_steps
      fraction = 2.0 * num_periods * completed_fraction
      cosine_decayed = 0.5 * (
          1.0 + tf.cos(tf.constant(math.pi, dtype=dtype) * fraction))
      noisy_linear_cosine_decayed = (
          (alpha + noisy_linear_decayed) * cosine_decayed + beta)

      return tf.multiply(
          initial_learning_rate, noisy_linear_cosine_decayed, name=name)

  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "initial_variance": self.initial_variance,
        "variance_decay": self.variance_decay,
        "num_periods": self.num_periods,
        "alpha": self.alpha,
        "beta": self.beta,
        "name": self.name
    }


@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule):
  """Serializes a `LearningRateSchedule` into a JSON-compatible representation.

  Args:
    learning_rate_schedule: The `LearningRateSchedule` object to serialize.

  Returns:
    A JSON-serializable dict representing the object's config.

  Example:

  >>> lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
  ...   0.1, decay_steps=100000, decay_rate=0.96, staircase=True)
  >>> tf.keras.optimizers.schedules.serialize(lr_schedule)
  {'class_name': 'ExponentialDecay', 'config': {'decay_rate': 0.96,
  'decay_steps': 100000, 'initial_learning_rate': 0.1, 'name': None,
  'staircase': True}}
  """
  return generic_utils.serialize_keras_object(learning_rate_schedule)


@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None):
  """Instantiates a `LearningRateSchedule` object from a serialized form.

  Args:
    config: The serialized form of the `LearningRateSchedule`.
      Dictionary of the form {'class_name': str, 'config': dict}.
    custom_objects: A dictionary mapping class names (or function names) of
      custom (non-Keras) objects to class/functions.

  Returns:
    A `LearningRateSchedule` object.

  Example:

  ```python
  # Configuration for PolynomialDecay
  config = {
    'class_name': 'PolynomialDecay',
    'config': {'cycle': False,
      'decay_steps': 10000,
      'end_learning_rate': 0.01,
      'initial_learning_rate': 0.1,
      'name': None,
      'power': 0.5}}
  lr_schedule = tf.keras.optimizers.schedules.deserialize(config)
  ```
  """
  return generic_utils.deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name="decay")
40,125
36.016605
80
py
keras
keras-master/keras/optimizer_v2/adagrad_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for aggregate operations.""" import tensorflow.compat.v2 as tf import copy from absl.testing import parameterized import numpy as np from keras import combinations from keras.optimizer_v2 import adagrad from keras.optimizer_v2 import learning_rate_schedule _DATA_TYPES = [ tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128 ] def adagrad_update_numpy(param, accum, g_t, lr=0.001, epsilon=1e-7): accum_t = accum + g_t * g_t param_t = param - lr * g_t / (np.sqrt(accum_t) + epsilon) return param_t, accum_t def sparse_adagrad_update_numpy(param, accum, gindexs, gvalues, lr=0.001, epsilon=1e-7): accum_t = copy.deepcopy(accum) param_t = copy.deepcopy(param) # first loop accumulates repeated indices if necessary. 
for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] accum_t[gindex] = accum_t[gindex] + gvalue * gvalue for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] param_t[gindex] = param_t[gindex] - lr * gvalue / ( np.sqrt(accum_t[gindex]) + epsilon) return param_t, accum_t class AdagradOptimizerTest(tf.test.TestCase, parameterized.TestCase): def doTestBasic(self, use_callable_params=False): for dtype in _DATA_TYPES: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = lambda: 3.0 if not use_callable_params: learning_rate = learning_rate() ada_opt = adagrad.Adagrad(learning_rate) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) if not tf.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for _ in range(3): if not tf.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, 3.0) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, 3.0) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasic(self): self.doTestBasic() 
@combinations.generate(combinations.combine(mode=["eager"])) def testBasicCallableParams(self): self.doTestBasic(use_callable_params=True) def testBasicWithLearningRateDecay(self): for dtype in _DATA_TYPES: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 3.0 decay = 0.5 ada_opt = adagrad.Adagrad(learning_rate, decay=decay) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) if not tf.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for t in range(3): if not tf.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) lr_np = learning_rate / (1 + decay * t) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, lr_np) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, lr_np) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testBasicWithLargeEpsilon(self): var0_np = np.array([1.0, 2.0]) var1_np = np.array([3.0, 4.0]) grads0_np = np.array([0.1, 0.1]) grads1_np = np.array([0.01, 0.01]) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 3.0 ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0) accum0_np = np.array([0.1, 
0.1]) accum1_np = np.array([0.1, 0.1]) if not tf.executing_eagerly(): ada_update = ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for _ in range(3): if not tf.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, 3.0, 1.0) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, 3.0, 1.0) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testBasicWithLearningRateInverseTimeDecay(self): for dtype in _DATA_TYPES: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 3.0 decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) ada_opt = adagrad.Adagrad(lr_schedule) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) if not tf.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for t in range(3): if not tf.executing_eagerly(): 
self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) lr_np = learning_rate / (1 + decay * t) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, lr_np) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, lr_np) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0 = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[0, 1], [3, 4]], self.evaluate(var0), atol=0.01) def testTensorLearningRate(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = tf.constant(3.0) ada_opt = adagrad.Adagrad(learning_rate) ada_update = ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) # Run 3 steps of adagrad for _ in range(3): self.evaluate(ada_update) var0_np, accum0_np = adagrad_update_numpy( var0_np, accum0_np, grads0_np, learning_rate) var1_np, accum1_np = adagrad_update_numpy( var1_np, accum1_np, grads1_np, learning_rate) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSparseBasic(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0_np_indices = np.array([0, 2], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np[grads0_np_indices]), tf.constant(grads0_np_indices), tf.constant([3])) grads1_np_indices = np.array([0, 2], dtype=np.int32) grads1 = tf.IndexedSlices( tf.constant(grads1_np[grads1_np_indices]), tf.constant(grads1_np_indices), tf.constant([3])) learning_rate = 3.0 ada_opt = adagrad.Adagrad(learning_rate) ada_update = ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1)) accum0_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype) # Run 3 step of sgd for _ in range(3): self.evaluate(ada_update) var0_np, accum0_np = sparse_adagrad_update_numpy( var0_np, accum0_np, grads0_np_indices, grads0_np[grads0_np_indices], learning_rate) var1_np, accum1_np = sparse_adagrad_update_numpy( var1_np, accum1_np, grads1_np_indices, grads1_np[grads1_np_indices], learning_rate) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSparseSingleVarDim(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0_np = np.array([1.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) grads0_np_indices = np.array([0], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np[grads0_np_indices]), tf.constant(grads0_np_indices), tf.constant([3])) learning_rate = 3.0 ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.) ada_update = ada_opt.apply_gradients(zip([grads0], [var0])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0], self.evaluate(var0)) accum0_np = np.array([0.1], dtype=dtype.as_numpy_dtype) # Run 3 step of sgd for _ in range(3): self.evaluate(ada_update) var0_np, accum0_np = sparse_adagrad_update_numpy( var0_np, accum0_np, grads0_np_indices, grads0_np[grads0_np_indices], learning_rate, epsilon=1.) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) def testSparseRepeatedIndices(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype) repeated_index_update_var = tf.Variable( var_np, dtype=dtype) aggregated_update_var = tf.Variable( var_np, dtype=dtype) grad_repeated_index = tf.IndexedSlices( tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype), tf.constant([1, 1]), tf.constant([2, 1])) grad_aggregated = tf.IndexedSlices( tf.constant([0.2], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) repeated_update = adagrad.Adagrad(3.0).apply_gradients([ (grad_repeated_index, repeated_index_update_var) ]) aggregated_update = adagrad.Adagrad(3.0).apply_gradients([ (grad_aggregated, aggregated_update_var) ]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) for _ in range(3): self.evaluate(repeated_update) self.evaluate(aggregated_update) self.assertAllClose( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) def testSparseRepeatedIndicesByEmbeddingLookUp(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var_repeated = tf.Variable([1.0, 2.0], dtype=dtype) loss_repeated = lambda: tf.reduce_sum( # pylint: disable=g-long-lambda tf.compat.v1.nn.embedding_lookup(var_repeated, [0, 0])) # pylint: disable=cell-var-from-loop var_aggregated = tf.Variable([1.0, 2.0], dtype=dtype) loss_aggregated = lambda: 2 * tf.reduce_sum( # pylint: disable=g-long-lambda tf.compat.v1.nn.embedding_lookup(var_aggregated, [0])) # pylint: disable=cell-var-from-loop update_op_repeated = adagrad.Adagrad(2.0).minimize( loss_repeated, var_list=[var_repeated]) update_op_aggregated = adagrad.Adagrad(2.0).minimize( loss_aggregated, var_list=[var_aggregated]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllCloseAccordingToType( self.evaluate(var_repeated), self.evaluate(var_aggregated)) for _ in range(3): self.evaluate(update_op_repeated) self.evaluate(update_op_aggregated) self.assertAllCloseAccordingToType( self.evaluate(var_repeated), self.evaluate(var_aggregated)) def testSparseStability(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half]: shape = [1, 6] var0_np = np.array([[0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257, -0.0105945]], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) grads0_np = np.array([[ -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05, -8.4877e-05, -9.48906e-05 ]], dtype=dtype.as_numpy_dtype) grads0 = tf.IndexedSlices( tf.constant(grads0_np), tf.constant([0]), tf.constant(shape)) ada_opt = adagrad.Adagrad(1.0) ada_update = ada_opt.apply_gradients(zip([grads0], [var0])) slot0 = ada_opt.get_slot(var0, "accumulator") init = tf.compat.v1.global_variables_initializer() for _ in range(100): self.evaluate(init) self.evaluate(ada_update) self.assertAllCloseAccordingToType( np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([[ 0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573, -0.01029443 ]]), self.evaluate(var0)) def testSharing(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 3.0 ada_opt = adagrad.Adagrad(learning_rate) # Apply the optimizer twice. Both applications will use # the same accums. ada_update1 = ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) ada_update2 = ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) slot0 = ada_opt.get_slot(var0, "accumulator") self.assertEqual(slot0.shape, var0.shape) slot1 = ada_opt.get_slot(var1, "accumulator") self.assertEqual(slot1.shape, var1.shape) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values. 
self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Mix the first and the second adagrad for 3 steps. self.evaluate(ada_update1) self.evaluate(ada_update2) self.evaluate(ada_update1) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) for _ in range(3): var0_np, accum0_np = adagrad_update_numpy( var0_np, accum0_np, grads0_np, learning_rate) var1_np, accum1_np = adagrad_update_numpy( var1_np, accum1_np, grads1_np, learning_rate) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testConstructAdagradWithLR(self): opt = adagrad.Adagrad(lr=1.0) opt_2 = adagrad.Adagrad(learning_rate=0.1, lr=1.0) opt_3 = adagrad.Adagrad(learning_rate=0.1) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) if __name__ == "__main__": tf.test.main()
22,382
41.553232
114
py
keras
keras-master/keras/optimizer_v2/ftrl.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ftrl-proximal optimizer implementation.""" import tensorflow.compat.v2 as tf # pylint: disable=g-classes-have-attributes from keras.optimizer_v2 import optimizer_v2 from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Ftrl') class Ftrl(optimizer_v2.OptimizerV2): r"""Optimizer that implements the FTRL algorithm. "Follow The Regularized Leader" (FTRL) is an optimization algorithm developed at Google for click-through rate prediction in the early 2010s. It is most suitable for shallow models with large and sparse feature spaces. The algorithm is described by [McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf). The Keras version has support for both online L2 regularization (the L2 regularization described in the paper above) and shrinkage-type L2 regularization (which is the addition of an L2 penalty to the loss function). 
Initialization: ```python n = 0 sigma = 0 z = 0 ``` Update rule for one variable `w`: ```python prev_n = n n = n + g ** 2 sigma = (sqrt(n) - sqrt(prev_n)) / lr z = z + g - sigma * w if abs(z) < lambda_1: w = 0 else: w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2) ``` Notation: - `lr` is the learning rate - `g` is the gradient for the variable - `lambda_1` is the L1 regularization strength - `lambda_2` is the L2 regularization strength Check the documentation for the `l2_shrinkage_regularization_strength` parameter for more details when shrinkage is enabled, in which case gradient is replaced with a gradient with shrinkage. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. learning_rate_power: A float value, must be less or equal to zero. Controls how the learning rate decreases during training. Use zero for a fixed learning rate. initial_accumulator_value: The starting value for accumulators. Only zero or positive values are allowed. l1_regularization_strength: A float value, must be greater than or equal to zero. Defaults to 0.0. l2_regularization_strength: A float value, must be greater than or equal to zero. Defaults to 0.0. name: Optional name prefix for the operations created when applying gradients. Defaults to `"Ftrl"`. l2_shrinkage_regularization_strength: A float value, must be greater than or equal to zero. This differs from L2 above in that the L2 above is a stabilization penalty, whereas this L2 shrinkage is a magnitude penalty. When input is sparse shrinkage will only happen on the active weights. beta: A float value, representing the beta value from the paper. Defaults to 0.0. **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips gradients by value. 
Reference: - [McMahan et al., 2013]( https://research.google.com/pubs/archive/41159.pdf) """ def __init__(self, learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl', l2_shrinkage_regularization_strength=0.0, beta=0.0, **kwargs): super(Ftrl, self).__init__(name, **kwargs) if initial_accumulator_value < 0.0: raise ValueError( '`initial_accumulator_value` needs to be positive or zero. Received: ' f'initial_accumulator_value={initial_accumulator_value}.') if learning_rate_power > 0.0: raise ValueError( '`learning_rate_power` needs to be negative or zero. Received: ' f'learning_rate_power={learning_rate_power}.') if l1_regularization_strength < 0.0: raise ValueError( '`l1_regularization_strength` needs to be positive or zero. ' f'Received: l1_regularization_strength={l1_regularization_strength}.') if l2_regularization_strength < 0.0: raise ValueError( '`l2_regularization_strength` needs to be positive or zero. ' f'Received: l2_regularization_strength={l2_regularization_strength}.') if l2_shrinkage_regularization_strength < 0.0: raise ValueError( '`l2_shrinkage_regularization_strength` needs to be positive or ' 'zero. Received: l2_shrinkage_regularization_strength' f'={l2_shrinkage_regularization_strength}.') self._set_hyper('learning_rate', learning_rate) self._set_hyper('decay', self._initial_decay) self._set_hyper('learning_rate_power', learning_rate_power) self._set_hyper('l1_regularization_strength', l1_regularization_strength) self._set_hyper('l2_regularization_strength', l2_regularization_strength) self._set_hyper('beta', beta) self._initial_accumulator_value = initial_accumulator_value self._l2_shrinkage_regularization_strength = ( l2_shrinkage_regularization_strength) def _create_slots(self, var_list): # Create the "accum" and "linear" slots. 
for var in var_list: dtype = var.dtype.base_dtype init = tf.compat.v1.constant_initializer( self._initial_accumulator_value, dtype=dtype) self.add_slot(var, 'accumulator', init) self.add_slot(var, 'linear') def _prepare_local(self, var_device, var_dtype, apply_state): super(Ftrl, self)._prepare_local(var_device, var_dtype, apply_state) apply_state[(var_device, var_dtype)].update( dict( learning_rate_power=tf.identity( self._get_hyper('learning_rate_power', var_dtype)), l1_regularization_strength=tf.identity( self._get_hyper('l1_regularization_strength', var_dtype)), l2_regularization_strength=tf.identity( self._get_hyper('l2_regularization_strength', var_dtype)), beta=tf.identity(self._get_hyper('beta', var_dtype)), l2_shrinkage_regularization_strength=tf.cast( self._l2_shrinkage_regularization_strength, var_dtype))) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) # Adjust L2 regularization strength to include beta to avoid the underlying # TensorFlow ops needing to include it. adjusted_l2_regularization_strength = ( coefficients['l2_regularization_strength'] + coefficients['beta'] / (2. 
* coefficients['lr_t'])) accum = self.get_slot(var, 'accumulator') linear = self.get_slot(var, 'linear') if self._l2_shrinkage_regularization_strength <= 0.0: return tf.raw_ops.ResourceApplyFtrl( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, lr=coefficients['lr_t'], l1=coefficients['l1_regularization_strength'], l2=adjusted_l2_regularization_strength, lr_power=coefficients['learning_rate_power'], use_locking=self._use_locking) else: return tf.raw_ops.ResourceApplyFtrlV2( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, lr=coefficients['lr_t'], l1=coefficients['l1_regularization_strength'], l2=adjusted_l2_regularization_strength, l2_shrinkage=coefficients['l2_shrinkage_regularization_strength'], lr_power=coefficients['learning_rate_power'], use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) # Adjust L2 regularization strength to include beta to avoid the underlying # TensorFlow ops needing to include it. adjusted_l2_regularization_strength = ( coefficients['l2_regularization_strength'] + coefficients['beta'] / (2. 
* coefficients['lr_t'])) accum = self.get_slot(var, 'accumulator') linear = self.get_slot(var, 'linear') if self._l2_shrinkage_regularization_strength <= 0.0: return tf.raw_ops.ResourceSparseApplyFtrl( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, indices=indices, lr=coefficients['lr_t'], l1=coefficients['l1_regularization_strength'], l2=adjusted_l2_regularization_strength, lr_power=coefficients['learning_rate_power'], use_locking=self._use_locking) else: return tf.raw_ops.ResourceSparseApplyFtrlV2( var=var.handle, accum=accum.handle, linear=linear.handle, grad=grad, indices=indices, lr=coefficients['lr_t'], l1=coefficients['l1_regularization_strength'], l2=adjusted_l2_regularization_strength, l2_shrinkage=coefficients['l2_shrinkage_regularization_strength'], lr_power=coefficients['learning_rate_power'], use_locking=self._use_locking) def get_config(self): config = super(Ftrl, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._initial_decay, 'initial_accumulator_value': self._initial_accumulator_value, 'learning_rate_power': self._serialize_hyperparameter('learning_rate_power'), 'l1_regularization_strength': self._serialize_hyperparameter('l1_regularization_strength'), 'l2_regularization_strength': self._serialize_hyperparameter('l2_regularization_strength'), 'beta': self._serialize_hyperparameter('beta'), 'l2_shrinkage_regularization_strength': self._l2_shrinkage_regularization_strength, }) return config
11,004
40.37218
80
py
keras
keras-master/keras/optimizer_v2/utils.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Optimizer utilities.""" import tensorflow.compat.v2 as tf from tensorflow.python.platform import tf_logging as logging def all_reduce_sum_gradients(grads_and_vars): """Returns all-reduced gradients aggregated via summation. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: List of (gradient, variable) pairs where gradients have been all-reduced. 
""" grads_and_vars = list(grads_and_vars) filtered_grads_and_vars = filter_empty_gradients(grads_and_vars) if filtered_grads_and_vars: if strategy_supports_no_merge_call(): grads = [pair[0] for pair in filtered_grads_and_vars] reduced = tf.distribute.get_replica_context().all_reduce( tf.distribute.ReduceOp.SUM, grads) else: # TODO(b/183257003): Remove this branch reduced = tf.distribute.get_replica_context().merge_call( _all_reduce_sum_fn, args=(filtered_grads_and_vars,)) else: reduced = [] # Copy 'reduced' but add None gradients back in reduced_with_nones = [] reduced_pos = 0 for g, v in grads_and_vars: if g is None: reduced_with_nones.append((None, v)) else: reduced_with_nones.append((reduced[reduced_pos], v)) reduced_pos += 1 assert reduced_pos == len(reduced), "Failed to add all gradients" return reduced_with_nones def filter_empty_gradients(grads_and_vars): """Filter out `(grad, var)` pairs that have a gradient equal to `None`.""" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: variable = ([v.name for _, v in grads_and_vars],) raise ValueError(f"No gradients provided for any variable: {variable}. " f"Provided `grads_and_vars` is {grads_and_vars}.") if vars_with_empty_grads: logging.warning( ("Gradients do not exist for variables %s when minimizing the loss. 
" "If you're using `model.compile()`, did you forget to provide a `loss`" "argument?"), ([v.name for v in vars_with_empty_grads])) return filtered def make_gradient_clipnorm_fn(clipnorm): """Creates a gradient transformation function for clipping by norm.""" if clipnorm is None: return lambda grads_and_vars: grads_and_vars def gradient_clipnorm_fn(grads_and_vars): if isinstance(tf.distribute.get_strategy(), (tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): raise ValueError( "`clipnorm` is not supported with `CenteralStorageStrategy`. " f"The strategy used is {tf.distribute.get_strategy()}.") clipped_grads_and_vars = [ (tf.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars ] return clipped_grads_and_vars return gradient_clipnorm_fn def make_global_gradient_clipnorm_fn(clipnorm): """Creates a gradient transformation function for clipping by norm.""" if clipnorm is None: return lambda grads_and_vars: grads_and_vars def gradient_clipnorm_fn(grads_and_vars): if isinstance(tf.distribute.get_strategy(), (tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): raise ValueError( "`global_clipnorm` is not supported with `CenteralStorageStrategy`. 
" f"The strategy used is {tf.distribute.get_strategy()}.") grads, variables = zip(*grads_and_vars) clipped_grads, _ = tf.clip_by_global_norm(grads, clipnorm) clipped_grads_and_vars = list(zip(clipped_grads, variables)) return clipped_grads_and_vars return gradient_clipnorm_fn def make_gradient_clipvalue_fn(clipvalue): """Creates a gradient transformation function for clipping by value.""" if clipvalue is None: return lambda grads_and_vars: grads_and_vars def gradient_clipvalue_fn(grads_and_vars): if isinstance(tf.distribute.get_strategy(), (tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): raise ValueError( "`clipvalue` is not supported with `CenteralStorageStrategy`. " f"The strategy used is {tf.distribute.get_strategy()}.") clipped_grads_and_vars = [(tf.clip_by_value(g, -clipvalue, clipvalue), v) for g, v in grads_and_vars] return clipped_grads_and_vars return gradient_clipvalue_fn def _all_reduce_sum_fn(distribution, grads_and_vars): return distribution.extended.batch_reduce_to(tf.distribute.ReduceOp.SUM, grads_and_vars) def strategy_supports_no_merge_call(): """Returns if the current Strategy can operate in pure replica context.""" if not tf.distribute.has_strategy(): return True strategy = tf.distribute.get_strategy() return not strategy.extended._use_merge_call() # pylint: disable=protected-access
5,940
35.900621
84
py
keras
keras-master/keras/optimizer_v2/gradient_descent.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGD optimizer implementation."""

import tensorflow.compat.v2 as tf

from keras.optimizer_v2 import optimizer_v2
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.optimizers.SGD")
class SGD(optimizer_v2.OptimizerV2):
  r"""Gradient descent (with momentum) optimizer.

  Without momentum, the update for a parameter `w` with gradient `g` is:

  ```python
  w = w - learning_rate * g
  ```

  With `momentum` larger than 0, a velocity accumulator is maintained:

  ```python
  velocity = momentum * velocity - learning_rate * g
  w = w + velocity
  ```

  With `nesterov=True`, the parameter update looks ahead along the velocity:

  ```python
  velocity = momentum * velocity - learning_rate * g
  w = w + momentum * velocity - learning_rate * g
  ```

  Usage:

  >>> opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  >>> var = tf.Variable(1.0)
  >>> loss = lambda: (var ** 2)/2.0  # d(loss)/d(var1) = var1
  >>> step_count = opt.minimize(loss, [var]).numpy()
  >>> # Step is `- learning_rate * grad`
  >>> var.numpy()
  0.9

  Args:
    learning_rate: A `Tensor`, floating point value, a schedule that is a
      `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
      taking no arguments and returning the value to use. Defaults to 0.01.
    momentum: float hyperparameter >= 0 that accelerates gradient descent in
      the relevant direction and dampens oscillations. Defaults to 0 (vanilla
      gradient descent).
    nesterov: boolean. Whether to apply Nesterov momentum. Defaults to
      `False`.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to `"SGD"`.
    **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or
      `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm;
      `"clipvalue"` (float) clips gradients by value.

  Reference:
    - For `nesterov=True`, See [Sutskever et al., 2013](
      http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
  """

  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.01,
               momentum=0.0,
               nesterov=False,
               name="SGD",
               **kwargs):
    super().__init__(name, **kwargs)
    # The legacy `lr` kwarg, when given, takes precedence over
    # `learning_rate`.
    self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
    self._set_hyper("decay", self._initial_decay)

    # Momentum is enabled when it is a tensor, a callable, or a positive
    # number; slots are only created in that case.
    self._momentum = bool(
        isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0)
    if isinstance(momentum, (int, float)) and not 0 <= momentum <= 1:
      raise ValueError(f"`momentum` must be between [0, 1]. Received: "
                       f"momentum={momentum} (of type {type(momentum)}).")
    self._set_hyper("momentum", momentum)

    self.nesterov = nesterov

  def _create_slots(self, var_list):
    # One "momentum" accumulator per variable, only when momentum is in use.
    if not self._momentum:
      return
    for var in var_list:
      self.add_slot(var, "momentum")

  def _prepare_local(self, var_device, var_dtype, apply_state):
    super()._prepare_local(var_device, var_dtype, apply_state)
    apply_state[(var_device, var_dtype)]["momentum"] = tf.identity(
        self._get_hyper("momentum", var_dtype))

  def _resource_apply_dense(self, grad, var, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    if not self._momentum:
      # Plain gradient descent: w <- w - lr * g.
      return tf.raw_ops.ResourceApplyGradientDescent(
          var=var.handle,
          alpha=coefficients["lr_t"],
          delta=grad,
          use_locking=self._use_locking)

    momentum_var = self.get_slot(var, "momentum")
    return tf.raw_ops.ResourceApplyKerasMomentum(
        var=var.handle,
        accum=momentum_var.handle,
        lr=coefficients["lr_t"],
        grad=grad,
        momentum=coefficients["momentum"],
        use_locking=self._use_locking,
        use_nesterov=self.nesterov)

  def _resource_apply_sparse_duplicate_indices(self, grad, var, indices,
                                               **kwargs):
    if self._momentum:
      # Momentum updates need de-duplicated indices; defer to the base class.
      return super()._resource_apply_sparse_duplicate_indices(
          grad, var, indices, **kwargs)

    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = (kwargs.get("apply_state", {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))

    # Without momentum, a sparse step is just a scatter-add of -lr * grad,
    # which is safe even with duplicate indices.
    return tf.raw_ops.ResourceScatterAdd(
        resource=var.handle,
        indices=indices,
        updates=-grad * coefficients["lr_t"])

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # This method is only needed for momentum optimization.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    momentum_var = self.get_slot(var, "momentum")
    return tf.raw_ops.ResourceSparseApplyKerasMomentum(
        var=var.handle,
        accum=momentum_var.handle,
        lr=coefficients["lr_t"],
        grad=grad,
        indices=indices,
        momentum=coefficients["momentum"],
        use_locking=self._use_locking,
        use_nesterov=self.nesterov)

  def get_config(self):
    config = super().get_config()
    config.update({
        "learning_rate": self._serialize_hyperparameter("learning_rate"),
        "decay": self._initial_decay,
        "momentum": self._serialize_hyperparameter("momentum"),
        "nesterov": self.nesterov,
    })
    return config
6,905
35.539683
80
py
keras
keras-master/keras/optimizer_v2/legacy_learning_rate_decay.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""

import tensorflow.compat.v2 as tf

import functools

from keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.util.tf_export import tf_export


def _resolve(schedule, step):
  """Evaluates `schedule` at `step`, lazily under eager execution.

  In graph mode the schedule is called immediately and a scalar `Tensor` is
  returned. Under eager execution a zero-argument callable is returned
  instead, so the decayed rate is recomputed on every invocation (e.g.
  across optimizer calls).
  """
  if tf.executing_eagerly():
    return functools.partial(schedule, step)
  return schedule(step)


@tf_export(v1=["train.exponential_decay"])
def exponential_decay(learning_rate,
                      global_step,
                      decay_steps,
                      decay_rate,
                      staircase=False,
                      name=None):
  """Applies exponential decay to the learning rate.

  The decayed learning rate is computed as:

  ```python
  decayed_learning_rate = learning_rate *
                          decay_rate ^ (global_step / decay_steps)
  ```

  If `staircase` is `True`, `global_step / decay_steps` is an integer
  division and the decayed learning rate follows a staircase function.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
      The decay rate.
    staircase: Boolean. If `True` decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_schedule.ExponentialDecay(
      learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.piecewise_constant_decay", "train.piecewise_constant"])
def piecewise_constant(x, boundaries, values, name=None):
  """Piecewise constant from boundaries and interval values.

  Yields `values[0]` when `x <= boundaries[0]`, `values[1]` when
  `boundaries[0] < x <= boundaries[1]`, ..., and `values[-1]` when
  `x > boundaries[-1]`.

  Args:
    x: A 0-D scalar `Tensor` of integer or floating type.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, all elements of the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s giving the value for
      each interval; one element longer than `boundaries`, all elements of a
      single type.
    name: A string. Optional name of the operation.

  Returns:
    A 0-D `Tensor` (graph mode), or a no-argument callable returning it
    (eager mode).

  Raises:
    ValueError: if the dtypes of `x` and `boundaries` do not match, if the
      elements of `values` do not share one dtype, or if the list lengths
      are inconsistent.
  """
  boundaries = tf.nest.map_structure(tf.convert_to_tensor,
                                     tf.nest.flatten(boundaries))
  values = tf.nest.map_structure(tf.convert_to_tensor, tf.nest.flatten(values))
  x_recomp = tf.convert_to_tensor(x)
  # Dtypes are compared explicitly instead of converting `boundaries` to the
  # dtype of `x`: an implicit conversion (e.g. float -> int) could silently
  # change the comparison results.
  for i, b in enumerate(boundaries):
    if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
      # int32 boundaries promote to int64 without loss of precision, which
      # covers the common case of Python-int boundaries with an int64 step.
      if (b.dtype.base_dtype == tf.int32 and
          x_recomp.dtype.base_dtype == tf.int64):
        boundaries[i] = tf.cast(b, x_recomp.dtype.base_dtype)
      else:
        raise ValueError(
            f"`boundaries` ({b.dtype.base_dtype}) must have the same dtype as "
            f"x ({x_recomp.dtype.base_dtype}).")
  for v in values[1:]:
    if v.dtype.base_dtype != values[0].dtype.base_dtype:
      raise ValueError(
          f"`values` must have elements all with the same dtype "
          f"({values[0].dtype.base_dtype} vs {v.dtype.base_dtype}).")
  schedule = learning_rate_schedule.PiecewiseConstantDecay(
      boundaries, values, name=name)
  return _resolve(schedule, x)


@tf_export(v1=["train.polynomial_decay"])
def polynomial_decay(learning_rate,
                     global_step,
                     decay_steps,
                     end_learning_rate=0.0001,
                     power=1.0,
                     cycle=False,
                     name=None):
  """Applies a polynomial decay to the learning rate.

  Decays the initial `learning_rate` towards `end_learning_rate` over
  `decay_steps`:

  ```python
  global_step = min(global_step, decay_steps)
  decayed_learning_rate = (learning_rate - end_learning_rate) *
                          (1 - global_step / decay_steps) ^ (power) +
                          end_learning_rate
  ```

  If `cycle` is `True`, `decay_steps` is replaced by its smallest multiple
  larger than `global_step`, so the schedule restarts instead of flattening.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The minimal end learning rate.
    power: A scalar `float32` or `float64` `Tensor` or a Python number. The
      power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_schedule.PolynomialDecay(
      learning_rate,
      decay_steps,
      end_learning_rate=end_learning_rate,
      power=power,
      cycle=cycle,
      name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.natural_exp_decay"])
def natural_exp_decay(learning_rate,
                      global_step,
                      decay_steps,
                      decay_rate,
                      staircase=False,
                      name=None):
  """Applies natural exponential decay to the initial learning rate.

  The decayed learning rate is computed as:

  ```python
  decayed_learning_rate = learning_rate *
                          exp(-decay_rate * global_step / decay_step)
  ```

  or, if `staircase` is `True`, with `floor(global_step / decay_step)`.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A Python number. Global step to use for the decay
      computation. Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  # exp(-decay_rate * step / decay_steps) == (e^-decay_rate) ^ (step /
  # decay_steps), so this reuses ExponentialDecay with base e^-decay_rate.
  decay_base = tf.exp(tf.negative(decay_rate))
  schedule = learning_rate_schedule.ExponentialDecay(
      learning_rate, decay_steps, decay_base, staircase=staircase, name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.inverse_time_decay"])
def inverse_time_decay(learning_rate,
                       global_step,
                       decay_steps,
                       decay_rate,
                       staircase=False,
                       name=None):
  """Applies inverse time decay to the initial learning rate.

  The decayed learning rate is computed as:

  ```python
  decayed_learning_rate = learning_rate /
                          (1 + decay_rate * global_step / decay_step)
  ```

  or, if `staircase` is `True`, with `floor(global_step / decay_step)`.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A Python number. Global step to use for the decay
      computation. Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_schedule.InverseTimeDecay(
      learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.cosine_decay"])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0,
                 name=None):
  """Applies cosine decay to the learning rate.

  The decayed learning rate is computed as:

  ```python
  global_step = min(global_step, decay_steps)
  cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
  decayed = (1 - alpha) * cosine_decay + alpha
  decayed_learning_rate = learning_rate * decayed
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps to decay over.
    alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
      learning rate value as a fraction of learning_rate.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.

  References:
    Stochastic Gradient Descent with Warm Restarts:
      [Loshchilov et al., 2017]
      (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx)
  """
  schedule = learning_rate_schedule.CosineDecay(
      learning_rate, decay_steps, alpha=alpha, name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.cosine_decay_restarts"])
def cosine_decay_restarts(learning_rate,
                          global_step,
                          first_decay_steps,
                          t_mul=2.0,
                          m_mul=1.0,
                          alpha=0.0,
                          name=None):
  """Applies cosine decay with restarts to the learning rate.

  The learning rate multiplier first decays from 1 to `alpha` over
  `first_decay_steps` steps; then a warm restart is performed. Each new
  period runs `t_mul` times more steps and starts from an initial rate
  scaled by `m_mul` relative to the previous period.

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
      number. Number of steps to decay over.
    t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used
      to derive the number of iterations in the i-th period.
    m_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used
      to derive the initial learning rate of the i-th period.
    alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
      learning rate value as a fraction of the learning_rate.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.

  References:
    Stochastic Gradient Descent with Warm Restarts:
      [Loshchilov et al., 2017]
      (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx)
  """
  schedule = learning_rate_schedule.CosineDecayRestarts(
      learning_rate,
      first_decay_steps,
      t_mul=t_mul,
      m_mul=m_mul,
      alpha=alpha,
      name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.linear_cosine_decay"])
def linear_cosine_decay(learning_rate,
                        global_step,
                        decay_steps,
                        num_periods=0.5,
                        alpha=0.0,
                        beta=0.001,
                        name=None):
  """Applies linear cosine decay to the learning rate.

  Note that linear cosine decay is more aggressive than cosine decay and
  larger initial learning rates can typically be used. The decayed learning
  rate is computed as:

  ```python
  global_step = min(global_step, decay_steps)
  linear_decay = (decay_steps - global_step) / decay_steps
  cosine_decay = 0.5 * (
      1 + cos(pi * 2 * num_periods * global_step / decay_steps))
  decayed = (alpha + linear_decay) * cosine_decay + beta
  decayed_learning_rate = learning_rate * decayed
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps to decay over.
    num_periods: Number of periods in the cosine part of the decay. See
      computation above.
    alpha: See computation above.
    beta: See computation above.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.

  References:
    Neural Optimizer Search with Reinforcement Learning:
      [Bello et al., 2017](http://proceedings.mlr.press/v70/bello17a.html)
  """
  schedule = learning_rate_schedule.LinearCosineDecay(
      learning_rate,
      decay_steps,
      num_periods=num_periods,
      alpha=alpha,
      beta=beta,
      name=name)
  return _resolve(schedule, global_step)


@tf_export(v1=["train.noisy_linear_cosine_decay"])
def noisy_linear_cosine_decay(learning_rate,
                              global_step,
                              decay_steps,
                              initial_variance=1.0,
                              variance_decay=0.55,
                              num_periods=0.5,
                              alpha=0.0,
                              beta=0.001,
                              name=None):
  """Applies noisy linear cosine decay to the learning rate.

  Like `linear_cosine_decay`, but adds zero-centered gaussian noise `eps_t`
  to the linear term, with variance
  `initial_variance / (1 + global_step) ** variance_decay`:

  ```python
  global_step = min(global_step, decay_steps)
  linear_decay = (decay_steps - global_step) / decay_steps
  cosine_decay = 0.5 * (
      1 + cos(pi * 2 * num_periods * global_step / decay_steps))
  decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
  decayed_learning_rate = learning_rate * decayed
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps to decay over.
    initial_variance: initial variance for the noise. See computation above.
    variance_decay: decay for the noise's variance. See computation above.
    num_periods: Number of periods in the cosine part of the decay. See
      computation above.
    alpha: See computation above.
    beta: See computation above.
    name: String. Optional name of the operation.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` (graph mode), or a
    no-argument callable returning that tensor (eager mode).

  Raises:
    ValueError: if `global_step` is not supplied.

  References:
    Neural Optimizer Search with Reinforcement Learning:
      [Bello et al., 2017](http://proceedings.mlr.press/v70/bello17a.html)
  """
  schedule = learning_rate_schedule.NoisyLinearCosineDecay(
      learning_rate,
      decay_steps,
      initial_variance=initial_variance,
      variance_decay=variance_decay,
      num_periods=num_periods,
      alpha=alpha,
      beta=beta,
      name=name)
  return _resolve(schedule, global_step)
29,391
37.270833
80
py
keras
keras-master/keras/optimizer_v2/learning_rate_schedule_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for learning rate decay.""" import math from absl.testing import parameterized from keras import combinations from keras.optimizer_v2 import gradient_descent from keras.optimizer_v2 import learning_rate_schedule import numpy as np import tensorflow.compat.v2 as tf def _maybe_serialized(lr_decay, serialize_and_deserialize): if serialize_and_deserialize: serialized = learning_rate_schedule.serialize(lr_decay) return learning_rate_schedule.deserialize(serialized) else: return lr_decay @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class LRDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testContinuous(self, serialize): self.evaluate(tf.compat.v1.global_variables_initializer()) step = 5 decayed_lr = learning_rate_schedule.ExponentialDecay(0.05, 10, 0.96) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = .05 * 0.96**(5.0 / 10.0) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testStaircase(self, serialize): if tf.executing_eagerly(): step = tf.Variable(0) self.evaluate(tf.compat.v1.global_variables_initializer()) decayed_lr = learning_rate_schedule.ExponentialDecay( .1, 3, 0.96, staircase=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) # No change to learning rate due to 
staircase expected = .1 self.evaluate(step.assign(1)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) expected = .1 self.evaluate(step.assign(2)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # Decayed learning rate expected = .1 * 0.96 ** (100 // 3) self.evaluate(step.assign(100)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testVariables(self, serialize): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): step = tf.Variable(1) assign_1 = step.assign(1) assign_2 = step.assign(2) assign_100 = step.assign(100) decayed_lr = learning_rate_schedule.ExponentialDecay( .1, 3, 0.96, staircase=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) # No change to learning rate self.evaluate(assign_1.op) self.assertAllClose(self.evaluate(decayed_lr(step)), .1, 1e-6) self.evaluate(assign_2.op) self.assertAllClose(self.evaluate(decayed_lr(step)), .1, 1e-6) # Decayed learning rate self.evaluate(assign_100.op) expected = .1 * 0.96**(100 // 3) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testPiecewiseConstant(self, serialize): x = tf.Variable(-999) decayed_lr = learning_rate_schedule.PiecewiseConstantDecay( [100, 110, 120], [1.0, 0.1, 0.01, 0.001]) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6) self.evaluate(x.assign(100)) self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6) self.evaluate(x.assign(105)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6) self.evaluate(x.assign(110)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6) self.evaluate(x.assign(120)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.01, 1e-6) self.evaluate(x.assign(999)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.001, 1e-6) def 
testPiecewiseFunction(self, serialize): if not tf.executing_eagerly(): self.skipTest("Run on eager mode only.") del serialize v = tf.Variable(1.) def loss_fn(): return v * v learning_rate = learning_rate_schedule.PiecewiseConstantDecay( [1.], [1., 0.1]) opt = gradient_descent.SGD(learning_rate=learning_rate) @tf.function def minimize(): with tf.GradientTape() as tape: loss = loss_fn() g = tape.gradient(loss, [v]) opt.apply_gradients(list(zip(g, [v]))) minimize() self.assertAllEqual(v.read_value(), -1.0) def testPiecewiseConstantEdgeCases(self, serialize): # Test casting boundaries from int32 to int64. x_int64 = tf.Variable(0, dtype=tf.int64) boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7] decayed_lr = learning_rate_schedule.PiecewiseConstantDecay( boundaries, values) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6) self.evaluate(x_int64.assign(1)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6) self.evaluate(x_int64.assign(2)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.5, 1e-6) self.evaluate(x_int64.assign(3)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.6, 1e-6) self.evaluate(x_int64.assign(4)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.7, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class LinearDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testHalfWay(self, serialize): step = 5 lr = 0.05 end_lr = 0.0 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr * 0.5 self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testEnd(self, serialize): step = 10 lr = 0.05 end_lr = 0.001 decayed_lr = 
learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testHalfWayWithEnd(self, serialize): step = 5 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr + end_lr) * 0.5 self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEnd(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEndWithCycle(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, cycle=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.25 + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class SqrtDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testHalfWay(self, serialize): step = 5 lr = 0.05 end_lr = 0.0 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr * 0.5**power self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testEnd(self, serialize): step = 10 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testHalfWayWithEnd(self, serialize): 
step = 5 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.5**power + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEnd(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEndWithCycle(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power, cycle=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.25**power + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class PolynomialDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testBeginWithCycle(self, serialize): lr = 0.001 decay_steps = 10 step = 0 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, decay_steps, cycle=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class InverseDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testDecay(self, serialize): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = learning_rate_schedule.InverseTimeDecay(initial_lr, k, decay_rate) decayed_lr = _maybe_serialized(decayed_lr, serialize) 
self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + i / k * decay_rate) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) self.evaluate(step.assign_add(1)) def testStaircase(self, serialize): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = learning_rate_schedule.InverseTimeDecay( initial_lr, k, decay_rate, staircase=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + decay_rate * (i // k)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) self.evaluate(step.assign_add(1)) @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class CosineDecayTestV2(tf.test.TestCase, parameterized.TestCase): def np_cosine_decay(self, step, decay_steps, alpha=0.0): step = min(step, decay_steps) completed_fraction = step / decay_steps decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha def testDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay(initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testAlpha(self, serialize): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay(initial_lr, num_training_steps, alpha) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps, alpha) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testFloat64InitLearningRate(self, serialize): num_training_steps = 1000 initial_lr = np.float64(1.0) for step in range(0, 
1500, 250): decayed_lr = learning_rate_schedule.CosineDecay(initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @combinations.generate(combinations.combine(serialize=[False, True], mode=["graph", "eager"])) class CosineDecayRestartsTestV2(tf.test.TestCase, parameterized.TestCase): def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0): fac = 1.0 while step >= decay_steps: step -= decay_steps decay_steps *= t_mul fac *= m_mul completed_fraction = step / decay_steps decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha def testDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testFloat64InitLearningRate(self, serialize): num_training_steps = 1000 initial_lr = np.float64(1.0) for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testAlpha(self, serialize): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, alpha=alpha) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, alpha=alpha) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def 
testMMul(self, serialize): num_training_steps = 1000 initial_lr = 1.0 m_mul = 0.9 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, m_mul=m_mul) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, m_mul=m_mul) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testTMul(self, serialize): num_training_steps = 1000 initial_lr = 1.0 t_mul = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, t_mul=t_mul) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, t_mul=t_mul) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) if __name__ == "__main__": tf.test.main()
17,081
37.044543
80
py
keras
keras-master/keras/optimizer_v2/nadam.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Nadam optimizer implementation.""" import tensorflow.compat.v2 as tf from keras import backend_config from keras.optimizer_v2 import learning_rate_schedule from keras.optimizer_v2 import optimizer_v2 from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Nadam') class Nadam(optimizer_v2.OptimizerV2): r"""Optimizer that implements the NAdam algorithm. Much like Adam is essentially RMSprop with momentum, Nadam is Adam with Nesterov momentum. Args: learning_rate: A Tensor or a floating point value. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the exponentially weighted infinity norm. epsilon: A small constant for numerical stability. name: Optional name for the operations created when applying gradients. Defaults to `"Nadam"`. **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips gradients by value. 
Usage Example: >>> opt = tf.keras.optimizers.Nadam(learning_rate=0.2) >>> var1 = tf.Variable(10.0) >>> loss = lambda: (var1 ** 2) / 2.0 >>> step_count = opt.minimize(loss, [var1]).numpy() >>> "{:.1f}".format(var1.numpy()) 9.8 Reference: - [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf). """ _HAS_AGGREGATE_GRAD = True def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, name='Nadam', **kwargs): # Backwards compatibility with keras NAdam optimizer. kwargs['decay'] = kwargs.pop('schedule_decay', 0.004) learning_rate = kwargs.get('lr', learning_rate) if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule): raise ValueError('The Nadam optimizer does not support ' 'tf.keras.optimizers.LearningRateSchedules as the ' 'learning rate.') super(Nadam, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or backend_config.epsilon() self._m_cache = None def _create_slots(self, var_list): var_dtype = var_list[0].dtype.base_dtype if self._m_cache is None: self._m_cache = self.add_weight( 'momentum_cache', shape=[], dtype=var_dtype, initializer='ones', trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) self._weights.append(self._m_cache) # Separate for-loops to respect the ordering of slot variables from v1. for var in var_list: # Create slots for the first moments. self.add_slot(var, 'm') for var in var_list: # Create slots for the second moments. 
self.add_slot(var, 'v') def _prepare_local(self, var_device, var_dtype, apply_state): lr_t = tf.identity(self._get_hyper('learning_rate', var_dtype)) beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype)) beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype)) local_step = tf.cast(self.iterations + 1, var_dtype) next_step = tf.cast(self.iterations + 2, var_dtype) decay_base = tf.cast(0.96, var_dtype) m_t = beta_1_t * (1. - 0.5 * ( tf.pow(decay_base, self._initial_decay * local_step))) m_t_1 = beta_1_t * (1. - 0.5 * ( tf.pow(decay_base, self._initial_decay * next_step))) m_schedule_new = tf.cast(self._m_cache_read, var_dtype) * m_t if var_dtype is self._m_cache.dtype: m_schedule_new = tf.identity(tf.compat.v1.assign( self._m_cache, m_schedule_new, use_locking=self._use_locking)) m_schedule_next = m_schedule_new * m_t_1 apply_state[(var_device, var_dtype)] = dict( lr_t=lr_t, neg_lr_t=-lr_t, # pylint: disable=invalid-unary-operand-type epsilon=tf.convert_to_tensor(self.epsilon, var_dtype), beta_1_t=beta_1_t, beta_2_t=beta_2_t, m_t=m_t, m_t_1=m_t_1, one_minus_beta_1_t=1 - beta_1_t, one_minus_beta_2_t=1 - beta_2_t, one_minus_m_t=1. - m_t, one_minus_m_schedule_new=1. - m_schedule_new, one_minus_m_schedule_next=1. - m_schedule_next, v_t_prime_denominator=1. - tf.pow(beta_2_t, local_step), ) def _prepare(self, var_list): # Get the value of the momentum cache before starting to apply gradients. 
self._m_cache_read = tf.identity(self._m_cache) return super(Nadam, self)._prepare(var_list) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') g_prime = grad / coefficients['one_minus_m_schedule_new'] m_t = (coefficients['beta_1_t'] * m + coefficients['one_minus_beta_1_t'] * grad) m_t = tf.compat.v1.assign(m, m_t, use_locking=self._use_locking) m_t_prime = m_t / coefficients['one_minus_m_schedule_next'] v_t = (coefficients['beta_2_t'] * v + coefficients['one_minus_beta_2_t'] * tf.square(grad)) v_t = tf.compat.v1.assign(v, v_t, use_locking=self._use_locking) v_t_prime = v_t / coefficients['v_t_prime_denominator'] m_t_bar = (coefficients['one_minus_m_t'] * g_prime + coefficients['m_t_1'] * m_t_prime) var_t = var - coefficients['lr_t'] * m_t_bar / ( tf.sqrt(v_t_prime) + coefficients['epsilon']) return tf.compat.v1.assign(var, var_t, use_locking=self._use_locking).op def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') g_prime = grad / coefficients['one_minus_m_schedule_new'] # m_t = beta1 * m + (1 - beta1) * g_t m_scaled_g_values = grad * coefficients['one_minus_beta_1_t'] m_t = tf.compat.v1.assign(m, m * coefficients['beta_1_t'], use_locking=self._use_locking) with tf.control_dependencies([m_t]): m_t = self._resource_scatter_add(m, indices, m_scaled_g_values) m_t_slice = tf.gather(m_t, indices) m_t_prime = m_t_slice / coefficients['one_minus_m_schedule_next'] m_t_bar = (coefficients['one_minus_m_t'] * g_prime + coefficients['m_t_1'] * m_t_prime) # v_t = beta2 * v + (1 - beta2) * 
(g_t * g_t) v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t'] v_t = tf.compat.v1.assign(v, v * coefficients['beta_2_t'], use_locking=self._use_locking) with tf.control_dependencies([v_t]): v_t = self._resource_scatter_add(v, indices, v_scaled_g_values) v_t_slice = tf.gather(v_t, indices) v_t_prime = v_t_slice / coefficients['v_t_prime_denominator'] v_prime_sqrt_plus_eps = tf.sqrt(v_t_prime) + coefficients['epsilon'] var_update = self._resource_scatter_add( var, indices, coefficients['neg_lr_t'] * m_t_bar / v_prime_sqrt_plus_eps) return tf.group(*[var_update, m_t_bar, v_t]) def get_config(self): config = super(Nadam, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._initial_decay, 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, }) return config
8,789
40.074766
80
py
keras
keras-master/keras/optimizer_v2/adamax.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adamax optimizer implementation.""" import tensorflow.compat.v2 as tf from keras import backend_config from keras.optimizer_v2 import optimizer_v2 from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Adamax') class Adamax(optimizer_v2.OptimizerV2): """Optimizer that implements the Adamax algorithm. It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Adamax is sometimes superior to adam, specially in models with embeddings. Initialization: ```python m = 0 # Initialize initial 1st moment vector v = 0 # Initialize the exponentially weighted infinity norm t = 0 # Initialize timestep ``` The update rule for parameter `w` with gradient `g` is described at the end of section 7.1 of the paper: ```python t += 1 m = beta1 * m + (1 - beta) * g v = max(beta2 * v, abs(g)) current_lr = learning_rate / (1 - beta1 ** t) w = w - current_lr * m / (v + epsilon) ``` Similarly to `Adam`, the epsilon is added for numerical stability (especially to get rid of division by zero when `v_t == 0`). 
In contrast to `Adam`, the sparse implementation of this algorithm (used when the gradient is an IndexedSlices object, typically because of `tf.gather` or an embedding lookup in the forward pass) only updates variable slices and corresponding `m_t`, `v_t` terms when that part of the variable was used in the forward pass. This means that the sparse behavior is contrast to the dense behavior (similar to some momentum implementations which ignore momentum unless a variable slice was actually used). Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the exponentially weighted infinity norm. epsilon: A small constant for numerical stability. name: Optional name for the operations created when applying gradients. Defaults to `"Adamax"`. **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips gradients by value. Reference: - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) """ _HAS_AGGREGATE_GRAD = True def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, name='Adamax', **kwargs): super(Adamax, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or backend_config.epsilon() def _create_slots(self, var_list): # Separate for-loops to respect the ordering of slot variables from v1. for var in var_list: self.add_slot(var, 'm') # Create slots for the first moments. for var in var_list: self.add_slot(var, 'v') # Create slots for the second moments. 
def _prepare_local(self, var_device, var_dtype, apply_state): super(Adamax, self)._prepare_local(var_device, var_dtype, apply_state) local_step = tf.cast(self.iterations + 1, var_dtype) beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype)) beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype)) beta_1_power = tf.pow(beta_1_t, local_step) lr_t = apply_state[(var_device, var_dtype)]['lr_t'] apply_state[(var_device, var_dtype)].update( dict( neg_scaled_lr=-lr_t / (1 - beta_1_power), epsilon=tf.convert_to_tensor( self.epsilon, var_dtype), beta_1_t=beta_1_t, beta_1_power=beta_1_power, one_minus_beta_1_t=1 - beta_1_t, beta_2_t=beta_2_t, zero=tf.zeros((), dtype=tf.int64))) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') return tf.raw_ops.ResourceApplyAdaMax( var=var.handle, m=m.handle, v=v.handle, beta1_power=coefficients['beta_1_power'], lr=coefficients['lr_t'], beta1=coefficients['beta_1_t'], beta2=coefficients['beta_2_t'], epsilon=coefficients['epsilon'], grad=grad, use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) # m_t = beta1 * m + (1 - beta1) * g_t m = self.get_slot(var, 'm') m_slice = tf.gather(m, indices, axis=coefficients['zero']) m_t_slice = (m_slice * coefficients['beta_1_t'] + grad * coefficients['one_minus_beta_1_t']) with tf.control_dependencies([m_t_slice]): m_t = self._resource_scatter_update(m, indices, m_t_slice) # u_t = max(beta2 * u, abs(g_t)) v = self.get_slot(var, 'v') v_slice = tf.gather(v, indices, axis=coefficients['zero']) v_t_slice = tf.maximum(v_slice * 
coefficients['beta_2_t'], tf.abs(grad)) with tf.control_dependencies([v_t_slice]): v_t = self._resource_scatter_update(v, indices, v_t_slice) # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t var_slice = coefficients['neg_scaled_lr'] * ( m_t_slice / (v_t_slice + coefficients['epsilon'])) with tf.control_dependencies([var_slice]): var_update = self._resource_scatter_add(var, indices, var_slice) return tf.group(*[var_update, m_t, v_t]) def get_config(self): config = super(Adamax, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._initial_decay, 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, }) return config
7,264
39.361111
80
py
keras
keras-master/keras/optimizer_v2/adamax_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adamax.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import combinations from keras.optimizer_v2 import adamax def adamax_update_numpy(param, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): m_t = beta1 * m + (1 - beta1) * g_t v_t = np.maximum(beta2 * v, np.abs(g_t)) param_t = param - (alpha / (1 - beta1**(t + 1))) * (m_t / (v_t + epsilon)) return param_t, m_t, v_t def adamax_sparse_update_numpy(param, indices, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param) m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t)) param_t_slice = param[indices] - ( (alpha / (1 - beta1**(t + 1))) * (m_t_slice / (v_t_slice + epsilon))) m_t[indices] = m_t_slice v_t[indices] = v_t_slice param_t[indices] = param_t_slice return param_t, m_t, v_t def get_beta_accumulators(opt, dtype): local_step = tf.cast(opt.iterations + 1, dtype) beta_1_t = tf.cast(opt._get_hyper("beta_1"), dtype) beta_1_power = tf.pow(beta_1_t, local_step) return beta_1_power class AdamaxOptimizerTest(tf.test.TestCase, parameterized.TestCase): def testResourceSparse(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype) # pylint: disable=cell-var-from-loop m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots() var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0_np_indices = np.array([0, 1], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np), tf.constant(grads0_np_indices), tf.constant([3])) grads1_np_indices = np.array([2, 1], dtype=np.int32) grads1 = tf.IndexedSlices( tf.constant(grads1_np), tf.constant(grads1_np_indices), tf.constant([3])) opt = adamax.Adamax() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0, 3.0], var0) self.assertAllClose([4.0, 5.0, 6.0], var1) beta1_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adamax for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power) update.run() var0_np, m0, v0 = adamax_sparse_update_numpy( var0_np, grads0_np_indices, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_sparse_update_numpy( var1_np, grads1_np_indices, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0) self.assertAllCloseAccordingToType(var1_np, var1) def testSparseDevicePlacement(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for index_dtype in [tf.int32, tf.int64]: with tf.Graph().as_default(), self.cached_session( force_gpu=tf.test.is_gpu_available()): # If a GPU is available, tests that all optimizer ops can be placed on # it (i.e. 
they have GPU kernels). var = tf.Variable([[1.0], [2.0]]) indices = tf.constant([0, 1], dtype=index_dtype) g_sum = lambda: tf.reduce_sum(tf.gather(var, indices)) # pylint: disable=cell-var-from-loop optimizer = adamax.Adamax(3.0) minimize_op = optimizer.minimize(g_sum, var_list=[var]) self.evaluate(tf.compat.v1.global_variables_initializer()) minimize_op.run() def testSparseRepeatedIndices(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): repeated_index_update_var = tf.Variable( [[1.0], [2.0]], dtype=dtype) aggregated_update_var = tf.Variable( [[1.0], [2.0]], dtype=dtype) grad_repeated_index = tf.IndexedSlices( tf.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), tf.constant([1, 1]), tf.constant([2, 1])) grad_aggregated = tf.IndexedSlices( tf.constant( [0.2], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) repeated_update = adamax.Adamax().apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adamax.Adamax().apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(aggregated_update_var, repeated_index_update_var.eval()) for _ in range(3): repeated_update.run() aggregated_update.run() self.assertAllClose(aggregated_update_var, repeated_index_update_var.eval()) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasic(self): for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with self.session(graph=tf.Graph(), use_gpu=True): # Initialize variables for numpy implementation. 
m0 = np.array([0.0, 0.0]) v0 = np.array([0.0, 0.0]) m1 = np.array([0.0, 0.0]) v1 = np.array([0.0, 0.0]) var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adamax.Adamax() if not tf.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of Adamax for t in range(3): beta_1_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) if not tf.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(var0), rtol=1e-2) self.assertAllCloseAccordingToType( var1_np, self.evaluate(var1), rtol=1e-2) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicWithLearningRateDecay(self): for i, dtype in enumerate([tf.half, tf.float32, tf.float64]): with self.session(graph=tf.Graph(), use_gpu=True): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, name="var0_%d" % i) var1 = tf.Variable(var1_np, name="var1_%d" % i) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.001 decay = 0.002 opt = adamax.Adamax(learning_rate=learning_rate, decay=decay) if not tf.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of Adamax for t in range(3): beta_1_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) if not tf.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) lr = learning_rate / (1 + decay * t) var0_np, m0, v0 = adamax_update_numpy( var0_np, grads0_np, t, m0, v0, alpha=lr) var1_np, m1, v1 = adamax_update_numpy( var1_np, grads1_np, t, m1, v1, alpha=lr) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0), rtol=1e-2) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1), rtol=1e-2) def testTensorLearningRate(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adamax.Adamax(tf.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0) self.assertAllClose([3.0, 4.0], var1) beta1_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adamax for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power) update.run() var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0) self.assertAllCloseAccordingToType(var1_np, var1) def testSharing(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) opt = adamax.Adamax() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) beta1_power = get_beta_accumulators(opt, dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0) self.assertAllClose([3.0, 4.0], var1) # Run 3 steps of intertwined Adamax1 and Adamax2. for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power) if t % 2 == 0: update1.run() else: update2.run() var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0) self.assertAllCloseAccordingToType(var1_np, var1) @combinations.generate(combinations.combine(mode=["eager"])) def testSlotsUniqueEager(self): v1 = tf.Variable(1.) v2 = tf.Variable(1.) opt = adamax.Adamax(1.) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. 
self.assertLen({id(v) for v in opt.variables()}, 5) def testConstructAdamaxWithLR(self): opt = adamax.Adamax(lr=1.0) opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0) opt_3 = adamax.Adamax(learning_rate=0.1) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) if __name__ == "__main__": tf.test.main()
15,419
41.016349
108
py
keras
keras-master/keras/optimizer_v2/gradient_descent_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for GradientDescent.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import combinations from keras.optimizer_v2 import gradient_descent from keras.optimizer_v2 import learning_rate_schedule class GradientDescentOptimizerTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) sgd = gradient_descent.SGD(3.0) sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) def _test_basic_sgd_with_learning_rate_decay(self, sgd, dtype): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], 
dtype=dtype) if not tf.executing_eagerly(): sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 2 steps of sgd if not tf.executing_eagerly(): self.evaluate(sgd_op) else: sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) if not tf.executing_eagerly(): self.evaluate(sgd_op) else: sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) # Validate updated params self.assertAllCloseAccordingToType( [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType( [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicWithLearningRateDecay(self): for dtype in [tf.half, tf.float32, tf.float64]: learning_rate = 3.0 decay = 0.5 sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay) self._test_basic_sgd_with_learning_rate_decay(sgd, dtype) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicWithLearningRateInverseTimeDecay(self): for dtype in [tf.half, tf.float32, tf.float64]: learning_rate = learning_rate_schedule.InverseTimeDecay( 3.0, decay_steps=1.0, decay_rate=0.5) sgd = gradient_descent.SGD(learning_rate=learning_rate) self._test_basic_sgd_with_learning_rate_decay(sgd, dtype) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicWithLearningRateInverseTimeDecaySerializeAndDeserialize(self): for dtype in [tf.half, tf.float32, tf.float64]: learning_rate = learning_rate_schedule.InverseTimeDecay( 3.0, decay_steps=1.0, decay_rate=0.5) sgd = gradient_descent.SGD(learning_rate=learning_rate) sgd = gradient_descent.SGD.from_config(sgd.get_config()) 
self._test_basic_sgd_with_learning_rate_decay(sgd, dtype) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasicCallableParams(self): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) lr = lambda: 3.0 sgd = gradient_descent.SGD(lr) sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testMinimizeResourceVariable(self): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) var1 = tf.Variable([3.0], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) loss = lambda: tf.matmul(var0, x) + var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(1.0) sgd_op = sgd.minimize(loss, [var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[1.0 - 4.0, 2.0 - 5.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 1.0], self.evaluate(var1)) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) var1 = tf.Variable([3.0], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop pred += var1 # pylint: disable=cell-var-from-loop return pred * pred sgd_op = gradient_descent.SGD(1.0).minimize(loss, [var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0 np_grad = 2 * np_pred self.assertAllCloseAccordingToType( [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1)) def testTensorLearningRate(self): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) lrate = tf.constant(3.0) sgd_op = gradient_descent.SGD(lrate).apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) def testGradWrtRef(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: opt = gradient_descent.SGD(3.0) values = [1.0, 3.0] vars_ = [tf.Variable([v], dtype=dtype) for v in values] loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop grads_and_vars = opt._compute_gradients(loss, vars_) self.evaluate(tf.compat.v1.global_variables_initializer()) for grad, _ in grads_and_vars: self.assertAllCloseAccordingToType([1.0], self.evaluate(grad)) def testSparseBasic(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([[1.0], [2.0]], dtype=dtype) var1 = tf.Variable([[3.0], [4.0]], dtype=dtype) grads0 = tf.IndexedSlices( tf.constant([0.1], shape=[1, 1], dtype=dtype), tf.constant([0]), tf.constant([2, 1])) grads1 = tf.IndexedSlices( tf.constant([0.01], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) sgd_op = gradient_descent.SGD(3.0).apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]], self.evaluate(var1)) def testSparseBasicWithLearningRateDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([[1.0], [2.0]], dtype=dtype) var1 = tf.Variable([[3.0], [4.0]], dtype=dtype) grads0 = tf.IndexedSlices( tf.constant([0.1], shape=[1, 1], dtype=dtype), tf.constant([0]), tf.constant([2, 1])) grads1 = tf.IndexedSlices( tf.constant([0.01], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) sgd_op = gradient_descent.SGD( 3.0, decay=0.5).apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 2 steps of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]], self.evaluate(var1)) self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType( [[1.0 - 3.0 * 0.1 - 2.0 * 0.1], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType( [[3.0], [4.0 - 3.0 * 0.01 - 2.0 * 0.01]], self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["eager"])) def testCapturingInFunctionWhileExecutingEagerly(self): optimizer = gradient_descent.SGD(1.0) var_holder = {} def step(): if not var_holder: var_holder["var"] = tf.Variable(1.0) else: var_holder["var"].assign(1.0) with tf.GradientTape() as tape: loss = var_holder["var"]**2 grad = tape.gradient(loss, var_holder["var"]) optimizer.apply_gradients([(grad, var_holder["var"])]) return var_holder["var"].read_value() compiled_step = tf.function(step) self.assertEqual(float(step()), -1.0) self.assertEqual(float(compiled_step()), -1.0) # This shouldn't fail; in particular, the learning rate tensor should # be an EagerTensor once again, not a graph Tensor. 
self.assertEqual(float(step()), -1.0) def testConstructSGDWithLR(self): opt = gradient_descent.SGD(lr=1.0) opt_2 = gradient_descent.SGD(learning_rate=0.1, lr=1.0) opt_3 = gradient_descent.SGD(learning_rate=0.1) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) class MomentumOptimizerTest(tf.test.TestCase, parameterized.TestCase): def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): accum = accum * momentum - g * lr var += (accum * momentum - g * lr) return var, accum @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testBasic(self): for _, dtype in enumerate([tf.half, tf.float32, tf.float64]): var0 = tf.Variable([1.0, 2.0], dtype=dtype, name="var0") var1 = tf.Variable([3.0, 4.0], dtype=dtype, name="var1") grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) learning_rate = 2.0 momentum = 0.9 mom_opt = gradient_descent.SGD( learning_rate=learning_rate, momentum=momentum) # self.assertFalse(mom_opt._initial_decay) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) # Check we have slots slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(mom_update) # Check that the momentum accumulators have been updated. 
self.assertAllCloseAccordingToType( np.array([-0.2, -0.2]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([-0.02, -0.02]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), self.evaluate(var1)) # Step 2: the momentum accumulators contain the previous update. self.evaluate(mom_update) if tf.executing_eagerly(): mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)) def testNesterovMomentum(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.float32, tf.float64]: var0 = tf.Variable([1.0, 2.0], dtype=dtype, name="var0") var1 = tf.Variable([3.0, 4.0], dtype=dtype, name="var1") var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) loss = lambda: 5 * var0 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop mom_op = gradient_descent.SGD( learning_rate=2.0, momentum=0.9, nesterov=True) opt_op = mom_op.minimize(loss, [var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(1, 5): self.evaluate(opt_op) var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 3, 2.0, 0.9) self.assertAllClose(var0_np, self.evaluate(var0)) self.assertAllClose(var1_np, self.evaluate(var1)) def testSparseNesterovMomentum(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session() as sess: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) grads = [] for t in range(1, 5): grads.append(var0_np * 10) var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 3, 2.0, 0.9) var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, dtype=dtype, name="var0") var1 = tf.Variable(var1_np, dtype=dtype, name="var1") mom_op = gradient_descent.SGD( learning_rate=2.0, momentum=0.9, nesterov=True) x_feed = tf.compat.v1.placeholder(dtype) y_feed = tf.IndexedSlices(x_feed, tf.constant([0, 1]), tf.constant([2])) grads_and_vars = [(y_feed, var0), (tf.constant([3.0, 3.0], dtype=dtype), var1)] opt_update = mom_op.apply_gradients(grads_and_vars) self.evaluate(tf.compat.v1.global_variables_initializer()) for t in range(1, 5): sess.run(opt_update, feed_dict={x_feed: grads[t - 1]}) var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 3, 2.0, 0.9) self.assertAllClose(var0_np, self.evaluate(var0)) self.assertAllClose(var1_np, self.evaluate(var1)) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) # pylint: disable=cell-var-from-loop def loss(): x = tf.constant([[4.0], [5.0]], dtype=dtype) pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) return pred * pred # pylint: enable=cell-var-from-loop opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9) sgd_op = opt.minimize(loss, [var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testMinimizeWith2DIndicesForEmbeddingLookup(self): var0 = tf.Variable(tf.ones([2, 2])) def loss(): return tf.reduce_sum(tf.compat.v1.nn.embedding_lookup(var0, [[1]])) opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9) sgd_op = opt.minimize(loss, [var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(sgd_op) self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0)) def testTensorLearningRateAndMomentum(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = gradient_descent.SGD( learning_rate=tf.constant(2.0), momentum=tf.constant(0.9)) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Check we have slots slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([-0.2, -0.2]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([-0.02, -0.02]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), self.evaluate(var1)) # Step 2: the momentum accumulators contain the previous update. self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)) def testSparse(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype)) var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2])) grads0 = tf.IndexedSlices( tf.constant([[.1, .1]], dtype=dtype), tf.constant([1]), tf.constant([4, 2])) grads1 = tf.IndexedSlices( tf.constant([[.01, .01], [.01, .01]], dtype=dtype), tf.constant([2, 3]), tf.constant([4, 2])) mom_opt = gradient_descent.SGD(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) # Check we have slots slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Fetch params to validate initial values self.assertAllClose([0, 0], self.evaluate(var0)[0]) self.assertAllClose([0, 0], self.evaluate(var0)[1]) self.assertAllClose([1, 1], self.evaluate(var1)[2]) # Step 1: the momentum accumulators are 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([0, 0]), self.evaluate(slot0)[0]) self.assertAllCloseAccordingToType( np.array([-2.0 * .1, -2.0 * .1]), self.evaluate(slot0)[1]) self.assertAllCloseAccordingToType( np.array([-2.0 * .01, -2.0 * .01]), self.evaluate(slot1)[2]) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([0, 0]), self.evaluate(var0)[0]) self.assertAllCloseAccordingToType( np.array([-(0.1 * 2.0), -(0.1 * 2.0)]), self.evaluate(var0)[1]) self.assertAllCloseAccordingToType( np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), self.evaluate(var1)[2]) # Step 2: the momentum accumulators contain the previous update. self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0]) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)[1]) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)[2]) # Check that the parameters have been updated. self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0]) self.assertAllCloseAccordingToType( np.array([ -(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), -(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)[1]) self.assertAllCloseAccordingToType( np.array([ 0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)[2]) def testSharing(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = gradient_descent.SGD(learning_rate=2.0, momentum=0.9) mom_update1 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) mom_update2 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(mom_update1) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([-0.2, -0.2]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([-0.02, -0.02]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), self.evaluate(var1)) # Step 2: the second momentum accumulators contain the previous update. self.evaluate(mom_update2) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["graph", "eager"])) def testConfig(self): opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9, nesterov=True) config = opt.get_config() opt2 = gradient_descent.SGD.from_config(config) lr = opt.lr lr2 = opt2.lr self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(lr), self.evaluate(lr2)) self.assertAllClose( self.evaluate(opt._get_hyper("momentum")), self.evaluate(opt2._get_hyper("momentum"))) self.assertAllClose( self.evaluate(opt._get_hyper("decay")), self.evaluate(opt2._get_hyper("decay"))) var0 = tf.Variable([[1.0], [2.0]], dtype=tf.float32) loss = lambda: 3 * var0 # learning rate variable created when calling minimize. 
opt.minimize(loss, [var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) config = opt.get_config() opt3 = gradient_descent.SGD.from_config(config) lr3 = opt3.lr self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(lr), self.evaluate(lr3)) self.assertAllClose( self.evaluate(opt._get_hyper("momentum")), self.evaluate(opt3._get_hyper("momentum"))) self.assertAllClose( self.evaluate(opt._get_hyper("decay")), self.evaluate(opt3._get_hyper("decay"))) self.assertTrue(opt3.nesterov) def testNesterovWithoutMomentum(self): with self.assertRaisesRegex(ValueError, "must be between"): gradient_descent.SGD(learning_rate=1.0, momentum=2.0) def testConstructMomentumWithLR(self): opt = gradient_descent.SGD(lr=1.0, momentum=0.9) opt_2 = gradient_descent.SGD(learning_rate=0.1, momentum=0.9, lr=1.0) opt_3 = gradient_descent.SGD(learning_rate=0.1, momentum=0.9) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) @combinations.generate(combinations.combine(mode=["eager"])) def testMinimizeLossTensor(self): for dtype in [tf.half, tf.float32, tf.float64]: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) var1 = tf.Variable([3.0], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) tape = tf.GradientTape() with tape: loss = tf.matmul(var0, x) + var1 sgd = gradient_descent.SGD(1.0) with self.assertRaisesRegex(ValueError, "`tape` is required"): sgd.minimize(loss, [var0, var1]) sgd.minimize(loss, [var0, var1], tape=tape) self.assertAllCloseAccordingToType([[1.0 - 4.0, 2.0 - 5.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 1.0], self.evaluate(var1)) if __name__ == "__main__": tf.test.main()
33,000
44.962396
114
py
keras
keras-master/keras/optimizer_v2/adam.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam optimizer implementation."""

import tensorflow.compat.v2 as tf
from keras import backend_config
from keras.optimizer_v2 import optimizer_v2
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.Adam')
class Adam(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adam algorithm.

  Adam optimization is a stochastic gradient descent method that is based on
  adaptive estimation of first-order and second-order moments.

  According to
  [Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
  the method is "*computationally
  efficient, has little memory requirement, invariant to diagonal rescaling of
  gradients, and is well suited for problems that are large in terms of
  data/parameters*".

  Args:
    learning_rate: A `Tensor`, floating point value, or a schedule that is a
      `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
      that takes no arguments and returns the actual value to use, The
      learning rate. Defaults to 0.001.
    beta_1: A float value or a constant float tensor, or a callable
      that takes no arguments and returns the actual value to use. The
      exponential decay rate for the 1st moment estimates. Defaults to 0.9.
    beta_2: A float value or a constant float tensor, or a callable
      that takes no arguments and returns the actual value to use, The
      exponential decay rate for the 2nd moment estimates. Defaults to 0.999.
    epsilon: A small constant for numerical stability. This epsilon is
      "epsilon hat" in the Kingma and Ba paper (in the formula just before
      Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
      1e-7.
    amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
      the paper "On the Convergence of Adam and beyond". Defaults to `False`.
    name: Optional name for the operations created when applying gradients.
      Defaults to `"Adam"`.
    **kwargs: Keyword arguments. Allowed to be one of
      `"clipnorm"` or `"clipvalue"`.
      `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float)
      clips gradients by value.

  Usage:

  >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
  >>> var1 = tf.Variable(10.0)
  >>> loss = lambda: (var1 ** 2)/2.0       # d(loss)/d(var1) == var1
  >>> step_count = opt.minimize(loss, [var1]).numpy()
  >>> # The first step is `-learning_rate*sign(grad)`
  >>> var1.numpy()
  9.9

  Reference:
    - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    - [Reddi et al., 2018](
        https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.

  Notes:

  The default value of 1e-7 for epsilon might not be a good default in
  general. For example, when training an Inception network on ImageNet a
  current good choice is 1.0 or 0.1. Note that since Adam uses the
  formulation just before Section 2.1 of the Kingma and Ba paper rather than
  the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
  hat" in the paper.

  The sparse implementation of this algorithm (used when the gradient is an
  IndexedSlices object, typically because of `tf.gather` or an embedding
  lookup in the forward pass) does apply momentum to variable slices even if
  they were not used in the forward pass (meaning they have a gradient equal
  to zero). Momentum decay (beta1) is also applied to the entire momentum
  accumulator. This means that the sparse behavior is equivalent to the dense
  behavior (in contrast to some momentum implementations which ignore momentum
  unless a variable slice was actually used).
  """

  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               name='Adam',
               **kwargs):
    super(Adam, self).__init__(name, **kwargs)
    # `lr` is accepted as a legacy alias for `learning_rate`.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    # `epsilon=0`/`None` falls back to the backend-wide fuzz factor.
    self.epsilon = epsilon or backend_config.epsilon()
    self.amsgrad = amsgrad

  def _create_slots(self, var_list):
    """Creates the `m`/`v` (and optional `vhat`) slot variables."""
    # Create slots for the first and second moments.
    # Separate for-loops to respect the ordering of slot variables from v1.
    for var in var_list:
      self.add_slot(var, 'm')
    for var in var_list:
      self.add_slot(var, 'v')
    if self.amsgrad:
      for var in var_list:
        self.add_slot(var, 'vhat')

  def _prepare_local(self, var_device, var_dtype, apply_state):
    """Precomputes per-(device, dtype) coefficients for the update ops."""
    super(Adam, self)._prepare_local(var_device, var_dtype, apply_state)

    local_step = tf.cast(self.iterations + 1, var_dtype)
    beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype))
    beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype))
    beta_1_power = tf.pow(beta_1_t, local_step)
    beta_2_power = tf.pow(beta_2_t, local_step)
    # Bias-corrected learning rate (Kingma & Ba, just before Section 2.1).
    lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
          (tf.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
    apply_state[(var_device, var_dtype)].update(
        dict(
            lr=lr,
            epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
            beta_1_t=beta_1_t,
            beta_1_power=beta_1_power,
            one_minus_beta_1_t=1 - beta_1_t,
            beta_2_t=beta_2_t,
            beta_2_power=beta_2_power,
            one_minus_beta_2_t=1 - beta_2_t))

  def set_weights(self, weights):
    """Sets optimizer weights, dropping V1-era `vhat` slots if present."""
    params = self.weights
    # If the weights are generated by Keras V1 optimizer, it includes vhats
    # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
    # optimizer has 2x + 1 variables. Filter vhats out for compatibility.
    num_vars = int((len(params) - 1) / 2)
    if len(weights) == 3 * num_vars + 1:
      weights = weights[:len(params)]
    super(Adam, self).set_weights(weights)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    """Applies a dense gradient via the fused Adam kernels."""
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')

    if not self.amsgrad:
      return tf.raw_ops.ResourceApplyAdam(
          var=var.handle,
          m=m.handle,
          v=v.handle,
          beta1_power=coefficients['beta_1_power'],
          beta2_power=coefficients['beta_2_power'],
          lr=coefficients['lr_t'],
          beta1=coefficients['beta_1_t'],
          beta2=coefficients['beta_2_t'],
          epsilon=coefficients['epsilon'],
          grad=grad,
          use_locking=self._use_locking)
    else:
      vhat = self.get_slot(var, 'vhat')
      return tf.raw_ops.ResourceApplyAdamWithAmsgrad(
          var=var.handle,
          m=m.handle,
          v=v.handle,
          vhat=vhat.handle,
          beta1_power=coefficients['beta_1_power'],
          beta2_power=coefficients['beta_2_power'],
          lr=coefficients['lr_t'],
          beta1=coefficients['beta_1_t'],
          beta2=coefficients['beta_2_t'],
          epsilon=coefficients['epsilon'],
          grad=grad,
          use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    """Applies a sparse (IndexedSlices) gradient update."""
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, 'm')
    m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
    m_t = tf.compat.v1.assign(m, m * coefficients['beta_1_t'],
                              use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
      m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)

    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, 'v')
    v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
    v_t = tf.compat.v1.assign(v, v * coefficients['beta_2_t'],
                              use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
      v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)

    if not self.amsgrad:
      v_sqrt = tf.sqrt(v_t)
      var_update = tf.compat.v1.assign_sub(
          var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
          use_locking=self._use_locking)
      return tf.group(*[var_update, m_t, v_t])
    else:
      v_hat = self.get_slot(var, 'vhat')
      v_hat_t = tf.maximum(v_hat, v_t)
      with tf.control_dependencies([v_hat_t]):
        v_hat_t = tf.compat.v1.assign(
            v_hat, v_hat_t, use_locking=self._use_locking)
      v_hat_sqrt = tf.sqrt(v_hat_t)
      var_update = tf.compat.v1.assign_sub(
          var,
          coefficients['lr'] * m_t / (v_hat_sqrt + coefficients['epsilon']),
          use_locking=self._use_locking)
      return tf.group(*[var_update, m_t, v_t, v_hat_t])

  def get_config(self):
    config = super(Adam, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._initial_decay,
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad,
    })
    return config


class NonFusedAdam(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adam algorithm without fused kernels.

  Adam optimization is a stochastic gradient descent method that is based on
  adaptive estimation of first-order and second-order moments.
  According to the paper
  [Adam: A Method for Stochastic Optimization. Kingma et al.,
  2014](http://arxiv.org/abs/1412.6980), the method is "*computationally
  efficient, has little memory requirement, invariant to diagonal rescaling of
  gradients, and is well suited for problems that are large in terms of
  data/parameters*".

  For AMSGrad see [On The Convergence Of Adam And Beyond.
  Reddi et al., 5-8](https://openreview.net/pdf?id=ryQu7f-RZ).

  **If amsgrad = False**:

  initialize $m_0$ as 1st moment vector
  initialize $v_0$ as 2nd moment vector

  The update rule for $\theta$ with gradient $g$ uses an optimization
  described at the end of section 2 of the paper:

  $$lr_t = \mathrm{learning\_rate} *
    \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
  $$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
  $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
  $$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$

  **If amsgrad = True**:

  initialize $m_0$ as 1st moment vector
  initialize $v_0$ as 2nd moment vector
  initialize $\hat{v}_0$ as 2nd moment vector

  The update rule for $\theta$ with gradient $g$ uses an optimization
  described at the end of section 2 of the paper:

  $$lr_t = \mathrm{learning\_rate} *
    \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
  $$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
  $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
  $$\hat{v}_t = \max(\hat{v}_{t-1}, v_t)$$
  $$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$

  The default value of 1e-7 for epsilon might not be a good default in
  general. For example, when training an Inception network on ImageNet a
  current good choice is 1.0 or 0.1. Note that since Adam uses the
  formulation just before Section 2.1 of the Kingma and Ba paper rather than
  the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
  hat" in the paper.

  The sparse implementation of this algorithm (used when the gradient is an
  IndexedSlices object, typically because of `tf.gather` or an embedding
  lookup in the forward pass) does apply momentum to variable slices even if
  they were not used in the forward pass (meaning they have a gradient equal
  to zero). Momentum decay (beta1) is also applied to the entire momentum
  accumulator. This means that the sparse behavior is equivalent to the dense
  behavior (in contrast to some momentum implementations which ignore momentum
  unless a variable slice was actually used).

  Usage:

  >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
  >>> var1 = tf.Variable(10.0)
  >>> loss = lambda: (var1 ** 2)/2.0       # d(loss)/d(var1) == var1
  >>> step_count = opt.minimize(loss, [var1]).numpy()
  >>> # The first step is `-learning_rate*sign(grad)`
  >>> var1.numpy()
  9.9
  """

  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               name='Adam',
               **kwargs):
    """Construct a new Adam optimizer.

    Args:
      learning_rate: A `Tensor`, floating point value, or a schedule that is a
        `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
        that takes no arguments and returns the actual value to use, The
        learning rate. Defaults to 0.001.
      beta_1: A float value or a constant float tensor, or a callable that
        takes no arguments and returns the actual value to use. The
        exponential decay rate for the 1st moment estimates. Defaults to 0.9.
      beta_2: A float value or a constant float tensor, or a callable that
        takes no arguments and returns the actual value to use, The
        exponential decay rate for the 2nd moment estimates. Defaults to
        0.999.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
        1e-7.
      amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
        from the paper "On the Convergence of Adam and beyond". Defaults to
        `False`.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam".
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
        `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is
        clip gradients by value, `decay` is included for backward
        compatibility to allow time inverse decay of learning rate. `lr` is
        included for backward compatibility, recommended to use
        `learning_rate` instead.
    """
    super(NonFusedAdam, self).__init__(name, **kwargs)
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    self.epsilon = epsilon or backend_config.epsilon()
    self.amsgrad = amsgrad

  def _create_slots(self, var_list):
    """Creates the `m`/`v` (and optional `vhat`) slot variables."""
    # Create slots for the first and second moments.
    # Separate for-loops to respect the ordering of slot variables from v1.
    for var in var_list:
      self.add_slot(var, 'm')
    for var in var_list:
      self.add_slot(var, 'v')
    if self.amsgrad:
      for var in var_list:
        self.add_slot(var, 'vhat')

  def _prepare_local(self, var_device, var_dtype, apply_state):
    """Precomputes per-(device, dtype) coefficients for the update ops."""
    super(NonFusedAdam, self)._prepare_local(var_device, var_dtype,
                                             apply_state)

    local_step = tf.cast(self.iterations + 1, var_dtype)
    beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype))
    beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype))
    beta_1_power = tf.pow(beta_1_t, local_step)
    beta_2_power = tf.pow(beta_2_t, local_step)
    lr = (
        apply_state[(var_device, var_dtype)]['lr_t'] *
        (tf.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
    apply_state[(var_device, var_dtype)].update(
        dict(
            lr=lr,
            epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
            beta_1_t=beta_1_t,
            beta_1_power=beta_1_power,
            one_minus_beta_1_t=1 - beta_1_t,
            beta_2_t=beta_2_t,
            beta_2_power=beta_2_power,
            one_minus_beta_2_t=1 - beta_2_t))

  def set_weights(self, weights):
    """Sets optimizer weights, dropping V1-era `vhat` slots if present."""
    params = self.weights
    # If the weights are generated by Keras V1 optimizer, it includes vhats
    # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
    # optimizer has 2x + 1 variables. Filter vhats out for compatibility.
    num_vars = int((len(params) - 1) / 2)
    if len(weights) == 3 * num_vars + 1:
      weights = weights[:len(params)]
    super(NonFusedAdam, self).set_weights(weights)

  @tf.function(jit_compile=True)
  def _resource_apply_dense(self, grad, var, apply_state=None):
    """Applies a dense gradient update with unfused (XLA-compiled) ops."""
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')

    # Bias-corrected step size, recomputed here so it is traced into the
    # XLA-compiled function.
    alpha = (
        coefficients['lr_t'] * tf.sqrt(1 - coefficients['beta_2_power']) /
        (1 - coefficients['beta_1_power']))
    m.assign_add((grad - m) * (1 - coefficients['beta_1_t']))
    v.assign_add((tf.square(grad) - v) * (1 - coefficients['beta_2_t']))
    if self.amsgrad:
      vhat = self.get_slot(var, 'vhat')
      vhat.assign(tf.maximum(vhat, v))
      v = vhat
    # Bug fix: epsilon must be *added* to the denominator (Adam update rule;
    # matches the sparse path below and the fused `Adam` class). Subtracting
    # it could make the denominator zero or negative for tiny `v`.
    var.assign_sub((m * alpha) / (tf.sqrt(v) + coefficients['epsilon']))

  @tf.function(jit_compile=True)
  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    """Applies a sparse (IndexedSlices) gradient update with unfused ops."""
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, 'm')
    m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
    m.assign(m * coefficients['beta_1_t'])
    m.scatter_add(tf.IndexedSlices(m_scaled_g_values, indices))

    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, 'v')
    v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
    v.assign(v * coefficients['beta_2_t'])
    v.scatter_add(tf.IndexedSlices(v_scaled_g_values, indices))

    if not self.amsgrad:
      var.assign_sub(coefficients['lr'] * m /
                     (tf.sqrt(v) + coefficients['epsilon']))
    else:
      v_hat = self.get_slot(var, 'vhat')
      v_hat.assign(tf.maximum(v_hat, v))
      var.assign_sub(coefficients['lr'] * m /
                     (tf.sqrt(v_hat) + coefficients['epsilon']))

  def get_config(self):
    config = super(NonFusedAdam, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._initial_decay,
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad,
    })
    return config
19,667
41.025641
80
py
keras
keras-master/keras/optimizer_v2/nadam_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Nadam."""

import tensorflow.compat.v2 as tf

import numpy as np
from keras.optimizer_v2 import nadam


def get_beta_accumulators(opt, dtype):
  """Returns (beta_1 ** t, beta_2 ** t) for the optimizer's next step."""
  step = tf.cast(opt.iterations + 1, dtype)
  b1 = tf.cast(opt._get_hyper("beta_1"), dtype)
  b2 = tf.cast(opt._get_hyper("beta_2"), dtype)
  return (tf.pow(b1, step), tf.pow(b2, step))


def update_m_cache(m_cache, t, beta1=0.9):
  """Folds step t's momentum-schedule coefficient into the cached product."""
  mu = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 1)))
  return m_cache * mu


def nadam_update_numpy(param,
                       g_t,
                       t,
                       m,
                       v,
                       m_cache,
                       alpha=0.001,
                       beta1=0.9,
                       beta2=0.999,
                       epsilon=1e-8):
  """NumPy reference for one Nadam step.

  `m_cache` is expected to already include step t's schedule coefficient,
  i.e. the caller runs `update_m_cache(m_cache, t)` before calling this.
  """
  mu_now = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 1)))
  mu_next = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 2)))
  m_cache_next = m_cache * mu_next

  # Bias-corrected gradient and moment estimates.
  g_prime = g_t / (1 - m_cache)
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  m_prime = m_t / (1 - m_cache_next)
  v_prime = v_t / (1 - beta2**(t + 1))

  # Nesterov-style interpolation between the gradient and the momentum.
  m_bar = (1 - mu_now) * g_prime + mu_next * m_prime

  new_param = param - alpha * m_bar / (np.sqrt(v_prime) + epsilon)
  return new_param, m_t, v_t


class NadamOptimizerTest(tf.test.TestCase):

  def testSparse(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    sparse_epsilon = 1e-7
    for dtype in [tf.half, tf.float32, tf.float64]:
      with tf.Graph().as_default(), self.cached_session():
        # NumPy-side state mirroring the optimizer's slot variables.
        m0, v0, m1, v1, mcache = 0.0, 0.0, 0.0, 0.0, 1.0
        var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        # Only indices 0 and 2 receive gradient; index 1 stays untouched.
        indices0 = np.array([0, 2], dtype=np.int32)
        grads0 = tf.IndexedSlices(
            tf.constant(grads0_np[indices0]), tf.constant(indices0),
            tf.constant([3]))
        indices1 = np.array([0, 2], dtype=np.int32)
        grads1 = tf.IndexedSlices(
            tf.constant(grads1_np[indices1]), tf.constant(indices1),
            tf.constant([3]))
        opt = nadam.Nadam(epsilon=sparse_epsilon)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        # Initial values must be untouched before the first update.
        self.assertAllClose([1.0, 1.0, 2.0], var0)
        self.assertAllClose([3.0, 3.0, 4.0], var1)

        beta1_power, beta2_power = get_beta_accumulators(opt, dtype)

        # Run 3 steps of Nadam and compare against the NumPy reference.
        for step in range(3):
          self.assertAllCloseAccordingToType(0.9**(step + 1), beta1_power)
          self.assertAllCloseAccordingToType(0.999**(step + 1), beta2_power)
          update.run()

          mcache = update_m_cache(mcache, step)
          var0_np, m0, v0 = nadam_update_numpy(
              var0_np, grads0_np, step, m0, v0, mcache,
              epsilon=sparse_epsilon)
          var1_np, m1, v1 = nadam_update_numpy(
              var1_np, grads1_np, step, m1, v1, mcache,
              epsilon=sparse_epsilon)

          self.assertAllCloseAccordingToType(var0_np, var0)
          self.assertAllCloseAccordingToType(var1_np, var1)

  def testBasic(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [tf.half, tf.float32, tf.float64]:
      with tf.Graph().as_default(), self.cached_session():
        # NumPy-side state mirroring the optimizer's slot variables.
        m0, v0, m1, v1, mcache = 0.0, 0.0, 0.0, 0.0, 1.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = nadam.Nadam()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        # Initial values must be untouched before the first update.
        self.assertAllClose([1.0, 2.0], var0)
        self.assertAllClose([3.0, 4.0], var1)

        # Run 3 steps of Nadam and compare against the NumPy reference.
        for step in range(3):
          update.run()

          mcache = update_m_cache(mcache, step)
          var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, step, m0,
                                               v0, mcache)
          var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, step, m1,
                                               v1, mcache)

          self.assertAllCloseAccordingToType(var0_np, var0)
          self.assertAllCloseAccordingToType(var1_np, var1)

  def testConstructNAdamWithLR(self):
    # `lr` is a legacy alias that must win over `learning_rate` when both
    # are supplied.
    opt = nadam.Nadam(lr=1.0)
    opt_2 = nadam.Nadam(learning_rate=0.1, lr=1.0)
    opt_3 = nadam.Nadam(learning_rate=0.1)
    self.assertIsInstance(opt.lr, tf.Variable)
    self.assertIsInstance(opt_2.lr, tf.Variable)
    self.assertIsInstance(opt_3.lr, tf.Variable)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))

  def testConstructNAdamWithScheduleDecay(self):
    # `schedule_decay` maps onto the `decay` hyperparameter variable.
    opt = nadam.Nadam(schedule_decay=0.2)
    self.assertIsInstance(opt.decay, tf.Variable)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.decay), (0.2))


if __name__ == "__main__":
  tf.test.main()
6,787
38.236994
80
py
keras
keras-master/keras/optimizer_v2/ftrl_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for Ftrl operations.""" import tensorflow.compat.v2 as tf import numpy as np from keras.optimizer_v2 import ftrl class FtrlOptimizerTest(tf.test.TestCase): def doTestFtrlwithoutRegularization(self, use_resource=False): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.float32]: with tf.Graph().as_default(), self.cached_session(): if use_resource: var0 = tf.Variable([0.0, 0.0], dtype=dtype) var1 = tf.Variable([0.0, 0.0], dtype=dtype) else: var0 = tf.Variable([0.0, 0.0], dtype=dtype) var1 = tf.Variable([0.0, 0.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) self.assertAllClose([0.0, 0.0], v1_val) # Run 3 steps FTRL for _ in range(3): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.60260963, -4.29698515]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.28432083, -0.56694895]), v1_val) def 
testFtrlWithoutRegularization(self): self.doTestFtrlwithoutRegularization(use_resource=False) def testResourceFtrlWithoutRegularization(self): self.doTestFtrlwithoutRegularization(use_resource=True) def testFtrlwithoutRegularization2(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([4.0, 3.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 3 steps FTRL for _ in range(3): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.55607247, -3.98729396]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.28232238, -0.56096673]), v1_val) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32, tf.float64]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd sgd_op.run() # Validate updated params self.assertAllCloseAccordingToType([[0, 1]], self.evaluate(var0), atol=0.01) def testFtrlWithL1(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([4.0, 3.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-7.66718769, -10.91273689]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.93460727, -1.86147261]), v1_val) def testFtrlWithBeta(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([4.0, 3.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl(3.0, initial_accumulator_value=0.1, beta=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-6.096838, -9.162214]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.717741, -1.425132]), v1_val) def testFtrlWithL2_Beta(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([4.0, 3.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.1, beta=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.735487, -4.704625]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.294335, -0.586556]), v1_val) def testFtrlWithL1_L2(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([4.0, 3.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-0.24059935, -0.46829352]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.02406147, -0.04830509]), v1_val) def testFtrlWithL1_L2_L2Shrinkage(self): """Test the new FTRL op with support for l2 shrinkage. The addition of this parameter which places a constant pressure on weights towards the origin causes the gradient descent trajectory to differ. The weights will tend to have smaller magnitudes with this parameter set. """ # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([4.0, 3.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-0.22578995, -0.44345796]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.14378493, -0.13229476]), v1_val) def testFtrlWithL1_L2_L2ShrinkageSparse(self): """Tests the new FTRL op with support for l2 shrinkage on sparse grads.""" # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): var0 = tf.Variable([[1.0], [2.0]], dtype=dtype) var1 = tf.Variable([[4.0], [3.0]], dtype=dtype) grads0 = tf.IndexedSlices( tf.constant([0.1], shape=[1, 1], dtype=dtype), tf.constant([0]), tf.constant([2, 1])) grads1 = tf.IndexedSlices( tf.constant([0.02], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val) self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val) self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val) def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self): """Verifies that l2 shrinkage in FTRL does not change lr schedule.""" # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session() as sess: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([1.0, 2.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.1, 0.2], dtype=dtype) opt0 = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) opt1 = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update0 = opt0.apply_gradients([(grads0, var0)]) update1 = opt1.apply_gradients([(grads1, var1)]) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([1.0, 2.0], v1_val) # Run 10 steps FTRL for _ in range(10): update0.run() update1.run() v0_val, v1_val = self.evaluate([var0, var1]) # var0 is experiencing L2 shrinkage so it should be smaller than var1 # in magnitude. self.assertTrue((v0_val**2 < v1_val**2).all()) accum0 = sess.run(opt0.get_slot(var0, "accumulator")) accum1 = sess.run(opt1.get_slot(var1, "accumulator")) # L2 shrinkage should not change how we update grad accumulator. 
self.assertAllCloseAccordingToType(accum0, accum1) def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False): if is_sparse: var0 = tf.Variable([[0.0], [0.0]], dtype=dtype) var1 = tf.Variable([[0.0], [0.0]], dtype=dtype) grads0 = tf.IndexedSlices( tf.constant([0.1], shape=[1, 1], dtype=dtype), tf.constant([0]), tf.constant([2, 1])) grads1 = tf.IndexedSlices( tf.constant([0.02], shape=[1, 1], dtype=dtype), tf.constant([1]), tf.constant([2, 1])) else: var0 = tf.Variable([0.0, 0.0], dtype=dtype) var1 = tf.Variable([0.0, 0.0], dtype=dtype) grads0 = tf.constant([0.1, 0.2], dtype=dtype) grads1 = tf.constant([0.01, 0.02], dtype=dtype) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) if is_sparse: self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val) self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val) else: self.assertAllCloseAccordingToType([0.0, 0.0], v0_val) self.assertAllCloseAccordingToType([0.0, 0.0], v1_val) # Run Ftrl for a few steps for _ in range(steps): update.run() v0_val, v1_val = self.evaluate([var0, var1]) return v0_val, v1_val # When variables are initialized with Zero, FTRL-Proximal has two properties: # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical # with GradientDescent. # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical # with Adagrad. # So, basing on these two properties, we test if our implementation of # FTRL-Proximal performs same updates as Adagrad or GradientDescent. def testEquivAdagradwithoutRegularization(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Adagrad learning rate learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype) with tf.Graph().as_default(), self.cached_session(): val2, val3 = self.applyOptimizer( tf.compat.v1.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) def testEquivSparseAdagradwithoutRegularization(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Adagrad learning rate learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype, is_sparse=True) with tf.Graph().as_default(), self.cached_session(): val2, val3 = self.applyOptimizer( tf.compat.v1.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype, is_sparse=True) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) def testEquivSparseGradientDescentwithoutRegularization(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Fixed learning rate learning_rate_power=-0.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype, is_sparse=True) with tf.Graph().as_default(), self.cached_session(): val2, val3 = self.applyOptimizer( tf.compat.v1.train.GradientDescentOptimizer(3.0), dtype, is_sparse=True) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) def testEquivGradientDescentwithoutRegularization(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for dtype in [tf.half, tf.float32]: with tf.Graph().as_default(), self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Fixed learning rate learning_rate_power=-0.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype) with tf.Graph().as_default(), self.cached_session(): val2, val3 = self.applyOptimizer( tf.compat.v1.train.GradientDescentOptimizer(3.0), dtype) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) if __name__ == "__main__": tf.test.main()
19,775
39.775258
114
py
keras
keras-master/keras/optimizer_v2/rmsprop.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RMSprop optimizer implementation.""" import tensorflow.compat.v2 as tf # pylint: disable=g-classes-have-attributes import numpy as np from keras import backend_config from keras.optimizer_v2 import optimizer_v2 from tensorflow.python.util.tf_export import keras_export @keras_export("keras.optimizers.RMSprop") class RMSprop(optimizer_v2.OptimizerV2): r"""Optimizer that implements the RMSprop algorithm. The gist of RMSprop is to: - Maintain a moving (discounted) average of the square of gradients - Divide the gradient by the root of this average This implementation of RMSprop uses plain momentum, not Nesterov momentum. The centered version additionally maintains a moving average of the gradients, and uses that average to estimate the variance. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to 0.001. rho: Discounting factor for the history/coming gradient. Defaults to 0.9. momentum: A scalar or a scalar `Tensor`. Defaults to 0.0. epsilon: A small constant for numerical stability. 
This epsilon is "epsilon hat" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to 1e-7. centered: Boolean. If `True`, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to `True` may help with training, but is slightly more expensive in terms of computation and memory. Defaults to `False`. name: Optional name prefix for the operations created when applying gradients. Defaults to `"RMSprop"`. **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips gradients by value. Note that in the dense implementation of this algorithm, variables and their corresponding accumulators (momentum, gradient moving average, square gradient moving average) will be updated even if the gradient is zero (i.e. accumulators will decay, momentum will be applied). The sparse implementation (used when the gradient is an `IndexedSlices` object, typically because of `tf.gather` or an embedding lookup in the forward pass) will not update variable slices or their accumulators unless those slices were used in the forward pass (nor is there an "eventual" correction to account for these omitted updates). This leads to more efficient updates for large embedding lookup tables (where most of the slices are not accessed in a particular graph execution), but differs from the published algorithm. 
Usage: >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1) >>> var1 = tf.Variable(10.0) >>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1 >>> step_count = opt.minimize(loss, [var1]).numpy() >>> var1.numpy() 9.683772 Reference: - [Hinton, 2012]( http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) """ _HAS_AGGREGATE_GRAD = True def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-7, centered=False, name="RMSprop", **kwargs): """Construct a new RMSprop optimizer. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to 0.001. rho: Discounting factor for the history/coming gradient. Defaults to 0.9. momentum: A scalar or a scalar `Tensor`. Defaults to 0.0. epsilon: A small constant for numerical stability. This epsilon is "epsilon hat" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to 1e-7. centered: Boolean. If `True`, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to `True` may help with training, but is slightly more expensive in terms of computation and memory. Defaults to `False`. name: Optional name prefix for the operations created when applying gradients. Defaults to "RMSprop". **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. 
@compatibility(eager) When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and `epsilon` can each be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility """ super(RMSprop, self).__init__(name, **kwargs) self._set_hyper("learning_rate", kwargs.get("lr", learning_rate)) self._set_hyper("decay", self._initial_decay) self._set_hyper("rho", rho) self._momentum = False if isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0: self._momentum = True if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1): raise ValueError(f"`momentum` must be between [0, 1]. Received: " f"momentum={momentum} (of type {type(momentum)}).") self._set_hyper("momentum", momentum) self.epsilon = epsilon or backend_config.epsilon() self.centered = centered def _create_slots(self, var_list): for var in var_list: self.add_slot(var, "rms") if self._momentum: for var in var_list: self.add_slot(var, "momentum") if self.centered: for var in var_list: self.add_slot(var, "mg") def _prepare_local(self, var_device, var_dtype, apply_state): super(RMSprop, self)._prepare_local(var_device, var_dtype, apply_state) rho = tf.identity(self._get_hyper("rho", var_dtype)) apply_state[(var_device, var_dtype)].update( dict( neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"], epsilon=tf.convert_to_tensor( self.epsilon, var_dtype), rho=rho, momentum=tf.identity(self._get_hyper("momentum", var_dtype)), one_minus_rho=1. 
- rho)) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) rms = self.get_slot(var, "rms") if self._momentum: mom = self.get_slot(var, "momentum") if self.centered: mg = self.get_slot(var, "mg") return tf.raw_ops.ResourceApplyCenteredRMSProp( var=var.handle, mg=mg.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, use_locking=self._use_locking) else: return tf.raw_ops.ResourceApplyRMSProp( var=var.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, use_locking=self._use_locking) else: rms_t = (coefficients["rho"] * rms + coefficients["one_minus_rho"] * tf.square(grad)) rms_t = tf.compat.v1.assign(rms, rms_t, use_locking=self._use_locking) denom_t = rms_t if self.centered: mg = self.get_slot(var, "mg") mg_t = coefficients["rho"] * mg + coefficients["one_minus_rho"] * grad mg_t = tf.compat.v1.assign(mg, mg_t, use_locking=self._use_locking) denom_t = rms_t - tf.square(mg_t) var_t = var - coefficients["lr_t"] * grad / ( tf.sqrt(denom_t) + coefficients["epsilon"]) return tf.compat.v1.assign(var, var_t, use_locking=self._use_locking).op def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) rms = self.get_slot(var, "rms") if self._momentum: mom = self.get_slot(var, "momentum") if self.centered: mg = self.get_slot(var, "mg") return tf.raw_ops.ResourceSparseApplyCenteredRMSProp( var=var.handle, mg=mg.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], 
rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, indices=indices, use_locking=self._use_locking) else: return tf.raw_ops.ResourceSparseApplyRMSProp( var=var.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, indices=indices, use_locking=self._use_locking) else: rms_scaled_g_values = (grad * grad) * coefficients["one_minus_rho"] rms_t = tf.compat.v1.assign(rms, rms * coefficients["rho"], use_locking=self._use_locking) with tf.control_dependencies([rms_t]): rms_t = self._resource_scatter_add(rms, indices, rms_scaled_g_values) rms_slice = tf.gather(rms_t, indices) denom_slice = rms_slice if self.centered: mg = self.get_slot(var, "mg") mg_scaled_g_values = grad * coefficients["one_minus_rho"] mg_t = tf.compat.v1.assign(mg, mg * coefficients["rho"], use_locking=self._use_locking) with tf.control_dependencies([mg_t]): mg_t = self._resource_scatter_add(mg, indices, mg_scaled_g_values) mg_slice = tf.gather(mg_t, indices) denom_slice = rms_slice - tf.square(mg_slice) var_update = self._resource_scatter_add( var, indices, coefficients["neg_lr_t"] * grad / ( tf.sqrt(denom_slice) + coefficients["epsilon"])) if self.centered: return tf.group(*[var_update, rms_t, mg_t]) return tf.group(*[var_update, rms_t]) def set_weights(self, weights): params = self.weights # Override set_weights for backward compatibility of Keras V1 optimizer # since it does not include iteration at head of the weight list. Set # iteration to 0. 
if len(params) == len(weights) + 1: weights = [np.array(0)] + weights super(RMSprop, self).set_weights(weights) def get_config(self): config = super(RMSprop, self).get_config() config.update({ "learning_rate": self._serialize_hyperparameter("learning_rate"), "decay": self._initial_decay, "rho": self._serialize_hyperparameter("rho"), "momentum": self._serialize_hyperparameter("momentum"), "epsilon": self.epsilon, "centered": self.centered, }) return config RMSProp = RMSprop
12,665
41.790541
80
py
keras
keras-master/keras/optimizer_v2/adagrad.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad optimizer implementation."""

import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes

import numpy as np
from keras import backend_config
from keras.optimizer_v2 import optimizer_v2
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.Adagrad')
class Adagrad(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adagrad algorithm.

  Adagrad is an optimizer with parameter-specific learning rates, which are
  adapted relative to how frequently a parameter gets updated during training.
  The more updates a parameter receives, the smaller the updates.

  Args:
    learning_rate: Initial value for the learning rate: either a floating
      point value, or a `tf.keras.optimizers.schedules.LearningRateSchedule`
      instance. Defaults to 0.001. Note that `Adagrad` tends to benefit from
      higher initial learning rate values compared to other optimizers. To
      match the exact form in the original paper, use 1.0.
    initial_accumulator_value: Floating point value. Starting value for the
      accumulators (per-parameter momentum values). Must be non-negative.
    epsilon: Small floating point value used to maintain numerical stability.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to `"Adagrad"`.
    **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or
      `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm and
      represents the maximum L2 norm of each weight variable; `"clipvalue"`
      (float) clips gradient by value and represents the maximum absolute
      value of each weight variable.

  Reference:
    - [Duchi et al., 2011](
      http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
  """

  # Gradients may be pre-aggregated by the framework before `apply_gradients`.
  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               initial_accumulator_value=0.1,
               epsilon=1e-7,
               name='Adagrad',
               **kwargs):
    if initial_accumulator_value < 0.0:
      raise ValueError('initial_accumulator_value must be non-negative: %s' %
                       initial_accumulator_value)
    if epsilon is None:
      epsilon = backend_config.epsilon()
    super(Adagrad, self).__init__(name, **kwargs)
    # `lr` is accepted as a legacy alias of `learning_rate`.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._initial_accumulator_value = initial_accumulator_value
    self.epsilon = epsilon or backend_config.epsilon()

  def _create_slots(self, var_list):
    # One 'accumulator' slot per variable, seeded with the configured
    # starting value (this slot accumulates squared gradients).
    for var in var_list:
      dtype = var.dtype.base_dtype
      init = tf.compat.v1.constant_initializer(
          self._initial_accumulator_value, dtype=dtype)
      self.add_slot(var, 'accumulator', init)

  def _prepare_local(self, var_device, var_dtype, apply_state):
    # Cache per-(device, dtype) constants that the raw apply ops consume.
    super(Adagrad, self)._prepare_local(var_device, var_dtype, apply_state)
    apply_state[(var_device, var_dtype)].update(
        dict(
            epsilon=tf.convert_to_tensor(
                self.epsilon, var_dtype),
            neg_lr_t=-apply_state[(var_device, var_dtype)]['lr_t'],
            zero=tf.zeros((), dtype=tf.int64)))

  def set_weights(self, weights):
    params = self.weights
    # Override set_weights for backward compatibility of Keras V1 optimizer
    # since it does not include iteration at head of the weight list. Set
    # iteration to 0.
    if len(params) == len(weights) + 1:
      weights = [np.array(0)] + weights
    super(Adagrad, self).set_weights(weights)

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Creates an optimizer from its config.

    This method is the reverse of `get_config`, capable of instantiating the
    same optimizer from the config dictionary.

    Args:
        config: A Python dictionary, typically the output of get_config.
        custom_objects: A Python dictionary mapping names to additional Python
          objects used to create this optimizer, such as a function used for a
          hyperparameter.

    Returns:
        An optimizer instance.
    """
    # Backfill defaults for configs serialized by older Keras versions.
    if 'initial_accumulator_value' not in config:
      config['initial_accumulator_value'] = 0.1
    if 'lr' in config:
      config['learning_rate'] = config.pop('lr')
    return cls(**config)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # Dense update delegated to the fused ResourceApplyAdagradV2 kernel.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    acc = self.get_slot(var, 'accumulator')
    return tf.raw_ops.ResourceApplyAdagradV2(
        var=var.handle,
        accum=acc.handle,
        lr=coefficients['lr_t'],
        epsilon=coefficients['epsilon'],
        grad=grad,
        use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # Sparse update: only rows named by `indices` are touched.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    acc = self.get_slot(var, 'accumulator')
    return tf.raw_ops.ResourceSparseApplyAdagradV2(
        var=var.handle,
        accum=acc.handle,
        lr=coefficients['lr_t'],
        epsilon=coefficients['epsilon'],
        grad=grad,
        indices=indices,
        use_locking=self._use_locking)

  def get_config(self):
    config = super(Adagrad, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._initial_decay,
        'initial_accumulator_value': self._initial_accumulator_value,
        'epsilon': self.epsilon,
    })
    return config
6,475
38.012048
80
py
keras
keras-master/keras/optimizer_v2/adadelta.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adadelta optimizer implementation."""

import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes

import numpy as np
from keras import backend_config
from keras.optimizer_v2 import optimizer_v2
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.Adadelta')
class Adadelta(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adadelta algorithm.

  Adadelta optimization is a stochastic gradient descent method that is based
  on adaptive learning rate per dimension to address two drawbacks:

  - The continual decay of learning rates throughout training.
  - The need for a manually selected global learning rate.

  Adadelta is a more robust extension of Adagrad that adapts learning rates
  based on a moving window of gradient updates, instead of accumulating all
  past gradients. This way, Adadelta continues learning even when many updates
  have been done. Compared to Adagrad, in the original version of Adadelta you
  don't have to set an initial learning rate. In this version, the initial
  learning rate can be set, as in most other Keras optimizers.

  Args:
    learning_rate: Initial value for the learning rate: either a floating
      point value, or a `tf.keras.optimizers.schedules.LearningRateSchedule`
      instance. Defaults to 0.001. Note that `Adadelta` tends to benefit from
      higher initial learning rate values compared to other optimizers. To
      match the exact form in the original paper, use 1.0.
    rho: A `Tensor` or a floating point value. The decay rate.
    epsilon: Small floating point value used to maintain numerical stability.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to `"Adadelta"`.
    **kwargs: Keyword arguments. Allowed to be one of `"clipnorm"` or
      `"clipvalue"`. `"clipnorm"` (float) clips gradients by norm and
      represents the maximum norm of each parameter; `"clipvalue"` (float)
      clips gradient by value and represents the maximum absolute value of
      each parameter.

  Reference:
    - [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
  """

  # Gradients may be pre-aggregated by the framework before `apply_gradients`.
  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               rho=0.95,
               epsilon=1e-7,
               name='Adadelta',
               **kwargs):
    super(Adadelta, self).__init__(name, **kwargs)
    # `lr` is accepted as a legacy alias of `learning_rate`.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('rho', rho)
    self.epsilon = epsilon or backend_config.epsilon()

  def _create_slots(self, var_list):
    # Separate for-loops to respect the ordering of slot variables from v1.
    for v in var_list:
      self.add_slot(v, 'accum_grad')
    for v in var_list:
      self.add_slot(v, 'accum_var')

  def _prepare_local(self, var_device, var_dtype, apply_state):
    # Cache per-(device, dtype) constants that the raw apply ops consume.
    super(Adadelta, self)._prepare_local(var_device, var_dtype, apply_state)
    apply_state[(var_device, var_dtype)].update(
        dict(
            epsilon=tf.convert_to_tensor(
                self.epsilon, var_dtype),
            rho=tf.identity(self._get_hyper('rho', var_dtype))))

  def set_weights(self, weights):
    params = self.weights
    # Override set_weights for backward compatibility of Keras V1 optimizer
    # since it does not include iteration at head of the weight list. Set
    # iteration to 0.
    if len(params) == len(weights) + 1:
      weights = [np.array(0)] + weights
    super(Adadelta, self).set_weights(weights)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # Dense update delegated to the fused ResourceApplyAdadelta kernel.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    accum_grad = self.get_slot(var, 'accum_grad')
    accum_var = self.get_slot(var, 'accum_var')
    return tf.raw_ops.ResourceApplyAdadelta(
        var=var.handle,
        accum=accum_grad.handle,
        accum_update=accum_var.handle,
        lr=coefficients['lr_t'],
        rho=coefficients['rho'],
        epsilon=coefficients['epsilon'],
        grad=grad,
        use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # Sparse update: only rows named by `indices` are touched.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    accum_grad = self.get_slot(var, 'accum_grad')
    accum_var = self.get_slot(var, 'accum_var')
    return tf.raw_ops.ResourceSparseApplyAdadelta(
        var=var.handle,
        accum=accum_grad.handle,
        accum_update=accum_var.handle,
        lr=coefficients['lr_t'],
        rho=coefficients['rho'],
        epsilon=coefficients['epsilon'],
        grad=grad,
        indices=indices,
        use_locking=self._use_locking)

  def get_config(self):
    config = super(Adadelta, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._initial_decay,
        'rho': self._serialize_hyperparameter('rho'),
        'epsilon': self.epsilon,
    })
    return config
5,969
39.337838
80
py
keras
keras-master/keras/benchmarks/keras_cpu_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark tests for CPU performance of Keras models."""

import tensorflow as tf

import numpy as np

from keras.benchmarks import benchmark_util

# Loss function and optimizer shared by all benchmarks below.
_LOSS = 'binary_crossentropy'
_OPTIMIZER = 'rmsprop'


class KerasModelCPUBenchmark(  # pylint: disable=undefined-variable
    tf.test.Benchmark,
    metaclass=tf.__internal__.test.ParameterizedBenchmark):
  """Required Arguments for measure_performance.

  x: Input data, it could be Numpy or load from tfds.
  y: Target data. If `x` is a dataset, generator instance, `y` should not be
    specified.
  loss: Loss function for model.
  optimizer: Optimizer for model.
  Other details can see in `measure_performance()` method of benchmark_util.
  """

  # The parameters of each benchmark is a tuple:
  # (benchmark_name_suffix, batch_size, run_iters).
  # benchmark_name_suffix: The suffix of the benchmark test name with
  #   convention `{bs}_{batch_size}`.
  # batch_size: Integer. Number of samples per gradient update.
  # run_iters: Integer. Number of iterations to run the
  #   performance measurement.
  _benchmark_parameters = [
      ('bs_32', 32, 3), ('bs_64', 64, 2), ('bs_128', 128, 2),
      ('bs_256', 256, 1), ('bs_512', 512, 1)]

  def _mnist_mlp(self):
    """Simple MLP model."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(512, activation='relu',
                                    input_shape=(784,)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(512, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(10, activation='softmax'))

    return model

  def _mnist_convnet(self):
    """Simple Convnet model."""
    model = tf.keras.Sequential()
    model.add(
        tf.keras.layers.Conv2D(
            32, kernel_size=(3, 3), activation='relu',
            input_shape=(28, 28, 1)))
    model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.25))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(128, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(10, activation='softmax'))

    return model

  def _imdb_lstm(self):
    """Simple LSTM model."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Embedding(20000, 128))
    model.add(tf.keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

    return model

  def benchmark_mnist_mlp(self, batch_size, run_iters):
    """Benchmark for MLP model on synthetic mnist data."""
    # Synthetic data shaped like flattened MNIST (5000 x 784) with
    # random 10-way targets.
    mlp_x = np.random.random((5000, 784))
    mlp_y = np.random.random((5000, 10))
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._mnist_mlp,
        x=mlp_x,
        y=mlp_y,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer=_OPTIMIZER,
        loss=_LOSS)
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_mnist_convnet(self, batch_size, run_iters):
    """Benchmark for Convnet model on synthetic mnist data."""
    # Synthetic data shaped like image MNIST (5000 x 28 x 28 x 1).
    convnet_x = np.random.random((5000, 28, 28, 1))
    convnet_y = np.random.random((5000, 10))
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._mnist_convnet,
        x=convnet_x,
        y=convnet_y,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer=_OPTIMIZER,
        loss=_LOSS)
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_imdb_lstm(self, batch_size, run_iters):
    """Benchmark for LSTM model on synthetic imdb review dataset."""
    # Synthetic token ids (vocab < 2000) and binary sentiment targets.
    lstm_x = np.random.randint(0, 1999, size=(2500, 100))
    lstm_y = np.random.random((2500, 1))
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._imdb_lstm,
        x=lstm_x,
        y=lstm_y,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer=_OPTIMIZER,
        loss=_LOSS)
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)


if __name__ == '__main__':
  tf.test.main()
4,999
35.49635
80
py
keras
keras-master/keras/benchmarks/model_memory_profile.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Memory profile on Keras model.

To add a new model for memory profile:
1. Create the model.
2. Decorate it with `@memory_profiler.profile`.
3. Add the model function to the dict `models`.
"""

import tensorflow as tf

from absl import app
from absl import flags
from absl import logging

import numpy as np

try:
  import memory_profiler  # pylint:disable=g-import-not-at-top
except ImportError:
  memory_profiler = None

FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, 'The model to run memory profiler.')


def _imdb_lstm_model():
  """Builds and briefly trains an IMDB-style LSTM model on synthetic data."""
  x_train = np.random.randint(0, 1999, size=(2500, 100))
  y_train = np.random.random((2500, 1))

  # IMDB LSTM model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Embedding(20000, 128))
  model.add(tf.keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
  model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
  model.compile('sgd', 'mse')
  # Warm up the model with one epoch.
  model.fit(x_train, y_train, batch_size=512, epochs=3)


# Bug fix: the original code decorated `_imdb_lstm_model` unconditionally with
# `@memory_profiler.profile`, which raises AttributeError at *import* time when
# `memory_profiler` is not installed (it is None then), defeating the
# `if memory_profiler:` guard in the __main__ block below. Apply the decorator
# only when the package is actually available.
if memory_profiler is not None:
  _imdb_lstm_model = memory_profiler.profile(_imdb_lstm_model)


def main(_):
  """Looks up the requested model by flag and runs it under the profiler.

  Args:
    _: Unused positional args forwarded by `app.run`.
  """
  # Add the model for memory profile.
  models = {
      'lstm': _imdb_lstm_model,
  }

  if FLAGS.model in models:
    logging.info('Run memory profile on %s.', FLAGS.model)
    run_model = models[FLAGS.model]
    run_model()
  else:
    logging.info('The model does not exist. Please verify the model name.')


if __name__ == '__main__':
  flags.mark_flags_as_required(['model'])
  if memory_profiler:
    app.run(main)
2,198
27.192308
80
py
keras
keras-master/keras/benchmarks/metrics_memory_benchmark_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark tests for Keras metrics memory consumption."""

import tensorflow as tf

import numpy as np

try:
  import memory_profiler  # pylint:disable=g-import-not-at-top
except ImportError:
  memory_profiler = None


class KerasMetricMemoryBenchmark(tf.test.Benchmark):
  """Compares AUC memory usage for even vs. uneven threshold spacing."""

  # This test is added to measure the memory footprint for
  # metrics_utils._update_confusion_matrix_variables_optimized().

  def benchmark_auc_memory_usage(self):
    """Reports peak memory for both threshold layouts of the AUC metric."""
    if memory_profiler is None:
      self.skipTest('Skip test since memory_profiler is not available.')

    with tf.compat.forward_compatibility_horizon(2021, 6, 9):
      # Shared synthetic labels/predictions used by both measured functions.
      self.y_true = np.random.randint(2, size=(1024, 1024))
      self.y_pred = np.random.rand(1024, 1024)

      memory_usage_1 = memory_profiler.memory_usage((self.even_thresholds_auc))
      memory_usage_2 = memory_profiler.memory_usage(
          (self.uneven_thresholds_auc))
      # memory usage is a list of number which sampled when running the
      # function. The pure memory consumption is approximately
      # max(usage) - min(usage)
      memory_usage_1 = max(memory_usage_1) - min(memory_usage_1)
      memory_usage_2 = max(memory_usage_2) - min(memory_usage_2)

      metrics = {'even_threshold_memory_usage': memory_usage_1,
                 'uneven_threshold_memory_usage': memory_usage_2}
      self.report_benchmark(iters=1, metrics=metrics)

  def even_thresholds_auc(self):
    """AUC with default evenly spaced thresholds (optimized code path)."""
    auc = tf.keras.metrics.AUC(num_thresholds=200)
    self.assertTrue(auc._thresholds_distributed_evenly)

    auc(self.y_true, self.y_pred)

  def uneven_thresholds_auc(self):
    """AUC with one perturbed threshold so spacing is uneven (slow path)."""
    num_thresholds = 200
    thresholds = [x / (num_thresholds - 1) for x in range(num_thresholds)]
    # Nudge one interior threshold so the spacing is no longer uniform.
    thresholds[100] += 1 / 200
    thresholds = thresholds[1:-1]

    auc = tf.keras.metrics.AUC(thresholds=thresholds)
    self.assertFalse(auc._thresholds_distributed_evenly)
    self.assertEqual(auc.num_thresholds, num_thresholds)

    auc(self.y_true, self.y_pred)


if __name__ == '__main__':
  tf.test.main()
2,679
35.712329
80
py
keras
keras-master/keras/benchmarks/benchmark_util.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils for benchmarks."""

import tensorflow as tf

import timeit
import numpy as np

from keras.benchmarks import distribution_util


def get_benchmark_name(name):
  """Split the suffix of the benchmark name.

  For example, for the name = 'benchmark_layer_call__Conv2D_small_shape',
  the return value is ['Conv2D', 'small', 'shape'].

  This is to generate the metadata of the benchmark test.

  Args:
    name: A string, the benchmark name.

  Returns:
    A list of strings of the suffix in the benchmark name.

  Raises:
    ValueError: If the benchmark name does not contain both '__' and '_'.
  """
  if '__' not in name or '_' not in name:
    raise ValueError('The format of the benchmark name is wrong.')
  return name.split('__')[-1].split('_')


def generate_benchmark_params_cpu_gpu(*params_list):
  """Extend the benchmark names with CPU and GPU suffix.

  Args:
    *params_list: A list of tuples represents the benchmark parameters.

  Returns:
    A list of strings with the benchmark name extended with CPU and GPU
    suffix.
  """
  benchmark_params = []
  for params in params_list:
    benchmark_params.extend([
        ((param[0] + '_CPU',) + param[1:]) for param in params
    ])
    benchmark_params.extend([
        ((param[0] + '_GPU',) + param[1:]) for param in params
    ])
  return benchmark_params


def get_keras_examples_metadata(keras_model,
                                batch_size,
                                impl='.keras.cfit_graph'):
  """Builds the metadata dict reported alongside keras example benchmarks."""
  return {
      'model_name': 'keras_examples',
      'implementation': keras_model + impl,
      'parameters': 'bs_' + str(batch_size),
  }


class TimerCallBack(tf.keras.callbacks.Callback):
  """Callback for logging time in each epoch or batch."""

  def __init__(self):
    self.times = []
    self.timer = timeit.default_timer
    self.startup_time = timeit.default_timer()
    self.recorded_startup = False

  def on_epoch_begin(self, e, logs):
    self.epoch_start_time = self.timer()

  def on_epoch_end(self, e, logs):
    self.times.append(self.timer() - self.epoch_start_time)

  def on_batch_end(self, e, logs):
    # Startup time is measured once: from callback creation until the end
    # of the very first batch.
    if not self.recorded_startup:
      self.startup_time = self.timer() - self.startup_time
      self.recorded_startup = True


def measure_performance(model_fn,
                        x=None,
                        y=None,
                        epochs=2,
                        batch_size=32,
                        run_iters=4,
                        optimizer=None,
                        loss=None,
                        metrics=None,
                        verbose=0,
                        num_gpus=0,
                        distribution_strategy='off'):
  """Run models and measure the performance.

  Args:
    model_fn: Model function to be benchmarked.
    x: Input data. See `x` in the `fit()` method of `keras.Model`.
    y: Target data. See `y` in the `fit()` method of `keras.Model`.
    epochs: Integer. Number of epochs to train the model. If unspecified,
      `epochs` will default to 2.
    batch_size: Integer. Number of samples per gradient update. If
      unspecified, `batch_size` will default to 32.
    run_iters: Integer. Number of iterations to run the performance
      measurement. If unspecified, `run_iters` will default to 4.
    optimizer: String (name of optimizer) or optimizer instance. See
      `tf.keras.optimizers`.
    loss: String (name of objective function), objective function or
      `tf.keras.losses.Loss` instance. See `tf.keras.losses`.
    metrics: Lists of metrics to be evaluated by the model during training.
      See `metrics` in the `compile()` method of  `keras.Model`.
    verbose: 0, 1, 2. Verbosity mode. See `verbose` in the `fit()` method of
      `keras.Model`. If unspecified, `verbose` will default to 0.
    num_gpus: Number of GPUs to run the model.
    distribution_strategy: Distribution strategies. It could be
      `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified,
      `distribution_strategy` will default to 'off'. Note that, `TPU`
      and `parameter_server` are not supported yet.

  Returns:
    Performance summary, which contains build_time, compile_time,
    startup_time, avg_epoch_time, wall_time, exp_per_sec, epochs,
    distribution_strategy.

  Raise:
    ValueError: If `x` is none or if `optimizer` is not provided or if `loss`
      is not provided or if `num_gpus` is negative.
  """
  # Bug fix: these checks previously compared the *string literals*
  # 'x'/'optimizer'/'loss' to None (e.g. `if 'x' is None:`), which is always
  # False, so the documented ValueErrors could never be raised. Compare the
  # actual arguments instead.
  if x is None:
    raise ValueError('Input data is required.')
  if optimizer is None:
    raise ValueError('Optimizer is required.')
  if loss is None:
    raise ValueError('Loss function is required.')
  if num_gpus < 0:
    raise ValueError('`num_gpus` cannot be negative')

  # TODO(xingyulong): we will add tfds support later and
  # get the `num_examples` from info.
  num_examples = x.shape[0]

  build_time_list, compile_time_list, startup_time_list = [], [], []
  avg_epoch_time_list, wall_time_list, exp_per_sec_list = [], [], []
  total_num_examples = epochs * num_examples

  strategy = distribution_util.get_distribution_strategy(
      distribution_strategy=distribution_strategy, num_gpus=num_gpus)

  for _ in range(run_iters):
    timer = timeit.default_timer
    start_time = timer()
    # Init the distribution strategy scope for each iteration.
    strategy_scope = distribution_util.get_strategy_scope(strategy)
    with strategy_scope:
      t0 = timer()
      model = model_fn()
      build_time = timer() - t0

      t1 = timer()
      model.compile(
          optimizer=optimizer,
          loss=loss,
          metrics=metrics,
      )
      compile_time = timer() - t1
    # Run one warm up epoch.
    model.fit(x=x, y=y, batch_size=batch_size, epochs=1)
    cbk = TimerCallBack()
    t2 = timer()
    model.fit(
        x=x,
        y=y,
        batch_size=batch_size,
        epochs=epochs,
        callbacks=[cbk],
        verbose=verbose)
    end_time = timer()

    build_time_list.append(build_time)
    compile_time_list.append(compile_time)
    startup_time_list.append(cbk.startup_time)
    avg_epoch_time_list.append(np.mean(cbk.times))
    wall_time_list.append(end_time - start_time)
    exp_per_sec_list.append(total_num_examples / (end_time - t2))

  metrics = []
  metrics.append({'name': 'build_time', 'value': np.mean(build_time_list)})
  metrics.append({'name': 'compile_time',
                  'value': np.mean(compile_time_list)})
  metrics.append({'name': 'startup_time',
                  'value': np.mean(startup_time_list)})
  metrics.append({
      'name': 'avg_epoch_time',
      'value': np.mean(avg_epoch_time_list)
  })
  metrics.append({'name': 'exp_per_sec', 'value': np.mean(exp_per_sec_list)})
  metrics.append({'name': 'epochs', 'value': epochs})

  wall_time = np.mean(wall_time_list)
  extras = {
      'distribution_strategy': distribution_strategy,
      'num_gpus': num_gpus
  }

  return metrics, wall_time, extras
7,478
33.465438
80
py
keras
keras-master/keras/benchmarks/benchmark_util_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for benchmark utitilies."""

import tensorflow as tf

from keras.benchmarks import benchmark_util


class BenchmarkUtilTest(tf.test.TestCase):
  """Unit tests for the benchmark name/parameter helpers."""

  def test_get_benchmark_name(self):
    # The helper should return the '_'-separated pieces after '__'.
    result = benchmark_util.get_benchmark_name(
        "benchmark_layer_call__Conv2D_small_shape")
    self.assertAllEqual(result, ["Conv2D", "small", "shape"])

  def test_generate_benchmark_params_cpu_gpu(self):
    adam_opt = tf.keras.optimizers.Adam()
    sgd_opt = tf.keras.optimizers.SGD()
    base_params = [
        ("Adam", adam_opt, 10),
        ("SGD", sgd_opt, 10),
    ]
    result = benchmark_util.generate_benchmark_params_cpu_gpu(base_params)
    # Each entry is duplicated: first all CPU variants, then all GPU ones.
    self.assertAllEqual(result, [
        ("Adam_CPU", adam_opt, 10),
        ("SGD_CPU", sgd_opt, 10),
        ("Adam_GPU", adam_opt, 10),
        ("SGD_GPU", sgd_opt, 10),
    ])


if __name__ == "__main__":
  tf.test.main()
1,600
31.02
80
py
keras
keras-master/keras/benchmarks/optimizer_benchmarks_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark tests for Keras optimizers."""

import tensorflow as tf

from keras.benchmarks import benchmark_util
from keras.optimizer_v2 import adam
from tensorflow.python.platform.benchmark import ParameterizedBenchmark


def bidirect_imdb_lstm_config():
  """Bidirectional LSTM model and IMDB data."""

  def model_fn():
    # Two stacked bidirectional LSTM layers over an embedding, ending in a
    # single sigmoid sentiment score.
    tokens = tf.keras.Input(shape=(None,), dtype="int32")
    hidden = tf.keras.layers.Embedding(20000, 128)(tokens)
    hidden = tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM(64, return_sequences=True))(
            hidden)
    hidden = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))(hidden)
    score = tf.keras.layers.Dense(1, activation="sigmoid")(hidden)
    return tf.keras.Model(tokens, score)

  (x_train, y_train), _ = tf.keras.datasets.imdb.load_data(num_words=20000)
  x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=200)

  return model_fn, x_train, y_train


class KerasOptimizerBenchmark(
    tf.test.Benchmark, metaclass=ParameterizedBenchmark):
  """Keras optimizer benchmarks."""

  # Each benchmark parameter tuple starts with the optimizer's display name.
  _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu([
      ("Adam", tf.keras.optimizers.Adam(), 10),
      ("NonFusedAdam", adam.NonFusedAdam(), 10),
  ])

  def benchmark_optimizer(self, optimizer, num_iters):
    """Optimizer benchmark with Bidirectional LSTM model on IMDB data.

    Args:
      optimizer: The optimizer instance to be benchmarked.
      num_iters: The number of iterations to run for performance measurement.
    """
    # The first return value is a model *builder*; measure_performance calls
    # it once per iteration to rebuild the model.
    model_fn, train_inputs, train_labels = bidirect_imdb_lstm_config()
    metrics, wall_time, extras = benchmark_util.measure_performance(
        model_fn,
        x=train_inputs,
        y=train_labels,
        batch_size=512,
        optimizer=optimizer,
        loss="binary_crossentropy",
        metrics=["accuracy"])
    name_parts = benchmark_util.get_benchmark_name(self._get_name())
    extras.update({
        "implementation": name_parts[0],
        "model_name": "optimizers",
        "parameters": "lstm.512",
    })
    self.report_benchmark(
        iters=num_iters, wall_time=wall_time, metrics=metrics, extras=extras)


if __name__ == "__main__":
  tf.test.main()
2,979
34.47619
80
py
keras
keras-master/keras/benchmarks/__init__.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras Benchmarks."""
713
43.625
80
py
keras
keras-master/keras/benchmarks/distribution_util.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for running models in a distribution setting.

Mostly from
https://github.com/tensorflow/models/blob/master/official/utils/misc/distribution_utils.py.
"""

import tensorflow as tf

import json
import os


def _collective_communication(all_reduce_alg):
  """Return a CollectiveCommunication based on all_reduce_alg.

  Args:
    all_reduce_alg: a string specifying which collective communication to
      pick, or None.

  Returns:
    tf.distribute.experimental.CollectiveCommunication object

  Raises:
    ValueError: if `all_reduce_alg` not in [None, "ring", "nccl"]
  """
  collective_communication_options = {
      None: tf.distribute.experimental.CollectiveCommunication.AUTO,
      "ring": tf.distribute.experimental.CollectiveCommunication.RING,
      "nccl": tf.distribute.experimental.CollectiveCommunication.NCCL
  }
  if all_reduce_alg not in collective_communication_options:
    raise ValueError(
        "When used with `multi_worker_mirrored`, valid values for "
        "all_reduce_alg are [`ring`, `nccl`].  Supplied value: {}".format(
            all_reduce_alg))
  return collective_communication_options[all_reduce_alg]


def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
  """Return a CrossDeviceOps based on all_reduce_alg and num_packs.

  Args:
    all_reduce_alg: a string specifying which cross device op to pick, or
      None.
    num_packs: an integer specifying number of packs for the cross device op.

  Returns:
    tf.distribute.CrossDeviceOps object or None.

  Raises:
    ValueError: if `all_reduce_alg` not in [None, "nccl",
      "hierarchical_copy"].
  """
  if all_reduce_alg is None:
    return None
  mirrored_all_reduce_options = {
      "nccl": tf.distribute.NcclAllReduce,
      "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce
  }
  if all_reduce_alg not in mirrored_all_reduce_options:
    raise ValueError(
        "When used with `mirrored`, valid values for all_reduce_alg are "
        "[`nccl`, `hierarchical_copy`].  Supplied value: {}".format(
            all_reduce_alg))
  cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
  return cross_device_ops_class(num_packs=num_packs)


def get_distribution_strategy(distribution_strategy="mirrored",
                              num_gpus=0,
                              all_reduce_alg=None,
                              num_packs=1):
  """Return a DistributionStrategy for running the model.

  Args:
    distribution_strategy: a string specifying which distribution strategy to
      use. Accepted values are "off", "one_device", "mirrored", and
      "multi_worker_mirrored" -- case insensitive. "off" means not to use
      Distribution Strategy.
    num_gpus: Number of GPUs to run this model.
    all_reduce_alg: Optional string naming the all-reduce algorithm passed on
      to `_collective_communication` (for "multi_worker_mirrored") or to
      `_mirrored_cross_device_ops` (for "mirrored").
    num_packs: Integer forwarded to `_mirrored_cross_device_ops` (only used
      by the "mirrored" strategy).

  Returns:
    tf.distribute.DistibutionStrategy object.

  Raises:
    ValueError: if `distribution_strategy` is "off" or "one_device" and
      `num_gpus` is larger than 1; or `num_gpus` is negative.
  """
  if num_gpus < 0:
    raise ValueError("`num_gpus` can not be negative.")

  distribution_strategy = distribution_strategy.lower()

  if distribution_strategy == "off":
    if num_gpus > 1:
      raise ValueError("When {} GPUs are specified, distribution_strategy "
                       "flag cannot be set to `off`.".format(num_gpus))
    # `None` signals callers (see get_strategy_scope) to run without a
    # strategy scope.
    return None

  if distribution_strategy == "multi_worker_mirrored":
    return tf.distribute.experimental.MultiWorkerMirroredStrategy(
        communication=_collective_communication(all_reduce_alg))

  if distribution_strategy == "one_device":
    if num_gpus == 0:
      return tf.distribute.OneDeviceStrategy("device:CPU:0")
    if num_gpus > 1:
      raise ValueError("`OneDeviceStrategy` can not be used for more than "
                       "one device.")
    return tf.distribute.OneDeviceStrategy("device:GPU:0")

  if distribution_strategy == "mirrored":
    if num_gpus == 0:
      devices = ["device:CPU:0"]
    else:
      devices = ["device:GPU:%d" % i for i in range(num_gpus)]
    return tf.distribute.MirroredStrategy(
        devices=devices,
        cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg,
                                                    num_packs))

  raise ValueError("Unrecognized Distribution Strategy: %r" %
                   distribution_strategy)


def configure_cluster(worker_hosts=None, task_index=-1):
  """Set multi-worker cluster spec in TF_CONFIG environment variable.

  Args:
    worker_hosts: comma-separated list of worker ip:port pairs.
    task_index: index of this worker in `worker_hosts`; required (>= 0) when
      `worker_hosts` names more than one worker and TF_CONFIG is not already
      set.

  Returns:
    Number of workers in the cluster.

  Raises:
    ValueError: if `worker_hosts` lists multiple workers but `task_index` is
      negative.
  """
  # An existing TF_CONFIG takes precedence over `worker_hosts`.
  tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
  if tf_config:
    num_workers = (
        len(tf_config["cluster"].get("chief", [])) +
        len(tf_config["cluster"].get("worker", [])))
  elif worker_hosts:
    workers = worker_hosts.split(",")
    num_workers = len(workers)
    if num_workers > 1 and task_index < 0:
      raise ValueError("Must specify task_index when number of workers > 1")
    task_index = 0 if num_workers == 1 else task_index
    os.environ["TF_CONFIG"] = json.dumps({
        "cluster": {
            "worker": workers
        },
        "task": {
            "type": "worker",
            "index": task_index
        }
    })
  else:
    num_workers = 1
  return num_workers


def get_strategy_scope(strategy):
  """Return `strategy.scope()`, or a no-op context when strategy is None."""
  if strategy:
    strategy_scope = strategy.scope()
  else:
    strategy_scope = DummyContextManager()

  return strategy_scope


class DummyContextManager:
  """No-op context manager used when no distribution strategy is active."""

  def __enter__(self):
    pass

  def __exit__(self, *args):
    pass
6,167
32.16129
91
py
keras
keras-master/keras/benchmarks/model_components_benchmarks_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Benchmarks on Keras components with different Keras model types.""" import tensorflow as tf import time import numpy as np from tensorflow.python.eager import context from tensorflow.python.eager.context import get_executor class SubclassedKerasModel(tf.keras.Model): def __init__(self, initializer="ones"): super(SubclassedKerasModel, self).__init__() self.layer_a = tf.keras.layers.Dense( 64, kernel_initializer=initializer, bias_initializer="zeros") self.layer_b = tf.keras.layers.Dense( 128, kernel_initializer=initializer, bias_initializer="zeros") self.layer_c = tf.keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros") self.layer_d = tf.keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros") self.layer_e = tf.keras.layers.Dense( 10, kernel_initializer=initializer, bias_initializer="zeros") def call(self, x): x = self.layer_a(x) x = self.layer_b(x) x = self.layer_c(x) x = self.layer_d(x) return self.layer_e(x) def make_keras_model(initializer="ones"): model_input = tf.keras.Input(shape=(10,)) x = tf.keras.layers.Dense( 64, kernel_initializer=initializer, bias_initializer="zeros")(model_input) x = tf.keras.layers.Dense( 128, kernel_initializer=initializer, bias_initializer="zeros")(x) x = tf.keras.layers.Dense( 256, 
kernel_initializer=initializer, bias_initializer="zeros")(x) x = tf.keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")(x) x = tf.keras.layers.Dense( 10, kernel_initializer=initializer, bias_initializer="zeros")(x) return tf.keras.Model(inputs=model_input, outputs=x) def make_sequential_keras_model(initializer="ones"): model = tf.keras.models.Sequential() model.add(tf.keras.layers.Dense( 64, kernel_initializer=initializer, bias_initializer="zeros", input_shape=(10,))) model.add(tf.keras.layers.Dense( 128, kernel_initializer=initializer, bias_initializer="zeros")) model.add(tf.keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")) model.add(tf.keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")) model.add(tf.keras.layers.Dense( 10, kernel_initializer=initializer, bias_initializer="zeros")) return model def run_benchmark(func, num_iters, execution_mode=None): with context.execution_mode(execution_mode): # call func to warm up func() if execution_mode == context.ASYNC: get_executor().wait() start = time.time() for _ in range(num_iters): func() if execution_mode == context.ASYNC: get_executor().wait() end = time.time() return end - start class KerasComponentsBenchmarks(tf.test.Benchmark): def _run(self, func, num_iters, execution_mode=None): total_time = run_benchmark(func, num_iters, execution_mode) mean_us = total_time * 1e6 / num_iters self.report_benchmark( iters=num_iters, wall_time=mean_us, metrics=[ { "name": "exp_per_sec", "value": float("{0:.3f}".format(num_iters / total_time)) }, { "name": "us_per_exp", "value": float("{0:.3f}".format(total_time * 1e6 / num_iters)) }, ]) def benchmark_keras_model_subclassed(self): model = SubclassedKerasModel() data = tf.random.uniform((10, 10)) func = lambda: model(data) # pylint: disable=not-callable # First call is more expensive (creates variables etc.), discount that. 
func() # The whole point of this test is to contrast subclassing with # the functional style of keras model building, so validate that # the models are equivalent. assert np.equal(func(), make_keras_model()(data)).all() self._run(func, 30000) def benchmark_keras_model_functional(self): model = make_keras_model() data = tf.random.uniform((10, 10)) func = lambda: model(data) # pylint: disable=not-callable # Symmetry with benchmark_keras_model_subclassed func() assert np.equal(func(), SubclassedKerasModel()(data)).all() # pylint: disable=not-callable self._run(func, 30000) def benchmark_keras_model_sequential(self): model = make_sequential_keras_model() data = tf.random.uniform((10, 10)) func = lambda: model(data) # Symmetry with benchmark_keras_model_functional func() assert np.equal(func(), make_keras_model()(data)).all() self._run(func, 30000) def _benchmark_keras_model_fit(self, model, run_eagerly=False): data = tf.random.uniform((10, 10), minval=-1, maxval=1) labels = tf.random.uniform((10, 10), minval=-1, maxval=1) dataset = tf.data.Dataset.from_tensors((data, labels)).repeat() model.compile( "sgd", loss="mse", run_eagerly=run_eagerly) func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0) # First call is more expensive (creates variables etc.), discount that. model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0) self._run(func, 1) def _benchmark_keras_model_evaluate(self, model, run_eagerly=False): data = tf.random.uniform((10, 10), minval=-1, maxval=1) labels = tf.random.uniform((10, 10), minval=-1, maxval=1) dataset = tf.data.Dataset.from_tensors((data, labels)).repeat() model.compile( "sgd", loss="mse", run_eagerly=run_eagerly) func = lambda: model.evaluate(dataset, steps=1000, verbose=0) # First call is more expensive (creates variables etc.), discount that. 
model.evaluate(dataset, steps=1, verbose=0) self._run(func, 1) def _benchmark_keras_model_predict(self, model, run_eagerly=False): data = tf.random.uniform((10, 10), minval=-1, maxval=1) dataset = tf.data.Dataset.from_tensors(data).repeat() model.compile( "sgd", loss="mse", run_eagerly=run_eagerly) func = lambda: model.predict(dataset, steps=1000, verbose=0) # First call is more expensive (creates variables etc.), discount that. model.predict(dataset, steps=1, verbose=0) self._run(func, 1) def benchmark_keras_model_subclassed_fit(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_subclassed_fit_graph_mode(self): with context.graph_mode(): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_subclassed_fit_run_model_eagerly(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) def benchmark_keras_model_functional_fit(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_functional_fit_graph_mode(self): with context.graph_mode(): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self): tf.profiler.experimental.start("") with context.graph_mode(): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) tf.profiler.experimental.stop(save=False) def benchmark_keras_model_functional_fit_run_model_eagerly(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) def benchmark_keras_model_functional_fit_run_model_eagerly_with_profiler( self): tf.profiler.experimental.start("") model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, 
run_eagerly=True) tf.profiler.experimental.stop(save=False) def benchmark_keras_model_sequential_fit(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_sequential_fit_graph_mode(self): with context.graph_mode(): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_sequential_fit_run_model_eagerly(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) def benchmark_keras_model_subclassed_evaluate(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model) def benchmark_keras_model_subclassed_evaluate_run_model_eagerly(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model, run_eagerly=True) def benchmark_keras_model_functional_evaluate(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model) def benchmark_keras_model_functional_evaluate_run_model_eagerly(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model, run_eagerly=True) def benchmark_keras_model_sequential_evaluate(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model) def benchmark_keras_model_sequential_evaluate_run_model_eagerly(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model, run_eagerly=True) def benchmark_keras_model_subclassed_predict(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_predict(model) def benchmark_keras_model_subclassed_predict_run_model_eagerly(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_predict(model, run_eagerly=True) def 
benchmark_keras_model_functional_predict(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model) def benchmark_keras_model_functional_predict_run_model_eagerly(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model, run_eagerly=True) def benchmark_keras_model_sequential_predict(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model) def benchmark_keras_model_sequential_predict_run_model_eagerly(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model, run_eagerly=True) if __name__ == "__main__": tf.test.main()
11,436
38.302405
95
py
keras
keras-master/keras/benchmarks/eager_microbenchmarks_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Microbenchmarks for Keras components in eager mode.""" import tensorflow as tf import time from tensorflow.python.eager import context from tensorflow.python.eager.context import get_executor from keras.utils import tf_inspect def _run_benchmark(func, num_iters, execution_mode=None): with context.execution_mode(execution_mode): # call func to warm up func() if execution_mode == context.ASYNC: get_executor().wait() start = time.time() for _ in range(num_iters): func() if execution_mode == context.ASYNC: get_executor().wait() end = time.time() return end - start class MicroBenchmarksBase(tf.test.Benchmark): """Run and report benchmark results.""" def run_report(self, run_benchmark, func, num_iters, execution_mode=None): """Run and report benchmark results.""" total_time = run_benchmark(func, num_iters, execution_mode) mean_us = total_time * 1e6 / num_iters metrics = [{ "name": "exp_per_sec", "value": float("{0:.3f}".format(num_iters / total_time)) }, { "name": "us_per_exp", "value": float("{0:.3f}".format(total_time * 1e6 / num_iters)) }] benchmark_name = self._get_benchmark_name() self.report_benchmark( iters=num_iters, wall_time=mean_us, metrics=metrics, name=benchmark_name) def _get_benchmark_name(self): """Mostly copied from benchmark.py _get_name().""" stack = tf_inspect.stack() name = 
None for frame in stack[::-1]: f_locals = frame[0].f_locals f_self = f_locals.get("self", None) if isinstance(f_self, tf.test.Benchmark): name = frame[3] # Get the method name # This is a hack to get around the fact that some methods might have a # disable_tfrt decorator around them. In that case a function called # 'decorated' wraps the real called function underneath and so we # peek one deeper into the stack to get the real name. if name == "decorated": continue else: break if name is None: raise ValueError("Unable to determine calling Benchmark function.") if tf.__internal__.is_tfrt_enabled(): name = name + "_tfrt" return name def _run(self, func, num_iters, execution_mode=None): self.run_report(_run_benchmark, func, num_iters, execution_mode) def benchmark_layers_call_overhead(self): class OnlyOverheadLayer(tf.keras.layers.Layer): def call(self, x): return x layer = OnlyOverheadLayer() x = tf.convert_to_tensor([[1.]]) def fn(): layer(x) # pylint: disable=not-callable self._run(fn, 10000) def benchmark_op_layer_call_overhead(self): model_input = tf.keras.Input(shape=(1,)) model_output = model_input x = tf.convert_to_tensor([[1.1]]) for _ in range(20): model_output = tf.multiply(model_output, x) model = tf.keras.Model(inputs=model_input, outputs=model_output) def fn(): model(x) # pylint: disable=not-callable fn() self._run(fn, 100) def benchmark_model_predict_tensorlike_overhead(self): class OnlyOverheadLayer(tf.keras.layers.Layer): def call(self, x): return x model = tf.keras.Sequential([OnlyOverheadLayer()]) x = tf.convert_to_tensor([[1.]]) def fn(): model.predict(x) self._run(fn, 20) def benchmark_layers_embeddings_embedding_overhead(self): layer = tf.keras.layers.Embedding(1, 1) x = tf.zeros((1, 1), dtype="int32") def fn(): layer(x) self._run(fn, 10000) class KerasLayerCallOverheadBenchmarks( # pylint: disable=undefined-variable MicroBenchmarksBase, metaclass=tf.__internal__.test.ParameterizedBenchmark): # The set of layers for benchmarking. 
To add benchmarks for new layers, # please add the parameter configs to "_benchmark_paramters". # The parameter of each layer benchmark is a tuple contains: # 1) The benchmark name with convention "{module_name}_{layer_name}"; # 2) The layer instance; # 3) The shape of the input to the layer; # 4) The kwargs used in the benchmark. It can include the number of # iterations to run the benchmarks, and kwargs used in the layer call. # By default, # of iteration is 10000. _benchmark_parameters = [ ("advanced_activations_leaky_relu", tf.keras.layers.LeakyReLU(), (1, 1)), ("advanced_activations_prelu", tf.keras.layers.PReLU(), (1, 1)), ("advanced_activations_elu", tf.keras.layers.ELU(), (1, 1)), ("advanced_activations_thresholded_relu", tf.keras.layers.ThresholdedReLU(), (1, 1)), ("advanced_activations_softmax", tf.keras.layers.Softmax(), (1, 1)), ("advanced_activations_relu", tf.keras.layers.ReLU(), (1, 1)), ("core_masking", tf.keras.layers.Masking(), (1, 1)), ("core_dropout", tf.keras.layers.Dropout(0.5), (1, 1), { "training": True }), ("core_flatten", tf.keras.layers.Flatten(), (1, 1, 1)), ("core_dense", tf.keras.layers.Dense(1), (1, 1)), ("convolutional_conv1d", tf.keras.layers.Conv1D(1, (1,)), (1, 1, 1)), ("convolutional_conv2d", tf.keras.layers.Conv2D(1, (1, 1)), (1, 1, 1, 1)), ("convolutional_conv3d", tf.keras.layers.Conv3D( 1, (1, 1, 1)), (1, 1, 1, 1, 1)), ("batch_norm_fused_inf", tf.keras.layers.BatchNormalization(fused=True), (1, 1, 1, 1)), ("batch_norm_fused_train", tf.keras.layers.BatchNormalization(fused=True), (1, 1, 1, 1), {"training": True}), ("batch_norm_nonfused_inf", tf.keras.layers.BatchNormalization(fused=False), (1, 1, 1, 1)), ("batch_norm_nonfused_train", tf.keras.layers.BatchNormalization(fused=False), (1, 1, 1, 1), {"training": True}), ("normalization_layer_normalization", tf.keras.layers.LayerNormalization(), (1, 1), {"iters": 100, "training": True}), ] def benchmark_layer(self, layer, input_shape, kwargs=None): x = tf.ones(input_shape) def fn(): 
layer(x, **(kwargs or {})) default_iters = 10000 iters = kwargs.pop("iters", default_iters) if kwargs else default_iters self._run(fn, iters) if __name__ == "__main__": assert tf.executing_eagerly() tf.test.main()
6,953
32.757282
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Text classification with Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks import benchmark_util


class TextWithTransformerBenchmark(tf.test.Benchmark):
  """Benchmarks for Text classification with Transformer using
  `tf.test.Benchmark`.
  """

  def __init__(self):
    super(TextWithTransformerBenchmark, self).__init__()
    self.max_feature = 20000  # vocabulary size kept from the IMDB dataset
    self.max_len = 200  # sequence length after padding/truncation
    (self.imdb_x, self.imdb_y), _ = tf.keras.datasets.imdb.load_data(
        num_words=self.max_feature)
    self.imdb_x = tf.keras.preprocessing.sequence.pad_sequences(
        self.imdb_x, maxlen=self.max_len)

  def _build_model(self):
    """Model from https://keras.io/examples/nlp/text_classification_with_transformer/."""
    embed_dim = 32  # embedding size for each token
    num_heads = 2  # number of attention heads
    ff_dim = 32  # hidden size of the feed-forward network inside the block
    inputs = tf.keras.layers.Input(shape=(self.max_len,))
    embedding_layer = TokenAndPositionEmbedding(self.max_len, self.max_feature,
                                                embed_dim)
    x = embedding_layer(inputs)  #pylint: disable=not-callable
    transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
    x = transformer_block(x)  #pylint: disable=not-callable
    x = tf.keras.layers.GlobalAvgPool1D()(x)
    x = tf.keras.layers.Dropout(0.1)(x)
    x = tf.keras.layers.Dense(20, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.1)(x)
    outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model

  # In each benchmark test, the required arguments for the
  # method `measure_performance` include:
  #   x: Input data, it could be Numpy or loaded from tfds.
  #   y: Target data. If `x` is a dataset or generator instance,
  #      `y` should not be specified.
  #   loss: Loss function for model.
  #   optimizer: Optimizer for model.
  # Check more details in `measure_performance()` method of
  # benchmark_util.
  def _measure_and_report(self, batch_size, **measure_kwargs):
    """Benchmark `_build_model` at `batch_size` and report the results.

    Extra keyword arguments (e.g. `num_gpus`, `distribution_strategy`)
    are forwarded to `benchmark_util.measure_performance`.
    """
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
        **measure_kwargs)
    metadata = benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_text_classification_bs_128(self):
    """Measure performance with batch_size=128."""
    self._measure_and_report(128)

  def benchmark_text_classification_bs_256(self):
    """Measure performance with batch_size=256."""
    self._measure_and_report(256)

  def benchmark_text_classification_bs_512(self):
    """Measure performance with batch_size=512."""
    self._measure_and_report(512)

  def benchmark_text_classification_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy='mirrored'.
    """
    # NOTE: the original docstring said gpu=1; the call has always used
    # num_gpus=2, so the doc was corrected to match the code.
    self._measure_and_report(
        512, num_gpus=2, distribution_strategy='mirrored')


class MultiHeadSelfAttention(tf.keras.layers.Layer):
  """Implement multi head self attention as a Keras layer."""

  def __init__(self, embed_dim, num_heads=8):
    super(MultiHeadSelfAttention, self).__init__()
    self.embed_dim = embed_dim
    self.num_heads = num_heads
    if embed_dim % num_heads != 0:
      # Bug fix: the message was a plain (non-f) string, so the
      # placeholders were never interpolated and a space was missing
      # between the two concatenated literals.
      raise ValueError(
          f'embedding dimension = {embed_dim} should be divisible '
          f'by number of heads = {num_heads}')
    self.projection_dim = embed_dim // num_heads
    self.query_dense = tf.keras.layers.Dense(embed_dim)
    self.key_dense = tf.keras.layers.Dense(embed_dim)
    self.value_dense = tf.keras.layers.Dense(embed_dim)
    self.combine_heads = tf.keras.layers.Dense(embed_dim)

  def attention(self, query, key, value):
    """Scaled dot-product attention; returns (output, weights)."""
    score = tf.matmul(query, key, transpose_b=True)
    dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
    scaled_score = score / tf.math.sqrt(dim_key)
    weights = tf.nn.softmax(scaled_score, axis=-1)
    output = tf.matmul(weights, value)
    return output, weights

  def separate_heads(self, x, batch_size):
    """Reshape (batch, seq, embed) into (batch, heads, seq, projection)."""
    x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
    return tf.transpose(x, perm=[0, 2, 1, 3])

  def call(self, inputs):  #pylint: disable=arguments-differ
    # x.shape = [batch_size, seq_len, embedding_dim]
    batch_size = tf.shape(inputs)[0]
    query = self.query_dense(inputs)  # (batch_size, seq_len, embed_dim)
    key = self.key_dense(inputs)  # (batch_size, seq_len, embed_dim)
    value = self.value_dense(inputs)  # (batch_size, seq_len, embed_dim)
    query = self.separate_heads(
        query, batch_size)  # (batch_size, num_heads, seq_len, projection_dim)
    key = self.separate_heads(
        key, batch_size)  # (batch_size, num_heads, seq_len, projection_dim)
    value = self.separate_heads(
        value, batch_size)  # (batch_size, num_heads, seq_len, projection_dim)
    attention, _ = self.attention(query, key, value)
    attention = tf.transpose(
        attention,
        perm=[0, 2, 1, 3])  # (batch_size, seq_len, num_heads, projection_dim)
    concat_attention = tf.reshape(
        attention,
        (batch_size, -1, self.embed_dim))  # (batch_size, seq_len, embed_dim)
    output = self.combine_heads(
        concat_attention)  # (batch_size, seq_len, embed_dim)
    return output


class TransformerBlock(tf.keras.layers.Layer):
  """Implement a Transformer block as a layer."""

  def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
    super(TransformerBlock, self).__init__()
    self.att = MultiHeadSelfAttention(embed_dim, num_heads)
    self.ffn = tf.keras.Sequential([
        tf.keras.layers.Dense(ff_dim, activation='relu'),
        tf.keras.layers.Dense(embed_dim)
    ])
    self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
    self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
    self.dropout1 = tf.keras.layers.Dropout(rate)
    self.dropout2 = tf.keras.layers.Dropout(rate)

  def call(self, inputs, training):  #pylint: disable=arguments-differ
    attn_output = self.att(inputs)  #pylint: disable=not-callable
    attn_output = self.dropout1(attn_output, training=training)
    out1 = self.layernorm1(inputs + attn_output)
    ffn_output = self.ffn(out1)
    ffn_output = self.dropout2(ffn_output, training=training)
    return self.layernorm2(out1 + ffn_output)


class TokenAndPositionEmbedding(tf.keras.layers.Layer):
  """Implement embedding layer."""

  def __init__(self, maxlen, vocab_size, embed_dim):
    super(TokenAndPositionEmbedding, self).__init__()
    self.token_emb = tf.keras.layers.Embedding(
        input_dim=vocab_size, output_dim=embed_dim)
    self.pos_emb = tf.keras.layers.Embedding(
        input_dim=maxlen, output_dim=embed_dim)

  def call(self, x):  #pylint: disable=arguments-differ
    maxlen = tf.shape(x)[-1]
    positions = tf.range(start=0, limit=maxlen, delta=1)
    positions = self.pos_emb(positions)
    x = self.token_emb(x)
    return x + positions


if __name__ == '__main__':
  tf.test.main()
9,319
38.159664
89
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on IRNN on MNIST digits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks import benchmark_util


class IRNNMnistBenchmark(tf.test.Benchmark):
  """Benchmarks for IRNN using `tf.test.Benchmark`."""

  def __init__(self):
    super(IRNNMnistBenchmark, self).__init__()
    self.num_classes = 10
    self.hidden_units = 100
    self.learning_rate = 1e-6
    (self.x_train, self.y_train), _ = tf.keras.datasets.mnist.load_data()
    # Each 28x28 image becomes a length-784 sequence of single pixels.
    self.x_train = self.x_train.reshape(self.x_train.shape[0], -1, 1)
    self.x_train = self.x_train.astype('float32') / 255
    self.y_train = tf.keras.utils.to_categorical(self.y_train,
                                                 self.num_classes)

  def _build_model(self):
    """Model from https://github.com/keras-team/keras/
    blob/master/examples/mnist_irnn.py.
    """
    model = tf.keras.Sequential()
    model.add(
        tf.keras.layers.SimpleRNN(
            self.hidden_units,
            kernel_initializer=tf.keras.initializers.RandomNormal(
                stddev=0.001),
            recurrent_initializer=tf.keras.initializers.Identity(gain=1.0),
            activation='relu',
            input_shape=self.x_train.shape[1:]))
    model.add(tf.keras.layers.Dense(self.num_classes))
    model.add(tf.keras.layers.Activation('softmax'))
    return model

  # In each benchmark test, the required arguments for the
  # method `measure_performance` include:
  #   x: Input data, it could be Numpy or loaded from tfds.
  #   y: Target data. If `x` is a dataset or generator instance,
  #      `y` should not be specified.
  #   loss: Loss function for model.
  #   optimizer: Optimizer for model.
  # Check more details in `measure_performance()` method of
  # benchmark_util.
  def _measure_and_report(self, batch_size, **measure_kwargs):
    """Benchmark `_build_model` at `batch_size` and report the results.

    Extra keyword arguments (e.g. `num_gpus`, `distribution_strategy`)
    are forwarded to `benchmark_util.measure_performance`.
    """
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer=tf.keras.optimizers.RMSprop(
            learning_rate=self.learning_rate),
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        **measure_kwargs)
    metadata = benchmark_util.get_keras_examples_metadata('irnn', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_irnn_mnist_bs_256(self):
    """Measure performance with batch_size=256."""
    self._measure_and_report(256)

  def benchmark_irnn_mnist_bs_512(self):
    """Measure performance with batch_size=512."""
    self._measure_and_report(512)

  def benchmark_irnn_mnist_bs_1024(self):
    """Measure performance with batch_size=1024."""
    self._measure_and_report(1024)

  def benchmark_irnn_mnist_bs_1024_gpu_2(self):
    """Measure performance with batch_size=1024, gpu=2 and
    distribution_strategy='mirrored'.
    """
    self._measure_and_report(
        1024, num_gpus=2, distribution_strategy='mirrored')


if __name__ == '__main__':
  tf.test.main()
5,159
36.941176
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Bidirectional LSTM on IMDB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks import benchmark_util


class BidirectionalLSTMBenchmark(tf.test.Benchmark):
  """Benchmarks for Bidirectional LSTM using `tf.test.Benchmark`."""

  def __init__(self):
    super(BidirectionalLSTMBenchmark, self).__init__()
    # Vocabulary size and padded sequence length for the IMDB data.
    self.max_feature = 20000
    self.max_len = 200
    (self.imdb_x, self.imdb_y), _ = tf.keras.datasets.imdb.load_data(
        num_words=self.max_feature)
    self.imdb_x = tf.keras.preprocessing.sequence.pad_sequences(
        self.imdb_x, maxlen=self.max_len)

  def _build_model(self):
    """Model from https://keras.io/examples/nlp/bidirectional_lstm_imdb/."""
    inputs = tf.keras.Input(shape=(None,), dtype='int32')
    hidden = tf.keras.layers.Embedding(self.max_feature, 128)(inputs)
    hidden = tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM(64, return_sequences=True))(hidden)
    hidden = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))(hidden)
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(hidden)
    return tf.keras.Model(inputs, outputs)

  def _measure_and_report(self, batch_size, **distribution_kwargs):
    """Run one benchmark configuration and report the results.

    Args:
      batch_size: Integer. Number of samples per gradient update.
      **distribution_kwargs: Optional `num_gpus` / `distribution_strategy`
        arguments forwarded to `benchmark_util.measure_performance`.
    """
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'],
        **distribution_kwargs)
    extras.update(
        benchmark_util.get_keras_examples_metadata('bidirectional_lstm',
                                                   batch_size))
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_bidirect_lstm_imdb_bs_128(self):
    """Measure performance with batch_size=128."""
    self._measure_and_report(batch_size=128)

  def benchmark_bidirect_lstm_imdb_bs_256(self):
    """Measure performance with batch_size=256."""
    self._measure_and_report(batch_size=256)

  def benchmark_bidirect_lstm_imdb_bs_512(self):
    """Measure performance with batch_size=512."""
    self._measure_and_report(batch_size=512)

  def benchmark_bidirect_lstm_imdb_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and

    distribution_strategy=`mirrored`.
    """
    self._measure_and_report(
        batch_size=512, num_gpus=2, distribution_strategy='mirrored')


if __name__ == '__main__':
  tf.test.main()
4,889
35.492537
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Hierarchical RNN on MNIST digits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks import benchmark_util


class HierarchicalRNNBenchmark(tf.test.Benchmark):
  """Benchmarks for Hierarchical RNN using `tf.test.Benchmark`."""

  def __init__(self):
    super(HierarchicalRNNBenchmark, self).__init__()
    self.num_classes = 10
    # Hidden sizes for the row-wise and column-wise LSTMs.
    self.row_hidden, self.col_hidden = 128, 128
    (self.x_train, self.y_train), _ = tf.keras.datasets.mnist.load_data()
    self.x_train = self.x_train.reshape(self.x_train.shape[0], 28, 28, 1)
    self.x_train = self.x_train.astype('float32') / 255
    self.y_train = tf.keras.utils.to_categorical(self.y_train,
                                                 self.num_classes)

  def _build_model(self):
    """Model from https://github.com/keras-team/keras/blob/master/examples

    /mnist_hierarchical_rnn.py.
    """
    row, col, pixel = self.x_train.shape[1:]
    inputs = tf.keras.layers.Input(shape=(row, col, pixel))
    # Encode each image row with one LSTM, then encode the row encodings
    # with a second LSTM.
    encoded_rows = tf.keras.layers.TimeDistributed(
        tf.keras.layers.LSTM(self.row_hidden))(inputs)
    encoded_cols = tf.keras.layers.LSTM(self.col_hidden)(encoded_rows)
    outputs = tf.keras.layers.Dense(
        self.num_classes, activation='softmax')(encoded_cols)
    return tf.keras.Model(inputs, outputs)

  def _measure_and_report(self, batch_size, **distribution_kwargs):
    """Run one benchmark configuration and report the results.

    Args:
      batch_size: Integer. Number of samples per gradient update.
      **distribution_kwargs: Optional `num_gpus` / `distribution_strategy`
        arguments forwarded to `benchmark_util.measure_performance`.
    """
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        **distribution_kwargs)
    extras.update(
        benchmark_util.get_keras_examples_metadata('hierarchical_rnn',
                                                   batch_size))
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_hrnn_mnist_bs_256(self):
    """Measure performance with batch_size=256."""
    self._measure_and_report(batch_size=256)

  def benchmark_hrnn_mnist_bs_512(self):
    """Measure performance with batch_size=512."""
    self._measure_and_report(batch_size=512)

  def benchmark_hrnn_mnist_bs_1024(self):
    """Measure performance with batch_size=1024."""
    self._measure_and_report(batch_size=1024)

  def benchmark_hrnn_mnist_bs_1024_gpu_2(self):
    """Measure performance with batch_size=1024, gpu=2 and

    distribution_strategy='mirrored'
    """
    self._measure_and_report(
        batch_size=1024, num_gpus=2, distribution_strategy='mirrored')


if __name__ == '__main__':
  tf.test.main()
5,070
35.221429
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks using custom training loop on MNIST dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import timeit import numpy as np from keras.benchmarks import benchmark_util from keras.benchmarks import distribution_util class CustomMnistBenchmark(tf.test.Benchmark): """Benchmarks for custom training loop using `tf.test.Benchmark`.""" def __init__(self): super(CustomMnistBenchmark, self).__init__() self.num_classes = 10 self.input_shape = (28, 28, 1) self.epochs = 15 (x_train, y_train), _ = tf.keras.datasets.mnist.load_data() x_train = x_train.astype('float32') / 255 x_train = np.expand_dims(x_train, -1) y_train = tf.keras.utils.to_categorical(y_train, self.num_classes) self.num_examples = x_train.shape[0] # Use `tf.data.Dataset` for custom training loop. 
self.train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) def _build_model(self): """Model from https://keras.io/examples/vision/mnist_convnet/.""" model = tf.keras.Sequential([ tf.keras.Input(shape=self.input_shape), tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(self.num_classes, activation='softmax'), ]) return model def compute_loss(self, targets, predictions, loss_fn, batch_size): """Compute average loss.""" per_example_loss = loss_fn(targets, predictions) return tf.nn.compute_average_loss( per_example_loss, global_batch_size=batch_size) @tf.function(experimental_relax_shapes=True) def train_step(self, inputs, model, loss_fn, optimizer, batch_size): """Compute loss and optimize model by optimizer. Args: inputs: `tf.data`. model: See `model` in `train_function()` method. loss_fn: See `loss_fn` in `train_function()` method. optimizer: See `optimizer` in `train_function()` method. batch_size: See `batch_size` in `train_function()` method. Returns: Loss value. """ train_x, train_y = inputs with tf.GradientTape() as tape: predictions = model(train_x, training=True) loss = self.compute_loss(train_y, predictions, loss_fn, batch_size) grads = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(grads, model.trainable_weights)) return loss @tf.function(experimental_relax_shapes=True) def distributed_train_step(self, batch_dataset, model, loss_fn, optimizer, batch_size, distribution_strategy): """Train step in distribution strategy setting. Args: batch_dataset: `tf.data`. model: See `model` in `train_function()` method. loss_fn: See `loss_fn` in `train_function()` method. optimizer: See `optimizer` in `train_function()` method. 
batch_size: See `batch_size` in `train_function()` method. distribution_strategy: See `distribution_strategy` in `train_function()` method. Returns: Sum of per_replica_losses. """ per_replica_losses = distribution_strategy.run( self.train_step, args=( batch_dataset, model, loss_fn, optimizer, batch_size, )) return distribution_strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None) def train_function(self, model, train_dataset, loss_fn, optimizer, epochs=2, distribution_strategy=None, batch_size=256): """Train model in custom training loop and return average train_step_time. Args: model: Model function to be benchmarked. train_dataset: `tf.data` dataset. Should return a tuple of either (inputs, targets) or (inputs, targets, sample_weights). loss_fn: `tf.keras.losses.Loss` instance. optimizer: `tf.keras.optimizers` instance. epochs: Integer. Number of epochs to train the model. If unspecified, `epochs` will default to 2. distribution_strategy: Distribution strategies. It could be `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified, `distribution_strategy` will default to 'off'. Note that, `TPU` and `parameter_server` are not supported yet. batch_size: Integer. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Returns: Average train_step_time. """ train_step_time_list = [] timer = timeit.default_timer total_loss = 0.0 num_batches = 0 for _ in range(epochs): # Iterate over the batches of the dataset. 
for batch_dataset in train_dataset: start_time = timer() if distribution_strategy is not None: total_loss += self.distributed_train_step(batch_dataset, model, loss_fn, optimizer, batch_size, distribution_strategy) else: total_loss += self.train_step(batch_dataset, model, loss_fn, optimizer, batch_size) num_batches += 1 end_time = timer() train_step_time_list.append(end_time - start_time) return np.mean(train_step_time_list) def measure_performance(self, model, dataset, loss_fn, optimizer, batch_size=32, run_iters=4, epochs=10, distribution_strategy=None): """Run models and measure the performance. Args: model_fn: Model function to be benchmarked. dataset: `tf.data` dataset. Should return a tuple of either (inputs, targets) or (inputs, targets, sample_weights). loss_fn: `tf.keras.losses.Loss` instance. optimizer: `tf.keras.optimizers` instance. batch_size: Integer. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. run_iters: Integer. Number of iterations to run the performance measurement. If unspecified, `run_iters` will default to 4. epochs: Integer. Number of epochs to train the model. If unspecified, `epochs` will default to 10. distribution_strategy: Distribution strategies. It could be `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified, `distribution_strategy` will default to 'off'. Note that, `TPU` and `parameter_server` are not supported yet. Returns: Performance summary, which contains build_time, avg_epoch_time, wall_time, exp_per_sec, epochs, warmup_time, train_step_time. Raise: ValueError: if `dataset` is None or if `optimizer` instance is not provided or if `loss_fn` instance is not provided. 
""" if distribution_strategy is not None and \ not isinstance(dataset, tf.distribute.DistributedDataset): raise ValueError('tf.distribute.DistributedDataset' ' required in distribution strategy.') if distribution_strategy is None and \ not isinstance(dataset, tf.data.Dataset): raise ValueError('`tf.data` is required.') if not isinstance(loss_fn, tf.keras.losses.Loss): raise ValueError('`tf.keras.losses.Loss` instance ' 'for loss_fn is required.') if not isinstance(optimizer, tf.keras.optimizers.Optimizer): raise ValueError('`tf.keras.optimizers` instance ' 'for optimizer is required.') avg_epoch_time_list, train_step_time_list = [], [] wall_time_list, exp_per_sec_list, warmup_time_list = [], [], [] total_num_examples = epochs * self.num_examples for _ in range(run_iters): timer = timeit.default_timer start_time = timer() t1 = timer() self.train_function(model, dataset, loss_fn, optimizer, 1, distribution_strategy, batch_size) warmup_time = timer() - t1 t2 = timer() train_step_time = self.train_function(model, dataset, loss_fn, optimizer, epochs, distribution_strategy, batch_size) end_time = timer() train_step_time_list.append(train_step_time) warmup_time_list.append(warmup_time) wall_time_list.append(end_time - start_time) exp_per_sec_list.append(total_num_examples / (end_time - t2)) avg_epoch_time_list.append((end_time - t2) / epochs) metrics = [] metrics.append({ 'name': 'avg_epoch_time', 'value': np.mean(avg_epoch_time_list) }) metrics.append({'name': 'exp_per_sec', 'value': np.mean(exp_per_sec_list)}) metrics.append({'name': 'warmup_time', 'value': np.mean(warmup_time_list)}) metrics.append({ 'name': 'train_step_time', 'value': np.mean(train_step_time_list) }) metrics.append({'name': 'epochs', 'value': epochs}) wall_time = np.mean(wall_time_list) return metrics, wall_time def benchmark_custom_training_mnist_bs_128(self): """Measure performance with batch_size=128 and run_iters=5.""" batch_size = 128 run_iters = 5 train_dataset = self.train_dataset.shuffle( 
buffer_size=1024).batch(batch_size) # Instantiate a loss function. loss_fn = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) # Instantiate an optimizer to train the model. optimizer = tf.keras.optimizers.Adam() model = self._build_model() metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn, optimizer, batch_size, run_iters, self.epochs) extras = benchmark_util.get_keras_examples_metadata('conv', batch_size, '.keras.ctl_graph') self.report_benchmark( iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_custom_training_mnist_bs_256(self): """Measure performance with batch_size=256 and run_iters=5.""" batch_size = 256 run_iters = 5 train_dataset = self.train_dataset.shuffle( buffer_size=1024).batch(batch_size) # Instantiate a loss function. loss_fn = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) # Instantiate an optimizer to train the model. optimizer = tf.keras.optimizers.Adam() model = self._build_model() metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn, optimizer, batch_size, run_iters, self.epochs) extras = benchmark_util.get_keras_examples_metadata('conv', batch_size, '.keras.ctl_graph') self.report_benchmark( iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_custom_training_mnist_bs_512(self): """Measure performance with batch_size=512 and run_iters=10.""" batch_size = 512 run_iters = 5 train_dataset = self.train_dataset.shuffle( buffer_size=1024).batch(batch_size) # Instantiate a loss function. loss_fn = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) # Instantiate an optimizer to train the model. 
optimizer = tf.keras.optimizers.Adam() model = self._build_model() metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn, optimizer, batch_size, run_iters, self.epochs) extras = benchmark_util.get_keras_examples_metadata('conv', batch_size, '.keras.ctl_graph') self.report_benchmark( iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_custom_training_mnist_bs_512_gpu_2(self): """Measure performance with batch_size=512, run_iters=10, gpu=2 and distribution_strategy='mirrored'. """ batch_size = 512 run_iters = 10 train_dataset = self.train_dataset.shuffle( buffer_size=1024).batch(batch_size) distribution_strategy = 'mirrored' strategy = distribution_util.get_distribution_strategy( distribution_strategy=distribution_strategy, num_gpus=2) if distribution_strategy != 'off': train_dataset = strategy.experimental_distribute_dataset(train_dataset) strategy_scope = distribution_util.get_strategy_scope(strategy) with strategy_scope: # Instantiate a loss function. loss_fn = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) # Instantiate an optimizer to train the model. optimizer = tf.keras.optimizers.Adam() model = self._build_model() metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn, optimizer, batch_size, run_iters, self.epochs, strategy) extras = benchmark_util.get_keras_examples_metadata('conv', batch_size, '.keras.ctl_graph') self.report_benchmark( iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras) if __name__ == '__main__': tf.test.main()
14,866
38.751337
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks on MLP on Reuters dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np from keras.benchmarks import benchmark_util class MLPReutersBenchmark(tf.test.Benchmark): """Benchmarks for MLP using `tf.test.Benchmark`.""" def __init__(self): super(MLPReutersBenchmark, self).__init__() self.max_words = 1000 (self.x_train, self.y_train), _ = tf.keras.datasets.reuters.load_data( num_words=self.max_words) self.num_classes = np.max(self.y_train) + 1 tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=self.max_words) self.x_train = tokenizer.sequences_to_matrix(self.x_train, mode='binary') self.y_train = tf.keras.utils.to_categorical(self.y_train, self.num_classes) self.epochs = 5 def _build_model(self): """Model from https://github.com/keras-team/keras/blob/master/ examples/reuters_mlp.py. 
""" model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(512, input_shape=(self.max_words,))) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(self.num_classes)) model.add(tf.keras.layers.Activation('softmax')) return model # In each benchmark test, the required arguments for the # method `measure_performance` include: # x: Input data, it could be Numpy or loaded from tfds. # y: Target data. If `x` is a dataset or generator instance, # `y` should not be specified. # loss: Loss function for model. # optimizer: Optimizer for model. # Check more details in `measure_performance()` method of # benchmark_util. def benchmark_mlp_reuters_bs_128(self): """Measure performance with batch_size=128.""" batch_size = 128 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('mlp', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_mlp_reuters_bs_256(self): """Measure performance with batch_size=256.""" batch_size = 256 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('mlp', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_mlp_reuters_bs_512(self): """Measure performance with batch_size=512.""" batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer='adam', 
loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('mlp', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_mlp_reuters_bs_512_gpu_2(self): """Measure performance with batch_size=512, gpu=2 and distribution_strategy='mirrored' """ batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, num_gpus=2, distribution_strategy='mirrored', epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('mlp', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) if __name__ == '__main__': tf.test.main()
4,995
34.942446
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks on Convnet on MNIST dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np from keras.benchmarks import benchmark_util class ConvMnistBenchmark(tf.test.Benchmark): """Benchmarks for Convnet using `tf.test.Benchmark`.""" def __init__(self): super(ConvMnistBenchmark, self).__init__() self.num_classes = 10 self.input_shape = (28, 28, 1) (self.x_train, self.y_train), _ = tf.keras.datasets.mnist.load_data() self.x_train = self.x_train.astype('float32') / 255 self.x_train = np.expand_dims(self.x_train, -1) self.y_train = tf.keras.utils.to_categorical(self.y_train, self.num_classes) self.epochs = 15 def _build_model(self): """Model from https://keras.io/examples/vision/mnist_convnet/.""" model = tf.keras.Sequential([ tf.keras.Input(shape=self.input_shape), tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(self.num_classes, activation='softmax'), ]) return model # In each benchmark test, the required arguments for the # method 
`measure_performance` include: # x: Input data, it could be Numpy or loaded from tfds. # y: Target data. If `x` is a dataset or generator instance, # `y` should not be specified. # loss: Loss function for model. # optimizer: Optimizer for model. # Check more details in `measure_performance()` method of # benchmark_util. def benchmark_conv_mnist_bs_128(self): """Measure performance with batch_size=128.""" batch_size = 128 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('conv', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_conv_mnist_bs_256(self): """Measure performance with batch_size=256.""" batch_size = 256 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('conv', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_conv_mnist_bs_512(self): """Measure performance with batch_size=512.""" batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('conv', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) def benchmark_conv_mnist_bs_512_gpu_2(self): """Measure performance with batch_size=512, gpu=2 and distribution_strategy='mirrored' """ batch_size = 512 
metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, num_gpus=2, distribution_strategy='mirrored', epochs=self.epochs, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) metadata = benchmark_util.get_keras_examples_metadata('conv', batch_size) extras.update(metadata) self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras) if __name__ == '__main__': tf.test.main()
5,053
35.359712
80
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on CNN on cifar10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks import benchmark_util


class Cifar10CNNBenchmark(tf.test.Benchmark):
  """Benchmarks for CNN using `tf.test.Benchmark`."""

  def __init__(self):
    super(Cifar10CNNBenchmark, self).__init__()
    self.num_classes = 10
    (self.x_train, self.y_train), _ = tf.keras.datasets.cifar10.load_data()
    self.x_train = self.x_train.astype('float32') / 255
    self.y_train = tf.keras.utils.to_categorical(self.y_train,
                                                 self.num_classes)
    self.epochs = 5

  def _build_model(self):
    """Model from https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py."""
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(
            32, (3, 3), padding='same', input_shape=self.x_train.shape[1:]),
        tf.keras.layers.Activation('relu'),
        tf.keras.layers.Conv2D(32, (3, 3)),
        tf.keras.layers.Activation('relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Conv2D(64, (3, 3), padding='same'),
        tf.keras.layers.Activation('relu'),
        tf.keras.layers.Conv2D(64, (3, 3)),
        tf.keras.layers.Activation('relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512),
        tf.keras.layers.Activation('relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(self.num_classes),
        tf.keras.layers.Activation('softmax'),
    ])

  def _measure_and_report(self, batch_size, **distribution_kwargs):
    """Run one benchmark configuration and report the results.

    Args:
      batch_size: Integer. Number of samples per gradient update.
      **distribution_kwargs: Optional `num_gpus` / `distribution_strategy`
        arguments forwarded to `benchmark_util.measure_performance`.
    """
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        epochs=self.epochs,
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6),
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        **distribution_kwargs)
    extras.update(benchmark_util.get_keras_examples_metadata('cnn', batch_size))
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_cnn_cifar10_bs_256(self):
    """Measure performance with batch_size=256."""
    self._measure_and_report(batch_size=256)

  def benchmark_cnn_cifar10_bs_512(self):
    """Measure performance with batch_size=512."""
    self._measure_and_report(batch_size=512)

  def benchmark_cnn_cifar10_bs_1024(self):
    """Measure performance with batch_size=1024."""
    self._measure_and_report(batch_size=1024)

  def benchmark_cnn_cifar10_bs_1024_gpu_2(self):
    """Measure performance with batch_size=1024, gpu=2 and

    distribution_strategy=`mirrored`.
    """
    self._measure_and_report(
        batch_size=1024, num_gpus=2, distribution_strategy='mirrored')


if __name__ == '__main__':
  tf.test.main()
5,724
37.682432
93
py
keras
keras-master/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Antirectifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks import benchmark_util


class AntirectifierBenchmark(tf.test.Benchmark):
  """Benchmarks for Antirectifier using `tf.test.Benchmark`."""

  def __init__(self):
    super(AntirectifierBenchmark, self).__init__()
    (self.x_train, self.y_train), _ = tf.keras.datasets.mnist.load_data()
    # Flatten 28x28 images to 784-vectors and scale into [0, 1].
    self.x_train = self.x_train.reshape(-1, 784)
    self.x_train = self.x_train.astype("float32") / 255

  def _build_model(self):
    """Model from https://keras.io/examples/keras_recipes/antirectifier/."""
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(784,)),
        tf.keras.layers.Dense(256),
        Antirectifier(),
        tf.keras.layers.Dense(256),
        Antirectifier(),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10),
    ])
    return model

  # In each benchmark test, the required arguments for the
  # method `measure_performance` include:
  #   x: Input data, it could be Numpy or loaded from tfds.
  #   y: Target data. If `x` is a dataset or generator instance,
  #      `y` should not be specified.
  #   loss: Loss function for model.
  #   optimizer: Optimizer for model.
  #   Check more details in `measure_performance()` method of
  #   benchmark_util.

  def _run_and_report_benchmark(self, batch_size, **measure_kwargs):
    """Runs one training benchmark and reports the results.

    All public `benchmark_*` methods delegate here so the measurement and
    reporting logic lives in exactly one place.

    Args:
      batch_size: Batch size to train with.
      **measure_kwargs: Extra keyword arguments forwarded to
        `benchmark_util.measure_performance` (e.g. `num_gpus`,
        `distribution_strategy`).
    """
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer="rmsprop",
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=["sparse_categorical_accuracy"],
        **measure_kwargs)
    metadata = benchmark_util.get_keras_examples_metadata(
        "antirectifier", batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_antirectifier_bs_128(self):
    """Measure performance with batch_size=128."""
    self._run_and_report_benchmark(batch_size=128)

  def benchmark_antirectifier_bs_256(self):
    """Measure performance with batch_size=256."""
    self._run_and_report_benchmark(batch_size=256)

  def benchmark_antirectifier_bs_512(self):
    """Measure performance with batch_size=512."""
    self._run_and_report_benchmark(batch_size=512)

  def benchmark_antirectifier_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and

    distribution_strategy=`mirrored`.
    """
    self._run_and_report_benchmark(
        batch_size=512, num_gpus=2, distribution_strategy="mirrored")


class Antirectifier(tf.keras.layers.Layer):
  """Build simple custom layer."""

  def __init__(self, initializer="he_normal", **kwargs):
    super(Antirectifier, self).__init__(**kwargs)
    self.initializer = tf.keras.initializers.get(initializer)

  def build(self, input_shape):
    output_dim = input_shape[-1]
    # The layer concatenates positive and negative parts, so the kernel maps
    # a doubled feature dimension back to `output_dim`.
    self.kernel = self.add_weight(
        shape=(output_dim * 2, output_dim),
        initializer=self.initializer,
        name="kernel",
        trainable=True,
    )

  def call(self, inputs):  #pylint: disable=arguments-differ
    inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True)
    pos = tf.nn.relu(inputs)
    neg = tf.nn.relu(-inputs)
    concatenated = tf.concat([pos, neg], axis=-1)
    mixed = tf.matmul(concatenated, self.kernel)
    return mixed

  def get_config(self):
    # Implement get_config to enable serialization. This is optional.
    base_config = super(Antirectifier, self).get_config()
    config = {"initializer": tf.keras.initializers.serialize(self.initializer)}
    return dict(list(base_config.items()) + list(config.items()))


if __name__ == "__main__":
  tf.test.main()
5,950
35.509202
80
py
keras
keras-master/keras/benchmarks/layer_benchmarks/layer_benchmarks_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

import functools
import numpy as np

from keras.benchmarks import benchmark_util
from keras.benchmarks.layer_benchmarks import layer_benchmarks_test_base


def _get_metadata(name):
  # `name` is the parsed benchmark name; name[1] is the
  # "{layer_name}_{small|normal|large}" part of the benchmark id.
  return {
      "model_name": "ideal_layers",
      "parameters": name[1] + "_shape",
  }


def _get_layer_args(layer_cls, layer_args):
  # To make benchmark parameters compatible with GPU platform.
  if layer_cls is tf.keras.layers.Bidirectional:
    return {"layer": tf.keras.layers.LSTM(1)}
  return layer_args


def _get_input_data(inputs):
  # Benchmark parameters provide either a shape (filled with ones) or a
  # concrete input tensor/array under the "input" key.
  if "input_shape" in inputs:
    return tf.ones(inputs["input_shape"])
  elif "input" in inputs:
    return inputs["input"]
  else:
    raise ValueError("Please specify either `input_shape` or `input`"
                     "for the benchmark test")


def _layer_call_backward(layer, x):
  # Runs one forward pass and one gradient computation so the benchmark
  # covers the backward path as well.
  with tf.GradientTape() as tape:
    y = layer(x)
    loss = tf.reduce_mean(y**2)

  _ = tape.gradient(loss, layer.trainable_variables)

# Each entry below is:
#   (benchmark_name, layer_class, layer_kwargs, input_spec, num_iters)
CORE_LAYERS = [
    ("Dense_small_shape", tf.keras.layers.Dense,
     {"units": 32, "activation": "relu"},
     {"input_shape": (1, 16)}, 100),
    ("Activation_small_shape", tf.keras.layers.Activation,
     {"activation": "relu"}, {"input_shape": (1, 4)}, 100),
    ("Embedding_small_shape", tf.keras.layers.Embedding,
     {"input_dim": 1, "output_dim": 1, "input_length": 1},
     {"input": np.random.randint(1, size=(1, 1))}, 100),
    ("Embedding_normal_shape", tf.keras.layers.Embedding,
     {"input_dim": 1000, "output_dim": 64, "input_length": 10},
     {"input": np.random.randint(1000, size=(32, 10))}, 100),
    ("Masking_small_shape", tf.keras.layers.Masking,
     {"mask_value": 1}, {"input_shape": (1, 1)}, 100),
    ("Lambda_small_shape", tf.keras.layers.Lambda,
     {"function": lambda x: x ** 2}, {"input_shape": (1, 1)}, 100),
    ("Flatten_small_shape", tf.keras.layers.Flatten,
     {}, {"input_shape": (1, 1)}, 100),
]

CONV_LAYERS = [
    ("Conv1D_small_shape", tf.keras.layers.Conv1D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1)}, 100),
    ("Conv2D_small_shape", tf.keras.layers.Conv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
    ("Conv2D_normal_shape", tf.keras.layers.Conv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (64, 28, 28, 3)}, 100),
    ("Conv3D_small_shape", tf.keras.layers.Conv3D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("Conv1DTranspose_small_shape", tf.keras.layers.Conv1DTranspose,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1)}, 100),
    ("Conv2DTranspose_small_shape", tf.keras.layers.Conv2DTranspose,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
    ("Conv3DTranspose_small_shape", tf.keras.layers.Conv3DTranspose,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("SeparableConv1D_small_shape", tf.keras.layers.SeparableConv1D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1)}, 100),
    ("SeparableConv2D_small_shape", tf.keras.layers.SeparableConv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
    ("DepthwiseConv2D_small_shape", tf.keras.layers.DepthwiseConv2D,
     {"kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
]

RECURRENT_LAYERS = [
    ("LSTM_small_shape", tf.keras.layers.LSTM,
     {"units": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("LSTM_normal_shape", tf.keras.layers.LSTM,
     {"units": 4}, {"input_shape": (32, 10, 8)}, 100),
    ("GRU_small_shape", tf.keras.layers.GRU,
     {"units": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("SimpleRNN_small_shape", tf.keras.layers.SimpleRNN,
     {"units": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("TimeDistributed_small_shape", tf.keras.layers.TimeDistributed,
     {"layer": tf.keras.layers.Conv2D(1, 1)},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("Bidirectional_small_shape", tf.keras.layers.Bidirectional,
     {}, {"input_shape": (1, 1, 1)}, 100),
    ("ConvLSTM2D_small_shape", tf.keras.layers.ConvLSTM2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("RNN_small_shape", tf.keras.layers.RNN,
     {"cell": tf.keras.layers.LSTMCell(1)}, {"input_shape": (1, 1, 1)}, 100),
]

NORMALIZATION_LAYERS = [
    ("BatchNormalization_small_shape", tf.keras.layers.BatchNormalization,
     {"axis": -1}, {"input_shape": (1, 1, 1)}, 100),
    ("LayerNormalization_small_shape", tf.keras.layers.LayerNormalization,
     {"axis": -1}, {"input_shape": (1, 1, 1)}, 100),
]

REGULARIZATION_LAYERS = [
    ("Dropout_small_shape", tf.keras.layers.Dropout,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
    ("SpatialDropout1D_small_shape", tf.keras.layers.SpatialDropout1D,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
    ("SpatialDropout2D_small_shape", tf.keras.layers.SpatialDropout2D,
     {"rate": 0.2}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("SpatialDropout3D_small_shape", tf.keras.layers.SpatialDropout3D,
     {"rate": 0.2}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("GaussianDropout_small_shape", tf.keras.layers.GaussianDropout,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
    ("GaussianNoise_small_shape", tf.keras.layers.GaussianNoise,
     {"stddev": 0.1}, {"input_shape": (1, 1, 1)}, 100),
    ("ActivityRegularization_small_shape",
     tf.keras.layers.ActivityRegularization,
     {"l1": 0.3}, {"input_shape": (1, 1, 1)}, 100),
    ("AlphaDropout_small_shape", tf.keras.layers.AlphaDropout,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
]

# NOTE(review): "ATTENSION" looks like a typo for "ATTENTION"; the name is
# kept unchanged because it is referenced below.
ATTENSION_LAYERS = [
    ("Attention_small_shape", tf.keras.layers.Attention,
     {"use_scale": False},
     {"input": [np.ones((1, 1, 1)), np.ones((1, 1, 1))]}, 100),
    ("AdditiveAttention_small_shape", tf.keras.layers.AdditiveAttention,
     {"use_scale": True},
     {"input": [np.ones((1, 1, 1)), np.ones((1, 1, 1))]}, 100),
]

POOLING_LAYERS = [
    ("MaxPooling1D_small_shape", tf.keras.layers.MaxPooling1D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("MaxPooling2D_small_shape", tf.keras.layers.MaxPooling2D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("MaxPooling3D_small_shape", tf.keras.layers.MaxPooling3D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("AveragePooling1D_small_shape", tf.keras.layers.AveragePooling1D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("AveragePooling2D_small_shape", tf.keras.layers.AveragePooling2D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("AveragePooling3D_small_shape", tf.keras.layers.AveragePooling3D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("GlobalMaxPooling1D_small_shape", tf.keras.layers.GlobalMaxPooling1D,
     {}, {"input_shape": (1, 1, 1)}, 100),
    ("GlobalMaxPooling2D_small_shape", tf.keras.layers.GlobalMaxPooling2D,
     {}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("GlobalMaxPooling3D_small_shape", tf.keras.layers.GlobalMaxPooling3D,
     {}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("GlobalAveragePooling1D_small_shape",
     tf.keras.layers.GlobalAveragePooling1D,
     {}, {"input_shape": (1, 1, 1)}, 100),
    ("GlobalAveragePooling2D_small_shape",
     tf.keras.layers.GlobalAveragePooling2D,
     {}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("GlobalAveragePooling3D_small_shape",
     tf.keras.layers.GlobalAveragePooling3D,
     {}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
]


class KerasLayerBenchmarks(  # pylint: disable=undefined-variable
    layer_benchmarks_test_base.LayerBenchmarksBase,
    metaclass=tf.__internal__.test.ParameterizedBenchmark):
  """Parameterized benchmarks over the layer lists defined above."""

  # The parameter of each layer benchmark is a tuple, and the first one is
  # the benchmark name. It must follow the convention of
  # "{layer_name}_{small|normal|large}_shape" to make it compatible with
  # `self.report_benchmark()` method.
  _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
      CORE_LAYERS + CONV_LAYERS + RECURRENT_LAYERS + NORMALIZATION_LAYERS +
      REGULARIZATION_LAYERS + ATTENSION_LAYERS + POOLING_LAYERS)

  def benchmark_layer_call(self, layer_cls, layer_args, inputs, num_iters):
    """Benchmarks an eager forward pass of the layer."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)

    fn = functools.partial(layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_with_function(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmarks the forward pass with `layer.call` wrapped in tf.function."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(layer.call)

    fn = functools.partial(layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.function"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_with_xla(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmarks the forward pass with XLA jit-compilation enabled."""
    name = benchmark_util.get_benchmark_name(self._get_name())
    # TODO(b/173461426)
    if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
      return
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(
        layer.call, jit_compile=True)

    fn = functools.partial(layer, x)
    metadata = {"implementation": name[0] + ".layer.call.xla"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmarks a forward + backward pass of the layer."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)

    fn = functools.partial(_layer_call_backward, layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.backward"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward_with_function(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmarks forward + backward with `layer.call` in tf.function."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(layer.call)

    fn = functools.partial(_layer_call_backward, layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.backward.function"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward_with_xla(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmarks forward + backward with XLA, skipping unsupported layers."""
    name = benchmark_util.get_benchmark_name(self._get_name())
    # TODO(b/153480400)
    if layer_cls in [
        tf.keras.layers.LSTM,
        tf.keras.layers.Bidirectional,
        tf.keras.layers.ConvLSTM2D,
        tf.keras.layers.GRU,
        tf.keras.layers.RNN,
        tf.keras.layers.SimpleRNN
    ]:
      return
    # TODO(b/173461426)
    if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
      return
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(
        layer.call, jit_compile=True)

    fn = functools.partial(_layer_call_backward, layer, x)
    metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)


if __name__ == "__main__":
  tf.test.main()
12,774
41.301325
80
py
keras
keras-master/keras/benchmarks/layer_benchmarks/run_xprof.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import as _absolute_import from __future__ import division as _division from __future__ import print_function as _print_function import time import uuid from tensorflow.python.profiler import profiler_v2 as profiler def run_with_xprof(self, func, num_iters_xprof=100, enable_python_trace=True, logdir='/tmp/layer_benchmark_xprof/'): suid = str(uuid.uuid4()) if enable_python_trace: options = profiler.ProfilerOptions(python_tracer_level=1) logdir = os.path.join(logdir, str(uuid.uuid4()) + "_with_python") else: options = profiler.ProfilerOptions(python_tracer_level=0) logdir = os.path.join(logdir, suid) start = time.time() with profiler.Profile(logdir, options): for _ in range(num_iters_xprof): func() total_time = time.time() - start us_per_example = float("{0:.3f}".format(total_time * 1e6 / num_iters_xprof)) return logdir, us_per_example
1,632
38.829268
80
py
keras
keras-master/keras/benchmarks/layer_benchmarks/layer_benchmarks_test_base.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Benchmark base to run and report Keras layers benchmark results.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import time from keras.benchmarks.layer_benchmarks import run_xprof class LayerBenchmarksBase(tf.test.Benchmark): """Run and report benchmark results. The first run is without any profiling to purly measure running time. Second run is with xprof but no python trace. Third run is with xprof and python trace. Note: xprof runs fewer iterations, and the maximum iterations is 100. """ def run_report(self, func, num_iters, metadata=None): """Run and report benchmark results for different settings.""" # 0. Warm up. func() # 1. Run without profiling. start = time.time() for _ in range(num_iters): func() total_time = time.time() - start us_mean_time = total_time * 1e6 / num_iters metrics = [ {"name": "examples_per_sec", "value": float("{0:.3f}".format(num_iters / total_time))}, {"name": "us_per_example", "value": float("{0:.3f}".format(us_mean_time))}] # 2. Run with xprof with no python trace. num_iters_xprof = min(100, num_iters) xprof_link, us_per_example = run_xprof.run_with_xprof( func, num_iters_xprof, False) # This xprof link will appear in the benchmark dashboard. 
extras = { "xprof_link": xprof_link, "us_per_example_with_xprof": us_per_example } # 3. Run with xprof and python trace. xprof_link, us_per_example = run_xprof.run_with_xprof( func, num_iters_xprof, True) extras["python_trace_xprof_link"] = xprof_link extras["us_per_example_with_xprof_and_python"] = us_per_example if metadata: extras.update(metadata) self.report_benchmark( iters=num_iters, wall_time=us_mean_time, extras=extras, metrics=metrics)
2,620
33.486842
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/vgg_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for saved model on VGG19."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util


class BenchmarkSaveApplications(tf.test.Benchmark):
  """Saved-model save/load benchmark for the VGG19 application."""

  def benchmark_save_and_load_vgg19(self):
    """Times saving and loading VGG19 and reports both measurements."""
    results = saved_model_benchmark_util.save_and_load_benchmark(
        tf.keras.applications.VGG19)
    # The helper returns (save_result, load_result); each dict already holds
    # exactly the keyword arguments `report_benchmark` expects
    # (`iters`, `wall_time`, `name`).
    for result in results:
      self.report_benchmark(**result)


if __name__ == '__main__':
  tf.test.main()
1,510
32.577778
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/efficientnet_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for saved model on EfficientNetB7."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util


class BenchmarkSaveApplications(tf.test.Benchmark):
  """Saved-model save/load benchmark for the EfficientNetB7 application."""

  def benchmark_save_and_load_efficient_net_b7(self):
    """Times saving and loading EfficientNetB7 and reports both results."""
    results = saved_model_benchmark_util.save_and_load_benchmark(
        tf.keras.applications.EfficientNetB7)
    # The helper returns (save_result, load_result); each dict already holds
    # exactly the keyword arguments `report_benchmark` expects
    # (`iters`, `wall_time`, `name`).
    for result in results:
      self.report_benchmark(**result)


if __name__ == '__main__':
  tf.test.main()
1,538
33.977273
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/xception_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for saved model on Xception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util


class BenchmarkSaveApplications(tf.test.Benchmark):
  """Saved-model save/load benchmark for the Xception application."""

  def benchmark_save_and_load_xception(self):
    """Times saving and loading Xception and reports both measurements."""
    results = saved_model_benchmark_util.save_and_load_benchmark(
        tf.keras.applications.Xception)
    # The helper returns (save_result, load_result); each dict already holds
    # exactly the keyword arguments `report_benchmark` expects
    # (`iters`, `wall_time`, `name`).
    for result in results:
      self.report_benchmark(**result)


if __name__ == '__main__':
  tf.test.main()
1,519
32.777778
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/mobilenet_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for saved model on MobileNetV2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util


class BenchmarkSaveApplications(tf.test.Benchmark):
  """Saved-model save/load benchmark for the MobileNetV2 application."""

  def benchmark_save_and_load_mobilenet_v2(self):
    """Times saving and loading MobileNetV2 and reports both measurements."""
    results = saved_model_benchmark_util.save_and_load_benchmark(
        tf.keras.applications.MobileNetV2)
    # The helper returns (save_result, load_result); each dict already holds
    # exactly the keyword arguments `report_benchmark` expects
    # (`iters`, `wall_time`, `name`).
    for result in results:
      self.report_benchmark(**result)


if __name__ == '__main__':
  tf.test.main()
1,528
33.75
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/saved_model_benchmark_util.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for saved model benchmarks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import tempfile import time def save_and_load_benchmark(app): """Util for saved model benchmarks.""" trials = 3 model = app(weights=None) model_name = app.__name__ tmp_dir = tf.compat.v1.test.get_temp_dir() tf.io.gfile.makedirs(tmp_dir) save_dir = tempfile.mkdtemp(dir=tmp_dir) total_save_time = 0 total_load_time = 0 # Run one untimed iteration of saving/loading. model.save(save_dir, save_format='tf') tf.keras.models.load_model(save_dir) for _ in range(trials): start_time = time.time() model.save(save_dir, save_format='tf') total_save_time += time.time() - start_time start_time = time.time() tf.keras.models.load_model(save_dir) total_load_time += time.time() - start_time save_result = { 'iters': trials, 'wall_time': total_save_time / trials, 'name': '{}.save'.format(model_name) } load_result = { 'iters': trials, 'wall_time': total_load_time / trials, 'name': '{}.load'.format(model_name) } tf.compat.v1.gfile.DeleteRecursively(save_dir) return save_result, load_result
1,954
27.75
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/resnet152_v2_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for saved model on ResNet152V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util


class BenchmarkSaveApplications(tf.test.Benchmark):
  """Saved-model save/load benchmark for the ResNet152V2 application."""

  def benchmark_save_and_load_resnet152_v2(self):
    """Times saving and loading ResNet152V2 and reports both measurements."""
    results = saved_model_benchmark_util.save_and_load_benchmark(
        tf.keras.applications.ResNet152V2)
    # The helper returns (save_result, load_result); each dict already holds
    # exactly the keyword arguments `report_benchmark` expects
    # (`iters`, `wall_time`, `name`).
    for result in results:
      self.report_benchmark(**result)


if __name__ == '__main__':
  tf.test.main()
1,529
33
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/densenet_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for saved model on DenseNet201.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util class BenchmarkSaveApplications(tf.test.Benchmark): def benchmark_save_and_load_densenet_201(self): app = tf.keras.applications.DenseNet201 save_result, load_result = ( saved_model_benchmark_util.save_and_load_benchmark(app)) self.report_benchmark( iters=save_result['iters'], wall_time=save_result['wall_time'], name=save_result['name']) self.report_benchmark( iters=load_result['iters'], wall_time=load_result['wall_time'], name=load_result['name']) if __name__ == '__main__': tf.test.main()
1,528
33.75
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/nasnet_large_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for saved model on NASNetLarge.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util class BenchmarkSaveApplications(tf.test.Benchmark): def benchmark_save_and_load_nasnet_large(self): app = tf.keras.applications.NASNetLarge save_result, load_result = ( saved_model_benchmark_util.save_and_load_benchmark(app)) self.report_benchmark( iters=save_result['iters'], wall_time=save_result['wall_time'], name=save_result['name']) self.report_benchmark( iters=load_result['iters'], wall_time=load_result['wall_time'], name=load_result['name']) if __name__ == '__main__': tf.test.main()
1,528
33.75
80
py
keras
keras-master/keras/benchmarks/saved_model_benchmarks/inception_resnet_v2_benchmark_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for saved model on InceptionResNetV2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from keras.benchmarks.saved_model_benchmarks import saved_model_benchmark_util class BenchmarkSaveApplications(tf.test.Benchmark): def benchmark_save_and_load_inception_resnet_v2(self): app = tf.keras.applications.InceptionResNetV2 save_result, load_result = ( saved_model_benchmark_util.save_and_load_benchmark(app)) self.report_benchmark( iters=save_result['iters'], wall_time=save_result['wall_time'], name=save_result['name']) self.report_benchmark( iters=load_result['iters'], wall_time=load_result['wall_time'], name=load_result['name']) if __name__ == '__main__': tf.test.main()
1,548
33.422222
80
py
keras
keras-master/keras/legacy_tf_layers/pooling.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=g-classes-have-attributes """Contains the pooling layer classes and their functional aliases.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from keras import layers as keras_layers from keras.legacy_tf_layers import base from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util.tf_export import tf_export @keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling1D']) @tf_export(v1=['layers.AveragePooling1D']) class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer): """Average Pooling layer for 1D inputs. Args: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. 
@compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.AveragePooling1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python pooling = tf.compat.v1.layers.AveragePooling1D(pool_size=2, strides=2) ``` After: ```python pooling = tf.keras.layers.AveragePooling1D(pool_size=2, strides=2) ``` @end_compatibility """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling1D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.average_pooling1d']) @tf_export(v1=['layers.average_pooling1d']) def average_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average Pooling layer for 1D inputs. Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled. 
@compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.AveragePooling1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.average_pooling1d(x, pool_size=2, strides=2) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.AveragePooling1D(pool_size=2, strides=2)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.average_pooling1d` is deprecated and ' 'will be removed in a future version. ' 'Please use `tf.keras.layers.AveragePooling1D` instead.') layer = AveragePooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling1D']) @tf_export(v1=['layers.MaxPooling1D']) class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer): """Max Pooling layer for 1D inputs. Args: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. 
@compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.MaxPooling1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python pooling = tf.compat.v1.layers.MaxPooling1D(pool_size=2, strides=2) ``` After: ```python pooling = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2) ``` @end_compatibility """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling1D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.max_pooling1d']) @tf_export(v1=['layers.max_pooling1d']) def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max Pooling layer for 1D inputs. Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled. 
@compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.MaxPooling1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.max_pooling1d(x, pool_size=2, strides=2) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.max_pooling1d` is deprecated and ' 'will be removed in a future version. ' 'Please use `tf.keras.layers.MaxPooling1D` instead.') layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling2D']) @tf_export(v1=['layers.AveragePooling2D']) class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer): """Average pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. 
`channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.AveragePooling2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python pooling = tf.compat.v1.layers.AveragePooling2D(pool_size=2, strides=2) ``` After: ```python pooling = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2) ``` @end_compatibility """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.average_pooling2d']) @tf_export(v1=['layers.average_pooling2d']) def average_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average pooling layer for 2D inputs (e.g. images). Args: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. 
The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.AveragePooling2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.average_pooling2d(x, pool_size=2, strides=2) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.average_pooling2d` is deprecated and ' 'will be removed in a future version. ' 'Please use `tf.keras.layers.AveragePooling2D` instead.') layer = AveragePooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling2D']) @tf_export(v1=['layers.MaxPooling2D']) class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer): """Max pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. 
strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.MaxPooling2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python pooling = tf.compat.v1.layers.MaxPooling2D(pool_size=2, strides=2) ``` After: ```python pooling = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2) ``` @end_compatibility """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.max_pooling2d']) @tf_export(v1=['layers.max_pooling2d']) def max_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max pooling layer for 2D inputs (e.g. images). Args: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. 
Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.MaxPooling2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.max_pooling2d(x, pool_size=2, strides=2) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.max_pooling2d` is deprecated and ' 'will be removed in a future version. 
' 'Please use `tf.keras.layers.MaxPooling2D` instead.') layer = MaxPooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling3D']) @tf_export(v1=['layers.AveragePooling3D']) class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer): """Average pooling layer for 3D inputs (e.g. volumes). Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.AveragePooling3D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python pooling = tf.compat.v1.layers.AveragePooling3D(pool_size=2, strides=2) ``` After: ```python pooling = tf.keras.layers.AveragePooling3D(pool_size=2, strides=2) ``` @end_compatibility """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling3D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.average_pooling3d']) @tf_export(v1=['layers.average_pooling3d']) def average_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average pooling layer for 3D inputs (e.g. volumes). Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. 
Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.AveragePooling3D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.average_pooling3d(x, pool_size=2, strides=2) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.AveragePooling3D(pool_size=2, strides=2)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.average_pooling3d` is deprecated and ' 'will be removed in a future version. ' 'Please use `tf.keras.layers.AveragePooling3D` instead.') layer = AveragePooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling3D']) @tf_export(v1=['layers.MaxPooling3D']) class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer): """Max pooling layer for 3D inputs (e.g. volumes). Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. 
`channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.MaxPooling3D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python pooling = tf.compat.v1.layers.MaxPooling3D(pool_size=2, strides=2) ``` After: ```python pooling = tf.keras.layers.MaxPooling3D(pool_size=2, strides=2) ``` @end_compatibility """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling3D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.max_pooling3d']) @tf_export(v1=['layers.max_pooling3d']) def max_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max pooling layer for 3D inputs (e.g. volumes). Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. 
The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.MaxPooling3D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.max_pooling3d(x, pool_size=2, strides=2) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.MaxPooling3D(pool_size=2, strides=2)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.max_pooling3d` is deprecated and ' 'will be removed in a future version. ' 'Please use `tf.keras.layers.MaxPooling3D` instead.') layer = MaxPooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) # Aliases AvgPool2D = AveragePooling2D MaxPool2D = MaxPooling2D max_pool2d = max_pooling2d avg_pool2d = average_pooling2d
29,521
33.978673
80
py
keras
keras-master/keras/legacy_tf_layers/base.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=g-classes-have-attributes """Contains the base Layer class, from which all layers inherit.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import copy import warnings from keras import backend from keras.engine import base_layer from keras.engine import base_layer_utils from keras.legacy_tf_layers import variable_scope_shim from keras.mixed_precision import policy from keras.utils import tf_contextlib from tensorflow.python.ops import variable_scope as vs from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util.tf_export import tf_export # Avoid breaking users who directly import this symbol from this file. # TODO(fchollet): remove this. InputSpec = base_layer.InputSpec # pylint: disable=invalid-name _KERAS_STYLE_SCOPE = False @keras_export( v1=['keras.__internal__.legacy.layers.experimental.keras_style_scope']) @tf_export(v1=['layers.experimental.keras_style_scope']) @tf_contextlib.contextmanager def keras_style_scope(): """Use Keras-style variable management. All tf.layers and tf RNN cells created in this scope use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. 
The purpose of this scope is to allow users of existing layers to slowly transition to a Keras layers API without breaking existing functionality. One example of this is when using TensorFlow's RNN classes with Keras Models or Networks. Because Keras models do not properly set variable scopes, users of RNNs may either accidentally share scopes between two different models, or get errors about variables that already exist. Example: ```python class RNNModel(tf.keras.Model): def __init__(self, name): super(RNNModel, self).__init__(name=name) self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell( [tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)]) def call(self, input, state): return self.rnn(input, state) model_1 = RNNModel("model_1") model_2 = RNNModel("model_2") # OK output_1, next_state_1 = model_1(input, state) # Raises an error about trying to create an already existing variable. output_2, next_state_2 = model_2(input, state) ``` The solution is to wrap the model construction and execution in a keras-style scope: ```python with keras_style_scope(): model_1 = RNNModel("model_1") model_2 = RNNModel("model_2") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ``` Yields: A keras layer style scope. """ global _KERAS_STYLE_SCOPE stack = _KERAS_STYLE_SCOPE _KERAS_STYLE_SCOPE = True try: yield finally: _KERAS_STYLE_SCOPE = stack @keras_export( v1=['keras.__internal__.legacy.layers.experimental.set_keras_style']) @tf_export(v1=['layers.experimental.set_keras_style']) def set_keras_style(): """Use Keras-style variable management. All tf.layers and tf RNN cells created after keras style ha been enabled use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. 
The purpose of this function is to allow users of existing layers to slowly transition to Keras layers API without breaking existing functionality. For more details, see the documentation for `keras_style_scope`. Note, once keras style has been set, it is set globally for the entire program and cannot be unset. Example: ```python set_keras_style() model_1 = RNNModel(name="model_1") model_2 = RNNModel(name="model_2") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ``` """ global _KERAS_STYLE_SCOPE _KERAS_STYLE_SCOPE = True def _is_in_keras_style_scope(): global _KERAS_STYLE_SCOPE return _KERAS_STYLE_SCOPE @keras_export(v1=['keras.__internal__.legacy.layers.Layer']) @tf_export(v1=['layers.Layer']) class Layer(base_layer.Layer): """Base layer class. It is considered legacy, and we recommend the use of `tf.keras.layers.Layer` instead. Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). Read-only properties: name: The name of the layer (string). dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). trainable_variables: List of trainable variables. non_trainable_variables: List of non-trainable variables. variables: List of all variables of this layer, trainable and non-trainable. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). 
Mutable properties: trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. """ def __init__(self, trainable=True, name=None, dtype=None, **kwargs): # For backwards compatibility, legacy layers do not use `ResourceVariable` # by default. self._use_resource_variables = False scope = kwargs.pop('_scope', None) self._reuse = kwargs.pop('_reuse', None) # Avoid an incorrect lint error self._trainable_weights = [] self.built = False if dtype is None: # Indicates to infer dtype from inputs. When the V2 dtype behavior is # enabled, Keras layers default their dtype to floatx instead, so we pass # an "_infer" policy to keep the old V1 behavior. dtype = policy.Policy('_infer') if 'autocast' not in kwargs: kwargs['autocast'] = False # Mark that legacy layers should not be instrumented as Keras usage self._disable_keras_instrumentation = True super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype, **kwargs) if _is_in_keras_style_scope(): if scope is not None: raise ValueError( 'scope argument not allowed when keras style layers are enabled, ' 'but saw: {}'.format(scope)) if self._reuse is not None: raise ValueError( 'reuse argument not allowed when keras style layers are enabled, ' 'but saw: {}'.format(self._reuse)) self._keras_style = True else: self._keras_style = False self._call_has_scope_arg = 'scope' in self._call_fn_args if scope: with tf.compat.v1.variable_scope(scope) as captured_scope: self._scope = captured_scope else: self._scope = None self._current_scope = None # We no longer track graph in tf.layers layers. This property is only kept to # maintain API backward compatibility. @property def graph(self): warnings.warn('`Layer.graph` is deprecated and ' 'will be removed in a future version. 
' 'Please stop using this property because tf.layers layers no ' 'longer track their graph.') if tf.executing_eagerly(): raise RuntimeError('Layer.graph not supported when executing eagerly.') return None def _init_set_name(self, name): # Determine layer name (non-unique). if isinstance(name, tf.compat.v1.VariableScope): base_name = name.name self._name, _ = self._make_unique_name() else: base_name = name self._name = name if not name: self._name, base_name = self._make_unique_name() self._base_name = base_name def _make_unique_name(self, name_uid_map=None, avoid_names=None, namespace='', zero_based=False): base_name = base_layer.to_snake_case(self.__class__.__name__) name = backend.unique_object_name( base_name, name_uid_map=name_uid_map, avoid_names=avoid_names, namespace=namespace, zero_based=zero_based) return (name, base_name) @property def scope_name(self): if not self._scope: raise ValueError('No name available for layer scope because the layer "' + self._name + '" has not been used yet. The scope name ' + ' is determined the first time the layer instance is ' + 'called. You must therefore call the layer before ' + 'querying `scope_name`.') return self._scope.name def add_loss(self, losses, inputs=None): previous_losses_length = len(self._losses) previous_callable_losses_length = len(self._callable_losses) super(Layer, self).add_loss(losses, inputs=inputs) if not tf.executing_eagerly(): # TODO(fchollet): deprecate collection below. 
new_losses = self._losses[previous_losses_length:] new_callable_losses = self._callable_losses[ previous_callable_losses_length:] for regularizer in new_callable_losses: loss_tensor = regularizer() if loss_tensor is not None: new_losses.append(loss_tensor) _add_elements_to_collection( new_losses, tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) def _name_scope(self): # pylint: disable=method-hidden """Determines op naming for the Layer.""" if self._keras_style: return super(Layer, self)._name_scope() return self._current_scope.original_name_scope def _set_scope(self, scope=None): if self._scope is None: # If constructed with _scope=None, lazy setting of scope. if self._reuse: with tf.compat.v1.variable_scope( scope if scope is not None else self._base_name) as captured_scope: self._scope = captured_scope else: with tf.compat.v1.variable_scope( scope, default_name=self._base_name) as captured_scope: self._scope = captured_scope def add_weight(self, name, shape, dtype=None, initializer=None, regularizer=None, trainable=None, constraint=None, use_resource=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.compat.v1.VariableAggregation.NONE, partitioner=None, **kwargs): """Adds a new variable to the layer, or gets an existing one; returns it. Args: name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. initializer: initializer instance (callable). regularizer: regularizer instance (callable). trainable: whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean, stddev). Note, if the current variable scope is marked as non-trainable then this parameter is ignored and any added variables are also marked as non-trainable. `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. constraint: constraint instance (callable). use_resource: Whether to use `ResourceVariable`. 
synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. partitioner: (optional) partitioner instance (callable). If provided, when the requested variable is created it will be split into multiple partitions according to `partitioner`. In this case, an instance of `PartitionedVariable` is returned. Available partitioners include `tf.compat.v1.fixed_size_partitioner` and `tf.compat.v1.variable_axis_size_partitioner`. For more details, see the documentation of `tf.compat.v1.get_variable` and the "Variable Partitioners and Sharding" section of the API guide. **kwargs: Additional keyword arguments. Returns: The created variable. Usually either a `Variable` or `ResourceVariable` instance. If `partitioner` is not `None`, a `PartitionedVariable` instance is returned. Raises: RuntimeError: If called with partitioned variable regularization and eager execution is enabled. ValueError: When trainable has been set to True with synchronization set as `ON_READ`. 
""" for kwarg in kwargs: if kwarg != 'experimental_autocast': raise TypeError('Unknown keyword argument:', kwarg) if self._keras_style: return super(Layer, self).add_weight( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable and self.trainable, constraint=constraint, use_resource=use_resource, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.compat.v1.VariableAggregation.NONE, partitioner=partitioner, **kwargs) if synchronization == tf.VariableSynchronization.ON_READ: if trainable: raise ValueError( 'Synchronization value can be set to ' 'VariableSynchronization.ON_READ only for non-trainable variables. ' 'You have specified trainable=True and ' 'synchronization=VariableSynchronization.ON_READ.') else: # Set trainable to be false when variable is to be synced on read. trainable = False elif trainable is None: trainable = True def _should_add_regularizer(variable, existing_variable_set): if base_layer_utils.is_split_variable(variable): for var in variable: if var in existing_variable_set: return False return True else: return variable not in existing_variable_set init_graph = None if not tf.executing_eagerly(): default_graph = tf.compat.v1.get_default_graph() if default_graph.building_function: with tf.init_scope(): # Retrieve the variables from the graph into which variables # will be lifted; if initialization ops will be lifted into # the eager context, then there is nothing to retrieve, since variable # collections are not supported when eager execution is enabled. if not tf.executing_eagerly(): init_graph = tf.compat.v1.get_default_graph() existing_variables = set(tf.compat.v1.global_variables()) else: # Initialization ops will not be lifted out of the default graph. 
init_graph = default_graph existing_variables = set(tf.compat.v1.global_variables()) if dtype is None: dtype = self.dtype or tf.float32 self._set_scope(None) reuse = self.built or self._reuse prev_len_trainable = len(self._trainable_weights) with tf.compat.v1.variable_scope( self._scope, reuse=reuse, auxiliary_name_scope=False) as scope: self._current_scope = scope with backend.name_scope(self._name_scope()): # pylint: disable=not-callable use_resource = (use_resource or self._use_resource_variables or scope.use_resource) if initializer is None: initializer = scope.initializer variable = super(Layer, self).add_weight( name, shape, dtype=tf.as_dtype(dtype), initializer=initializer, trainable=trainable and self.trainable, constraint=constraint, partitioner=partitioner, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, getter=tf.compat.v1.get_variable, **kwargs) if regularizer: if (tf.compat.v1.executing_eagerly_outside_functions() or _should_add_regularizer(variable, existing_variables)): self._handle_weight_regularization(name, variable, regularizer) var_store = vs._get_default_variable_store() # pylint: disable=protected-access # When the shim to get variable scope working in TF2 is used, # We need to explicitly make the shim track the regularization # losses as the collections will not be accessible. if hasattr(var_store, 'add_regularizer'): var_store.add_regularizer(variable, regularizer) if init_graph is not None: # Handle edge case where a custom getter has overridden `trainable`. # There is one known occurrence of this, in unit test # testBasicRNNCellNotTrainable in # contrib.rnn.python.kernel_tests.core_rnn_cell_test with init_graph.as_default(): trainable_variables = tf.compat.v1.trainable_variables() if (trainable and self.trainable and variable not in trainable_variables): # A custom getter / variable scope overrode the trainable flag. 
extra_trainable_vars = self._trainable_weights[prev_len_trainable:] self._trainable_weights = self._trainable_weights[ :prev_len_trainable] self._non_trainable_weights += extra_trainable_vars return variable def __call__(self, inputs, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. Args: inputs: input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. **Note**: kwarg `scope` is reserved for use by the layer. Returns: Output tensor(s). Note: - If the layer's `call` method takes a `scope` keyword argument, this argument will be automatically set to the current variable scope. - If the layer's `call` method takes a `mask` argument (as some Keras layers do), its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support. Raises: ValueError: if the layer's `call` method returns None (an invalid value). """ scope = kwargs.pop('scope', None) if self._keras_style: if scope is not None: raise ValueError( 'scope argument not allowed when keras style layers are enabled, ' 'but saw: {}'.format(scope)) return super(Layer, self).__call__(inputs, *args, **kwargs) self._set_scope(scope) if self.built: try: # Some classes which inherit from Layer do not use its constructor, so # rather than initializing to None we check for an AttributeError. scope_context_manager = self._always_reuse_variable_scope # pylint: disable=access-member-before-definition except AttributeError: scope_context_manager = None if scope_context_manager is None: # From this point we will always set reuse=True, so create a "final" # variable scope with this setting. We avoid re-creating variable scopes # after this point as an optimization. 
scope_context_manager = tf.compat.v1.variable_scope( self._scope, reuse=True, auxiliary_name_scope=False) # Do not cache variable scopes if Eager mode is enabled. If Eager mode # is enabled then we don't want to reuse scopes because the cached scope # might be from a FuncGraph or Eager scope we are no longer in. if not tf.compat.v1.executing_eagerly_outside_functions(): self._always_reuse_variable_scope = scope_context_manager else: scope_context_manager = tf.compat.v1.variable_scope( self._scope, reuse=self._reuse, auxiliary_name_scope=False) with scope_context_manager as scope: self._current_scope = scope try: call_has_scope_arg = self._call_has_scope_arg except AttributeError: self._call_fn_args = variable_scope_shim.fn_args(self.call) self._call_has_scope_arg = 'scope' in self._call_fn_args call_has_scope_arg = self._call_has_scope_arg if call_has_scope_arg: kwargs['scope'] = scope # Actually call layer outputs = super(Layer, self).__call__(inputs, *args, **kwargs) if not tf.executing_eagerly(): # Update global default collections. _add_elements_to_collection(self.updates, tf.compat.v1.GraphKeys.UPDATE_OPS) return outputs def __deepcopy__(self, memo): no_copy = set(['_graph', '_thread_local', '_metrics_lock']) shallow_copy = set(['_scope', '_always_reuse_variable_scope']) cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if k in no_copy: setattr(result, k, v) elif k in shallow_copy: setattr(result, k, copy.copy(v)) elif base_layer.is_tensor_or_tensor_list(v): setattr(result, k, v) else: setattr(result, k, copy.deepcopy(v, memo)) return result def __setattr__(self, value, name): # By-pass the automatic dependency tracking performed by the parent Layer. super(tf.__internal__.tracking.Trackable, self).__setattr__(value, name) # pylint: disable=bad-super-call @property def _is_legacy_layer(self): """Used by keras to check compatibility. 
This should not be overridden.""" return True def _add_elements_to_collection(elements, collection_list): if tf.executing_eagerly(): raise RuntimeError('Using collections from Layers not supported in Eager ' 'mode. Tried to add %s to %s' % (elements, collection_list)) elements = tf.nest.flatten(elements) collection_list = tf.nest.flatten(collection_list) for name in collection_list: collection = tf.compat.v1.get_collection_ref(name) collection_set = {id(e) for e in collection} for element in elements: if id(element) not in collection_set: collection.append(element)
24,064
38.193811
116
py
keras
keras-master/keras/legacy_tf_layers/pooling_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.pooling.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf from tensorflow.python.framework import test_util from keras.legacy_tf_layers import pooling as pooling_layers class PoolingTest(tf.test.TestCase): def testInvalidDataFormat(self): height, width = 7, 9 images = tf.random.uniform((5, height, width, 3), seed=1) with self.assertRaisesRegex(ValueError, 'data_format'): pooling_layers.max_pooling2d(images, 3, strides=2, data_format='invalid') def testInvalidStrides(self): height, width = 7, 9 images = tf.random.uniform((5, height, width, 3), seed=1) with self.assertRaisesRegex(ValueError, 'strides'): pooling_layers.max_pooling2d(images, 3, strides=(1, 2, 3)) with self.assertRaisesRegex(ValueError, 'strides'): pooling_layers.max_pooling2d(images, 3, strides=None) def testInvalidPoolSize(self): height, width = 7, 9 images = tf.random.uniform((5, height, width, 3), seed=1) with self.assertRaisesRegex(ValueError, 'pool_size'): pooling_layers.max_pooling2d(images, (1, 2, 3), strides=2) with self.assertRaisesRegex(ValueError, 'pool_size'): pooling_layers.max_pooling2d(images, None, strides=2) def testCreateMaxPooling2D(self): height, width = 7, 9 images = tf.random.uniform((5, 
height, width, 4)) layer = pooling_layers.MaxPooling2D([2, 2], strides=2) output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4]) def testCreateAveragePooling2D(self): height, width = 7, 9 images = tf.random.uniform((5, height, width, 4)) layer = pooling_layers.AveragePooling2D([2, 2], strides=2) output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4]) @test_util.run_deprecated_v1 def testCreateMaxPooling2DChannelsFirst(self): height, width = 7, 9 images = tf.random.uniform((5, 2, height, width)) layer = pooling_layers.MaxPooling2D([2, 2], strides=1, data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 2, 6, 8]) @test_util.run_deprecated_v1 def testCreateAveragePooling2DChannelsFirst(self): height, width = 5, 6 images = tf.random.uniform((3, 4, height, width)) layer = pooling_layers.AveragePooling2D((2, 2), strides=(1, 1), padding='valid', data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [3, 4, 4, 5]) @test_util.run_deprecated_v1 def testCreateAveragePooling2DChannelsFirstWithNoneBatch(self): height, width = 5, 6 images = tf.compat.v1.placeholder(dtype='float32', shape=(None, 4, height, width)) layer = pooling_layers.AveragePooling2D((2, 2), strides=(1, 1), padding='valid', data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [None, 4, 4, 5]) def testCreateMaxPooling1D(self): width = 7 channels = 3 images = tf.random.uniform((5, width, channels)) layer = pooling_layers.MaxPooling1D(2, strides=2) output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, width // 2, channels]) def testCreateAveragePooling1D(self): width = 7 channels = 3 images = tf.random.uniform((5, width, channels)) layer = pooling_layers.AveragePooling1D(2, strides=2) output = layer.apply(images) 
self.assertListEqual(output.get_shape().as_list(), [5, width // 2, channels]) def testCreateMaxPooling1DChannelsFirst(self): width = 7 channels = 3 images = tf.random.uniform((5, channels, width)) layer = pooling_layers.MaxPooling1D( 2, strides=2, data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, channels, width // 2]) def testCreateAveragePooling1DChannelsFirst(self): width = 7 channels = 3 images = tf.random.uniform((5, channels, width)) layer = pooling_layers.AveragePooling1D( 2, strides=2, data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, channels, width // 2]) def testCreateMaxPooling3D(self): depth, height, width = 6, 7, 9 images = tf.random.uniform((5, depth, height, width, 4)) layer = pooling_layers.MaxPooling3D([2, 2, 2], strides=2) output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 4, 4]) def testCreateAveragePooling3D(self): depth, height, width = 6, 7, 9 images = tf.random.uniform((5, depth, height, width, 4)) layer = pooling_layers.AveragePooling3D([2, 2, 2], strides=2) output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 4, 4]) def testMaxPooling3DChannelsFirst(self): depth, height, width = 6, 7, 9 images = tf.random.uniform((5, 2, depth, height, width)) layer = pooling_layers.MaxPooling3D( [2, 2, 2], strides=2, data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3, 4]) def testAveragePooling3DChannelsFirst(self): depth, height, width = 6, 7, 9 images = tf.random.uniform((5, 2, depth, height, width)) layer = pooling_layers.AveragePooling3D( [2, 2, 2], strides=2, data_format='channels_first') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3, 4]) def testCreateMaxPooling2DIntegerPoolSize(self): height, width = 7, 9 images = 
tf.random.uniform((5, height, width, 4)) layer = pooling_layers.MaxPooling2D(2, strides=2) output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4]) def testMaxPooling2DPaddingSame(self): height, width = 7, 9 images = tf.random.uniform((5, height, width, 4), seed=1) layer = pooling_layers.MaxPooling2D( images.get_shape()[1:3], strides=2, padding='same') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, 4, 5, 4]) def testCreatePooling2DWithStrides(self): height, width = 6, 8 # Test strides tuple images = tf.random.uniform((5, height, width, 3), seed=1) layer = pooling_layers.MaxPooling2D([2, 2], strides=(2, 2), padding='same') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, height / 2, width / 2, 3]) # Test strides integer layer = pooling_layers.MaxPooling2D([2, 2], strides=2, padding='same') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, height / 2, width / 2, 3]) # Test unequal strides layer = pooling_layers.MaxPooling2D([2, 2], strides=(2, 1), padding='same') output = layer.apply(images) self.assertListEqual(output.get_shape().as_list(), [5, height / 2, width, 3]) if __name__ == '__main__': tf.test.main()
8,336
39.275362
80
py
keras
keras-master/keras/legacy_tf_layers/convolutional.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=g-classes-have-attributes """Contains the convolutional layer classes and their functional aliases.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import warnings from keras import layers as keras_layers from keras.legacy_tf_layers import base from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util.tf_export import tf_export @keras_export(v1=['keras.__internal__.legacy.layers.Conv1D']) @tf_export(v1=['layers.Conv1D']) class Conv1D(keras_layers.Conv1D, base.Layer): """1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. 
strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: An integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. 
trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python conv = tf.compat.v1.layers.Conv1D(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.Conv1D(filters=3, kernels_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv1D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.conv1d']) @tf_export(v1=['layers.conv1d']) def conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, 
bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for 1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Args: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: An integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. 
If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.conv1d(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.Conv1D(filters=3, kernel_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.conv1d` is deprecated and ' 'will be removed in a future version. 
' 'Please Use `tf.keras.layers.Conv1D` instead.') layer = Conv1D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.Conv2D']) @tf_export(v1=['layers.Conv2D']) class Conv2D(keras_layers.Conv2D, base.Layer): """2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. 
data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. 
Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python conv = tf.compat.v1.layers.Conv2D(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.Conv2D(filters=3, kernel_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv2D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.conv2d']) @tf_export(v1=['layers.conv2d']) def conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the 2D convolution layer. 
This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Args: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. 
bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python y = tf.compat.v1.layers.conv2d(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.Conv2D(filters=3, kernel_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.conv2d` is deprecated and ' 'will be removed in a future version. ' 'Please Use `tf.keras.layers.Conv2D` instead.') layer = Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.Conv3D']) @tf_export(v1=['layers.Conv3D']) class Conv3D(keras_layers.Conv3D, base.Layer): """3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. 
strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. dilation_rate: An integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). 
Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python conv = tf.compat.v1.layers.Conv3D(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.Conv3D(filters=3, kernel_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv3D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.conv3d']) @tf_export(v1=['layers.conv3d']) def conv3d(inputs, filters, kernel_size, strides=(1, 
1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the 3D convolution layer. This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Args: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. 
dilation_rate: An integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3D`. 
#### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.conv3d(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.Conv3D(filters=3, kernel_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.conv3d` is deprecated and ' 'will be removed in a future version. ' 'Please Use `tf.keras.layers.Conv3D` instead.') layer = Conv3D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.SeparableConv1D']) @tf_export(v1=['layers.SeparableConv1D']) class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer): """Depthwise separable 1D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. 
Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). 
The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.SeparableConv1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python conv = tf.compat.v1.layers.SeparableConv1D(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.SeparableConv1D(filters=3, kernel_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(SeparableConv1D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.SeparableConv2D']) @tf_export(v1=['layers.SeparableConv2D']) class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer): """Depthwise separable 2D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Args: filters: Integer, the dimensionality of the output space (i.e. 
the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. 
pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.SeparableConv2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python conv = tf.compat.v1.layers.SeparableConv2D(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.SeparableConv2D(filters=3, kernel_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(SeparableConv2D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.separable_conv1d']) @tf_export(v1=['layers.separable_conv1d']) def separable_conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, 
trainable=True, name=None, reuse=None): """Functional interface for the depthwise separable 1D convolution layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. 
pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.SeparableConv1D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python y = tf.compat.v1.layers.separable_conv1d(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.SeparableConv1D(filters=3, kernels_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.separable_conv1d` is deprecated and ' 'will be removed in a future version. ' 'Please Use `tf.keras.layers.SeparableConv1D` instead.') layer = SeparableConv1D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.separable_conv2d']) @tf_export(v1=['layers.separable_conv2d']) def separable_conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for the depthwise separable 2D convolution layer. 
This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. 
use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.SeparableConv2D`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python y = tf.compat.v1.layers.separable_conv2d(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.SeparableConv2D(filters=3, kernels_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.separable_conv2d` is deprecated and ' 'will be removed in a future version. ' 'Please Use `tf.keras.layers.SeparableConv2D` instead.') layer = SeparableConv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.Conv2DTranspose']) @tf_export(v1=['layers.Conv2DTranspose']) class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer): """Transposed 2D convolution layer (sometimes called 2D Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). 
kernel_size: A tuple or list of 2 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. 
trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2DTranspose`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python conv = tf.compat.v1.layers.Conv2DTranspose(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.Conv2DTranspose(filters=3, kernels_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv2DTranspose, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.conv2d_transpose']) @tf_export(v1=['layers.conv2d_transpose']) def conv2d_transpose(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), 
kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for transposed 2D convolution layer. The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. activation: Activation function. Set it to `None` to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If `None`, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. 
bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2DTranspose`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.conv2d_transpose(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.Conv2DTranspose(filters=3, kernels_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.conv2d_transpose` is deprecated and ' 'will be removed in a future version. 
' 'Please Use `tf.keras.layers.Conv2DTranspose` instead.') layer = Conv2DTranspose( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) @keras_export(v1=['keras.__internal__.legacy.layers.Conv3DTranspose']) @tf_export(v1=['layers.Conv3DTranspose']) class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer): """Transposed 3D convolution layer (sometimes called 3D Deconvolution). Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. activation: Activation function. Set it to `None` to maintain a linear activation. 
use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If `None`, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3DTranspose`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python conv = tf.compat.v1.layers.Conv3DTranspose(filters=3, kernel_size=3) ``` After: ```python conv = tf.keras.layers.Conv3DTranspose(filters=3, kernels_size=3) ``` @end_compatibility """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(Conv3DTranspose, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) @keras_export(v1=['keras.__internal__.legacy.layers.conv3d_transpose']) @tf_export(v1=['layers.conv3d_transpose']) def conv3d_transpose(inputs, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.compat.v1.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): """Functional interface for transposed 3D convolution layer. Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 3 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. 
strides: A tuple or list of 3 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. 
Raises: ValueError: if eager execution is enabled. @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3DTranspose`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. Before: ```python y = tf.compat.v1.layers.conv3d_transpose(x, filters=3, kernel_size=3) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input((28, 28, 1)) y = tf.keras.layers.Conv3DTranspose(filters=3, kernels_size=3)(x) model = tf.keras.Model(x, y) ``` @end_compatibility """ warnings.warn('`tf.layers.conv3d_transpose` is deprecated and ' 'will be removed in a future version. ' 'Please Use `tf.keras.layers.Conv3DTranspose` instead.') layer = Conv3DTranspose( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs) # Aliases Convolution1D = Conv1D Convolution2D = Conv2D Convolution3D = Conv3D SeparableConvolution2D = SeparableConv2D Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose convolution1d = conv1d convolution2d = conv2d convolution3d = conv3d separable_convolution2d = separable_conv2d convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose 
convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
80,962
40.733505
80
py
keras
keras-master/keras/legacy_tf_layers/core.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=g-classes-have-attributes
"""Contains the core layers: Dense, Dropout.

Also contains their functional aliases.

These are the legacy `tf.compat.v1.layers` wrappers: each class subclasses the
modern Keras layer plus the legacy `base.Layer` (for v1 variable-scope/reuse
semantics), and each lowercase function is the deprecated functional interface
that constructs the layer and immediately applies it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v2 as tf

import warnings
from keras import layers as keras_layers
from keras.legacy_tf_layers import base
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export


@keras_export(v1=['keras.__internal__.legacy.layers.Dense'])
@tf_export(v1=['layers.Dense'])
class Dense(keras_layers.Dense, base.Layer):
  """Densely-connected layer class.

  This layer implements the operation:
  `outputs = activation(inputs * kernel + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Args:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.compat.v1.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases.
    _reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the kernel matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the kernel matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel_constraint: Constraint function for the kernel matrix.
    bias_constraint: Constraint function for the bias.
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).

  @compatibility(TF2)
  This API is not compatible with eager execution or `tf.function`.

  Please refer to [tf.layers section of the migration guide]
  (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers)
  to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2
  layer is `tf.keras.layers.Dense`.

  #### Structural Mapping to Native TF2

  None of the supported arguments have changed name.

  Before:

  ```python
   dense = tf.compat.v1.layers.Dense(units=3)
  ```

  After:

  ```python
   dense = tf.keras.layers.Dense(units=3)
  ```
  @end_compatibility
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=tf.compat.v1.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    """Pass every argument straight through to `keras.layers.Dense`."""
    # Note: the v1-specific `bias_initializer` default differs from the Keras
    # layer only in being a v1 initializer instance; semantics are zeros.
    super(Dense, self).__init__(units=units,
                                activation=activation,
                                use_bias=use_bias,
                                kernel_initializer=kernel_initializer,
                                bias_initializer=bias_initializer,
                                kernel_regularizer=kernel_regularizer,
                                bias_regularizer=bias_regularizer,
                                activity_regularizer=activity_regularizer,
                                kernel_constraint=kernel_constraint,
                                bias_constraint=bias_constraint,
                                trainable=trainable,
                                name=name,
                                **kwargs)


@keras_export(v1=['keras.__internal__.legacy.layers.dense'])
@tf_export(v1=['layers.dense'])
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=tf.compat.v1.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  This layer implements the operation:
  `outputs = activation(inputs * kernel + bias)`
  where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Args:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.compat.v1.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor the same shape as `inputs` except the last dimension is of
    size `units`.

  Raises:
    ValueError: if eager execution is enabled.

  @compatibility(TF2)
  This API is not compatible with eager execution or `tf.function`.

  Please refer to [tf.layers section of the migration guide]
  (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers)
  to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2
  layer is `tf.keras.layers.Dense`.

  #### Structural Mapping to Native TF2

  None of the supported arguments have changed name.

  Before:

  ```python
   y = tf.compat.v1.layers.dense(x, units=3)
  ```

  After:

  To migrate code using TF1 functional layers use the [Keras Functional API]
  (https://www.tensorflow.org/guide/keras/functional):

  ```python
   x = tf.keras.Input((28,))
   y = tf.keras.layers.Dense(units=3)(x)
   model = tf.keras.Model(x, y)
  ```
  @end_compatibility
  """
  warnings.warn('`tf.layers.dense` is deprecated and '
                'will be removed in a future version. '
                'Please use `tf.keras.layers.Dense` instead.')
  # `_scope`/`_reuse` hook into the legacy base.Layer variable-scope reuse
  # machinery, which is the whole point of this v1 wrapper.
  layer = Dense(units,
                activation=activation,
                use_bias=use_bias,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                kernel_regularizer=kernel_regularizer,
                bias_regularizer=bias_regularizer,
                activity_regularizer=activity_regularizer,
                kernel_constraint=kernel_constraint,
                bias_constraint=bias_constraint,
                trainable=trainable,
                name=name,
                _scope=name,
                _reuse=reuse)
  return layer.apply(inputs)


@keras_export(v1=['keras.__internal__.legacy.layers.Dropout'])
@tf_export(v1=['layers.Dropout'])
class Dropout(keras_layers.Dropout, base.Layer):
  """Applies Dropout to the input.

  Dropout consists in randomly setting a fraction `rate` of input units to 0
  at each update during training time, which helps prevent overfitting.
  The units that are kept are scaled by `1 / (1 - rate)`, so that their
  sum is unchanged at training time and inference time.

  Args:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed`.
      for behavior.
    name: The name of the layer (string).

  @compatibility(TF2)
  This API is not compatible with eager execution or `tf.function`.

  Please refer to [tf.layers section of the migration guide]
  (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers)
  to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2
  layer is `tf.keras.layers.Dropout`.

  #### Structural Mapping to Native TF2

  None of the supported arguments have changed name.

  Before:

  ```python
   dropout = tf.compat.v1.layers.Dropout()
  ```

  After:

  ```python
   dropout = tf.keras.layers.Dropout()
  ```
  @end_compatibility
  """

  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    """Pass every argument straight through to `keras.layers.Dropout`."""
    super(Dropout, self).__init__(rate=rate,
                                  noise_shape=noise_shape,
                                  seed=seed,
                                  name=name,
                                  **kwargs)

  def call(self, inputs, training=False):
    # Unlike the Keras layer, `training` defaults to False here, so a bare
    # call runs in inference mode (no units dropped).
    return super(Dropout, self).call(inputs, training=training)


@keras_export(v1=['keras.__internal__.legacy.layers.dropout'])
@tf_export(v1=['layers.dropout'])
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
  """Applies Dropout to the input.

  Dropout consists in randomly setting a fraction `rate` of input units to 0
  at each update during training time, which helps prevent overfitting.
  The units that are kept are scaled by `1 / (1 - rate)`, so that their
  sum is unchanged at training time and inference time.

  Args:
    inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (apply dropout) or in inference mode (return the input untouched).
    name: The name of the layer (string).

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.

  @compatibility(TF2)
  This API is not compatible with eager execution or `tf.function`.

  Please refer to [tf.layers section of the migration guide]
  (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers)
  to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2
  layer is `tf.keras.layers.Dropout`.

  #### Structural Mapping to Native TF2

  None of the supported arguments have changed name.

  Before:

  ```python
   y = tf.compat.v1.layers.dropout(x)
  ```

  After:

  To migrate code using TF1 functional layers use the [Keras Functional API]
  (https://www.tensorflow.org/guide/keras/functional):

  ```python
   x = tf.keras.Input((28, 28, 1))
   y = tf.keras.layers.Dropout()(x)
   model = tf.keras.Model(x, y)
  ```
  @end_compatibility
  """
  warnings.warn('`tf.layers.dropout` is deprecated and '
                'will be removed in a future version. '
                'Please use `tf.keras.layers.Dropout` instead.')
  layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
  return layer.apply(inputs, training=training)


@keras_export(v1=['keras.__internal__.legacy.layers.Flatten'])
@tf_export(v1=['layers.Flatten'])
class Flatten(keras_layers.Flatten, base.Layer):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Args:
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.

  Examples:

  ```
    x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
    y = Flatten()(x)
    # now `y` has shape `(None, 16)`

    x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
    y = Flatten()(x)
    # now `y` has shape `(None, None)`
  ```

  @compatibility(TF2)
  This API is not compatible with eager execution or `tf.function`.

  Please refer to [tf.layers section of the migration guide]
  (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers)
  to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2
  layer is `tf.keras.layers.Flatten`.

  #### Structural Mapping to Native TF2

  None of the supported arguments have changed name.

  Before:

  ```python
   flatten = tf.compat.v1.layers.Flatten()
  ```

  After:

  ```python
   flatten = tf.keras.layers.Flatten()
  ```
  @end_compatibility
  """
  # No overrides needed: the Keras implementation plus the legacy base.Layer
  # mixin already provide the full v1 behavior.
  pass


@keras_export(v1=['keras.__internal__.legacy.layers.flatten'])
@tf_export(v1=['layers.flatten'])
def flatten(inputs, name=None, data_format='channels_last'):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Args:
    inputs: Tensor input.
    name: The name of the layer (string).
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.

  Returns:
    Reshaped tensor.

  Examples:

  ```
    x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
    y = flatten(x)
    # now `y` has shape `(None, 16)`

    x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
    y = flatten(x)
    # now `y` has shape `(None, None)`
  ```

  @compatibility(TF2)
  This API is not compatible with eager execution or `tf.function`.

  Please refer to [tf.layers section of the migration guide]
  (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers)
  to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2
  layer is `tf.keras.layers.Flatten`.

  #### Structural Mapping to Native TF2

  None of the supported arguments have changed name.

  Before:

  ```python
   y = tf.compat.v1.layers.flatten(x)
  ```

  After:

  To migrate code using TF1 functional layers use the [Keras Functional API]
  (https://www.tensorflow.org/guide/keras/functional):

  ```python
   x = tf.keras.Input((28, 28, 1))
   y = tf.keras.layers.Flatten()(x)
   model = tf.keras.Model(x, y)
  ```
  @end_compatibility
  """
  warnings.warn('`tf.layers.flatten` is deprecated and '
                'will be removed in a future version. '
                'Please use `tf.keras.layers.Flatten` instead.')
  layer = Flatten(name=name, data_format=data_format)
  return layer.apply(inputs)


# Aliases

FullyConnected = Dense
fully_connected = dense
18,386
34.089695
80
py
keras
keras-master/keras/legacy_tf_layers/normalization_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.normalization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import os import numpy as np from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.framework import test_util from keras.legacy_tf_layers import convolutional as conv_layers from keras.legacy_tf_layers import normalization as normalization_layers @test_util.run_v1_only('b/120545219') class BNTest(tf.test.TestCase): def _simple_model(self, image, fused, freeze_mode): output_channels, kernel_size = 2, 3 conv = conv_layers.conv2d( image, output_channels, kernel_size, use_bias=False, kernel_initializer=tf.compat.v1.ones_initializer()) bn_layer = normalization_layers.BatchNormalization(fused=fused) bn_layer._bessels_correction_test_only = False training = not freeze_mode bn = bn_layer.apply(conv, training=training) loss = tf.reduce_sum(tf.abs(bn)) optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.01) if not freeze_mode: update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(loss) else: train_op = optimizer.minimize(loss) saver = tf.compat.v1.train.Saver(write_version=saver_pb2.SaverDef.V2) return loss, 
train_op, saver def _train(self, checkpoint_path, shape, use_gpu, is_fused, restore=False, freeze_mode=False, dtype=tf.float32): tf.compat.v1.reset_default_graph() graph = tf.compat.v1.get_default_graph() with self.session(graph=graph, use_gpu=use_gpu) as sess: image = tf.compat.v1.placeholder(dtype=dtype, shape=shape) loss, train_op, saver = self._simple_model(image, is_fused, freeze_mode) if restore: saver.restore(sess, checkpoint_path) else: self.evaluate(tf.compat.v1.global_variables_initializer()) np.random.seed(0) for _ in range(2): image_val = np.random.rand(*shape).astype(dtype.as_numpy_dtype) sess.run([loss, train_op], feed_dict={image: image_val}) if restore: all_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES) all_vars_values = [var.eval() for var in all_vars] return all_vars_values else: saver.save(sess, checkpoint_path) def _infer(self, checkpoint_path, image_val, shape, use_gpu, is_fused): dtype = image_val.dtype tf.compat.v1.reset_default_graph() graph = tf.compat.v1.get_default_graph() with self.session(graph=graph, use_gpu=use_gpu) as sess: image = tf.compat.v1.placeholder(dtype=dtype, shape=shape) loss, _, saver = self._simple_model(image, is_fused, True) saver.restore(sess, checkpoint_path) loss_val = sess.run(loss, feed_dict={image: image_val}) return loss_val def _trainEvalSequence(self, dtype, train1_use_gpu, train2_use_gpu, infer_use_gpu): batch, height, width, input_channels = 2, 4, 5, 3 shape = [batch, height, width, input_channels] # Not all characters in a dtype string representation are allowed in # filenames in all operating systems. This map will sanitize these. 
dtype_to_valid_fn = { tf.float16: 'float16', tf.float32: 'float32', } checkpoint = os.path.join( self.get_temp_dir(), 'cp_%s_%s_%s_%s' % ( dtype_to_valid_fn[dtype], train1_use_gpu, train2_use_gpu, infer_use_gpu)) self._train( checkpoint, shape, use_gpu=train1_use_gpu, is_fused=True, restore=False, freeze_mode=False, dtype=dtype) train_vars = self._train( checkpoint, shape, use_gpu=train2_use_gpu, is_fused=True, restore=True, freeze_mode=False, dtype=dtype) np.random.seed(0) image_val = np.random.rand(batch, height, width, input_channels).astype( dtype.as_numpy_dtype) loss_val = self._infer( checkpoint, image_val, shape, use_gpu=infer_use_gpu, is_fused=True) return train_vars, loss_val def testHalfPrecision(self): ref_vars, ref_loss = self._trainEvalSequence( dtype=tf.float32, train1_use_gpu=True, train2_use_gpu=True, infer_use_gpu=True) self.assertEqual(len(ref_vars), 5) for train1_use_gpu in [True, False]: for train2_use_gpu in [True, False]: for infer_use_gpu in [True, False]: test_vars, test_loss = self._trainEvalSequence( tf.float16, train1_use_gpu, train2_use_gpu, infer_use_gpu) self.assertEqual(len(test_vars), 5) for test_var, ref_var in zip(test_vars, ref_vars): self.assertAllClose(test_var, ref_var, rtol=1.e-3, atol=1.e-3) self.assertAllClose(test_loss, ref_loss, rtol=1.e-3, atol=1.e-3) def _testCheckpoint(self, is_fused_checkpoint_a, is_fused_checkpoint_b, use_gpu_checkpoint_a, use_gpu_checkpoint_b, use_gpu_test_a, use_gpu_test_b, freeze_mode): batch, height, width, input_channels = 2, 4, 5, 3 shape = [batch, height, width, input_channels] base_path = '%s_%s_%s_%s_%s_%s' % (is_fused_checkpoint_a, is_fused_checkpoint_b, use_gpu_checkpoint_a, use_gpu_checkpoint_b, use_gpu_test_a, use_gpu_test_b) checkpoint_path_a = os.path.join(self.get_temp_dir(), 'checkpoint_a_%s' % base_path) self._train( checkpoint_path_a, shape, use_gpu_checkpoint_a, is_fused_checkpoint_a, restore=False, freeze_mode=freeze_mode) checkpoint_path_b = os.path.join(self.get_temp_dir(), 
'checkpoint_b_%s' % base_path) self._train( checkpoint_path_b, shape, use_gpu_checkpoint_b, is_fused_checkpoint_b, restore=False, freeze_mode=freeze_mode) vars_fused = self._train( checkpoint_path_a, shape, use_gpu_test_a, True, restore=True, freeze_mode=freeze_mode) vars_nonfused = self._train( checkpoint_path_b, shape, use_gpu_test_b, False, restore=True, freeze_mode=freeze_mode) self.assertEqual(len(vars_fused), 5) self.assertEqual(len(vars_nonfused), 5) for var_fused, var_nonfused in zip(vars_fused, vars_nonfused): self.assertAllClose(var_fused, var_nonfused, atol=1e-5) image_val = np.random.rand(batch, height, width, input_channels).astype(np.float32) loss_fused_val = self._infer(checkpoint_path_a, image_val, shape, use_gpu_test_a, True) loss_nonfused_val = self._infer(checkpoint_path_b, image_val, shape, use_gpu_test_b, False) self.assertAllClose(loss_fused_val, loss_nonfused_val, atol=1e-6, rtol=3e-4) def _testCheckpointCrossDevice(self, ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused, ckpt_b_use_gpu): for use_gpu_test_a in [True, False]: for use_gpu_test_b in [True, False]: for freeze_mode in [True, False]: self._testCheckpoint(ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused, ckpt_b_use_gpu, use_gpu_test_a, use_gpu_test_b, freeze_mode) def testCheckpointFusedCPUAndFusedGPU(self): self._testCheckpointCrossDevice(True, False, True, True) def testCheckpointFusedCPUAndFusedCPU(self): self._testCheckpointCrossDevice(True, False, True, False) def testCheckpointFusedGPUAndFusedGPU(self): self._testCheckpointCrossDevice(True, True, True, True) def testCheckpointNonFusedCPUAndNonFusedGPU(self): self._testCheckpointCrossDevice(False, False, False, True) def testCheckpointNonFusedCPUAndNonFusedCPU(self): self._testCheckpointCrossDevice(False, False, False, False) def testCheckpointNonFusedGPUAndNonFusedGPU(self): self._testCheckpointCrossDevice(False, True, False, True) def testCheckpointNonFusedGPUAndFusedGPU(self): self._testCheckpointCrossDevice(False, True, True, True) def 
testCheckpointNonFusedGPUAndFusedCPU(self): self._testCheckpointCrossDevice(False, True, True, False) def testCheckpointNonFusedCPUAndFusedCPU(self): self._testCheckpointCrossDevice(False, False, True, False) def testCreateBN(self): # Call layer. bn = normalization_layers.BatchNormalization(axis=1) inputs = tf.random.uniform((5, 4, 3), seed=1) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 4) self.assertEqual(len(bn.trainable_variables), 2) self.assertEqual(len(bn.non_trainable_variables), 2) # Test that updates were created and added to UPDATE_OPS. self.assertEqual(len(bn.updates), 2) self.assertListEqual( tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS), bn.updates) # Test that weights were created and added to TRAINABLE_VARIABLES. self.assertListEqual( tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES), bn.trainable_variables) def testCreateFusedBNFloat16(self): # Call layer. bn = normalization_layers.BatchNormalization(axis=1, fused=True) inputs = tf.random.uniform( (5, 4, 3, 3), seed=1, dtype=tf.float16) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 4) self.assertEqual(len(bn.trainable_variables), 2) self.assertEqual(len(bn.non_trainable_variables), 2) for var in bn.variables: self.assertTrue(var.dtype._is_ref_dtype) # Test that updates were created and added to UPDATE_OPS. self.assertEqual(len(bn.updates), 2) self.assertListEqual( tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS), bn.updates) # Test that weights were created and added to TRAINABLE_VARIABLES. 
self.assertListEqual( tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES), bn.trainable_variables) def test3DInputAxis1(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1)) np_beta = np.reshape(np_beta, (1, 4, 1)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2)) std = np.std(np_inputs, axis=(0, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test3DInputAxis2(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=2, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 3)) np_beta = np.reshape(np_beta, (1, 1, 3)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1)) std = np.std(np_inputs, axis=(0, 1)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis1(self): if tf.test.is_gpu_available(cuda_only=True): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1, 1)) np_beta = np.reshape(np_beta, (1, 4, 1, 1)) for _ in range(100): np_output, _, _ = sess.run( [outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2, 3)) std = np.std(np_inputs, axis=(0, 2, 3)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis2(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=2, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 3, 1)) np_beta = np.reshape(np_beta, (1, 1, 3, 1)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 3)) std = np.std(np_inputs, axis=(0, 1, 3)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis3(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=3, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis3Fused(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=3, epsilon=epsilon, momentum=0.9, fused=True) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run( [outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test4DInputAxis1Fused(self): if tf.test.is_gpu_available(cuda_only=True): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9, fused=True) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1, 1)) np_beta = np.reshape(np_beta, (1, 4, 1, 1)) for _ in range(100): np_output, _, _ = sess.run( [outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2, 3)) std = np.std(np_inputs, axis=(0, 2, 3)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testNegativeAxis(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=-1, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testBooleanLearningPhase(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=-1, epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3, 6)) + 100, dtype=tf.float32) outputs_training = bn.apply(inputs, training=True) outputs_infer = bn.apply(inputs, training=False) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs_training] + bn.updates) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1, 2)) std = np.std(np_inputs, axis=(0, 1, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = self.evaluate(outputs_infer) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testFunctionalNoReuse(self): inputs = tf.Variable( np.random.random((5, 4, 3, 6)), dtype=tf.float32) epsilon = 1e-3 training = tf.compat.v1.placeholder(dtype='bool') outputs = normalization_layers.batch_norm( inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training, name='bn') updates = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) all_vars = dict([(v.name, v) for v in tf.compat.v1.global_variables()]) moving_mean = all_vars['bn/moving_mean:0'] moving_variance = all_vars['bn/moving_variance:0'] beta = all_vars['bn/beta:0'] gamma = all_vars['bn/gamma:0'] with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([gamma, beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) for _ in range(100): np_output, _, _ = sess.run([outputs] + updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. np_moving_mean, np_moving_var = self.evaluate( [moving_mean, moving_variance]) np_inputs = self.evaluate(inputs) np_mean = np.mean(np_inputs, axis=(0, 1, 2)) np_std = np.std(np_inputs, axis=(0, 1, 2)) np_variance = np.square(np_std) self.assertAllClose(np_mean, np_moving_mean, atol=1e-2) self.assertAllClose(np_variance, np_moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testFunctionalReuse(self): inputs1 = tf.Variable( np.random.random((5, 4, 3, 6)), dtype=tf.float32) inputs2 = tf.Variable( np.random.random((5, 4, 3, 6)), dtype=tf.float32) epsilon = 1e-3 training = tf.compat.v1.placeholder(dtype='bool') _ = normalization_layers.batch_norm( inputs1, axis=-1, momentum=0.9, epsilon=epsilon, training=training, name='bn') outputs2 = normalization_layers.batch_norm( inputs2, axis=-1, momentum=0.9, epsilon=epsilon, training=training, name='bn', reuse=True) # Last 2 update ops updates = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)[-2:] all_vars = dict([(v.name, v) for v in tf.compat.v1.global_variables()]) moving_mean = all_vars['bn/moving_mean:0'] moving_variance = all_vars['bn/moving_variance:0'] beta = all_vars['bn/beta:0'] gamma = all_vars['bn/gamma:0'] with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(100): np_output, _, _ = sess.run([outputs2] + updates, feed_dict={training: True}) # Verify that the statistics are updated during training. np_moving_mean, np_moving_var = self.evaluate( [moving_mean, moving_variance]) np_inputs = self.evaluate(inputs2) np_mean = np.mean(np_inputs, axis=(0, 1, 2)) np_std = np.std(np_inputs, axis=(0, 1, 2)) np_variance = np.square(np_std) self.assertAllClose(np_mean, np_moving_mean, atol=1e-2) self.assertAllClose(np_variance, np_moving_var, atol=1e-2) # Verify that the axis is normalized during training. 
np_gamma, np_beta = self.evaluate([gamma, beta]) np_gamma = np.reshape(np_gamma, (1, 1, 1, 6)) np_beta = np.reshape(np_beta, (1, 1, 1, 6)) normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Test inference with placeholder learning phase. np_output = sess.run(outputs2, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testFunctionalReuseFromScope(self): inputs = tf.Variable( np.random.random((5, 4, 3, 6)), dtype=tf.float32) epsilon = 1e-3 training = tf.compat.v1.placeholder(dtype='bool') with tf.compat.v1.variable_scope('scope'): _ = normalization_layers.batch_norm( inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training) self.assertEqual(len(tf.compat.v1.global_variables()), 5) with tf.compat.v1.variable_scope('scope', reuse=True): _ = normalization_layers.batch_norm( inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training) self.assertEqual(len(tf.compat.v1.global_variables()), 5) def testNoCenter(self): bn = normalization_layers.BatchNormalization(axis=1, center=False) inputs = tf.random.uniform((5, 4, 3), seed=1) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. 
self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 3) self.assertEqual(len(bn.trainable_variables), 1) self.assertEqual(len(bn.non_trainable_variables), 2) def testNoScale(self): bn = normalization_layers.BatchNormalization(axis=1, scale=False) inputs = tf.random.uniform((5, 4, 3), seed=1) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 3) self.assertEqual(len(bn.trainable_variables), 1) self.assertEqual(len(bn.non_trainable_variables), 2) def testRegularizers(self): reg = lambda x: 0.1 * tf.reduce_sum(x) bn = normalization_layers.BatchNormalization(axis=1, beta_regularizer=reg) inputs = tf.random.uniform((5, 4, 3), seed=1) training = tf.compat.v1.placeholder(dtype='bool') _ = bn.apply(inputs, training=training) self.assertEqual(len(bn.losses), 1) bn = normalization_layers.BatchNormalization(axis=1, gamma_regularizer=reg) inputs = tf.random.uniform((5, 4, 3), seed=1) training = tf.compat.v1.placeholder(dtype='bool') _ = bn.apply(inputs, training=training) self.assertEqual(len(bn.losses), 1) def testConstraints(self): g_constraint = lambda x: x / tf.reduce_sum(x) b_constraint = lambda x: x / tf.reduce_max(x) bn = normalization_layers.BatchNormalization(axis=1, gamma_constraint=g_constraint, beta_constraint=b_constraint) inputs = tf.random.uniform((5, 4, 3), seed=1) bn(inputs) self.assertEqual(bn.gamma_constraint, g_constraint) self.assertEqual(bn.beta_constraint, b_constraint) def testRenorm(self): shape = (4, 3) xt = tf.compat.v1.placeholder(tf.float32, shape) momentum = 0.99 renorm_momentum = 0.8 rmax = 1.1 rmin = 0.9 dmax = 0.1 gamma = 2. beta = 3. 
epsilon = 0.001 bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=tf.compat.v1.constant_initializer(gamma), beta_initializer=tf.compat.v1.constant_initializer(beta), epsilon=epsilon, momentum=momentum, renorm=True, renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax}, renorm_momentum=renorm_momentum) training = tf.compat.v1.placeholder(tf.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_stddev = 1. renorm_mean = 0. renorm_stddev = 1. with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) mean = x.mean(0) variance = x.var(0) stddev = np.sqrt(variance + epsilon) r = (stddev / renorm_stddev).clip(rmin, rmax) d = ((mean - renorm_mean) / renorm_stddev).clip(-dmax, dmax) y_train = ((x - mean) / stddev * r + d) * gamma + beta renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum) renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum) moving_mean += (mean - moving_mean) * (1. - momentum) moving_stddev += (stddev - moving_stddev) * (1. - momentum) y_test = ((x - moving_mean) / (moving_stddev * moving_stddev)**0.5 * gamma) + beta yt_val_train, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: True}) yt_val_test, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False}) self.assertAllClose(y_train, yt_val_train, atol=1e-5) self.assertAllClose(y_test, yt_val_test, atol=1e-5) def testRenormNoClippingSameMomentumGivesSameTestTrain(self): shape = (4, 3) xt = tf.compat.v1.placeholder(tf.float32, shape) momentum = 0.9 renorm_momentum = 0.9 gamma = 2. beta = 3. 
epsilon = 0.001 bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=tf.compat.v1.constant_initializer(gamma), beta_initializer=tf.compat.v1.constant_initializer(beta), epsilon=epsilon, momentum=momentum, renorm=True, renorm_clipping=None, renorm_momentum=momentum) training = tf.compat.v1.placeholder(tf.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_stddev = 1. renorm_mean = 0. renorm_stddev = 1. with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for step in range(6): x = np.random.random(shape) mean = x.mean(0) variance = x.var(0) stddev = np.sqrt(variance + epsilon) r = (stddev / renorm_stddev) d = ((mean - renorm_mean) / renorm_stddev) y_test = ((x - moving_mean) / (moving_stddev * moving_stddev)**0.5 * gamma) + beta y_train = ((x - mean) / stddev * r + d) * gamma + beta renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum) renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum) moving_mean += (mean - moving_mean) * (1. - momentum) moving_stddev += (stddev - moving_stddev) * (1. - momentum) # Compute test values first, before the train mode updates the moving # averages. yt_val_test, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False}) yt_val_train, _, _ = sess.run([yt] + bn.updates, feed_dict={xt: x, training: True}) # Due to initialization inconsistencies, values may not be identical # on the first iteration (but shouldn't be different by much more than # epsilon). After the first iteration they should be identical. atol = epsilon * 1.5 if step == 0 else 1e-5 self.assertAllClose(y_train, yt_val_train, atol=atol) self.assertAllClose(y_test, yt_val_test, atol=atol) self.assertAllClose(yt_val_train, yt_val_test, atol=atol) def testAdjustment(self): shape = (4, 3) xt = tf.compat.v1.placeholder(tf.float32, shape) momentum = 0.99 gamma = 2. beta = 3. 
epsilon = 0.001 adjust_scale = tf.random.uniform(shape[-1:], 0.5, 1.5) adjust_bias = tf.random.uniform(shape[-1:], -.2, .2) bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=tf.compat.v1.constant_initializer(gamma), beta_initializer=tf.compat.v1.constant_initializer(beta), epsilon=epsilon, momentum=momentum, adjustment=lambda _: (adjust_scale, adjust_bias)) training = tf.compat.v1.placeholder(tf.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_variance = 1. with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) yt_val_train, adj_scale_val, adj_bias_val = sess.run( [yt, adjust_scale, adjust_bias] + bn.updates, feed_dict={xt: x, training: True})[:3] yt_val_test = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False})[0] mean = x.mean(0) variance = x.var(0) y_train = (((x - mean) / (variance + epsilon) ** 0.5) * adj_scale_val + adj_bias_val) * gamma + beta moving_mean += (mean - moving_mean) * (1. - momentum) moving_variance += (variance - moving_variance) * (1. - momentum) y_test = ((x - moving_mean) / (moving_variance + epsilon) ** 0.5 * gamma) + beta self.assertAllClose(y_train, yt_val_train, atol=1e-5) self.assertAllClose(y_test, yt_val_test, atol=1e-5) def testRenormWithAdjustment(self): shape = (4, 3) xt = tf.compat.v1.placeholder(tf.float32, shape) momentum = 0.99 renorm_momentum = 0.8 rmax = 1.1 rmin = 0.9 dmax = 0.1 gamma = 2. beta = 3. 
epsilon = 0.001 adjust_scale = tf.random.uniform(shape[-1:], 0.5, 1.5) adjust_bias = tf.random.uniform(shape[-1:], -.2, .2) bn = normalization_layers.BatchNormalization( axis=1, gamma_initializer=tf.compat.v1.constant_initializer(gamma), beta_initializer=tf.compat.v1.constant_initializer(beta), epsilon=epsilon, momentum=momentum, renorm=True, renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax}, renorm_momentum=renorm_momentum, adjustment=lambda _: (adjust_scale, adjust_bias)) training = tf.compat.v1.placeholder(tf.bool) yt = bn.apply(xt, training=training) moving_mean = 0. moving_stddev = 1. renorm_mean = 0. renorm_stddev = 1. with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) yt_val_train, adj_scale_val, adj_bias_val = sess.run( [yt, adjust_scale, adjust_bias] + bn.updates, feed_dict={xt: x, training: True})[:3] yt_val_test = sess.run([yt] + bn.updates, feed_dict={xt: x, training: False})[0] mean = x.mean(0) variance = x.var(0) stddev = np.sqrt(variance + epsilon) r = (stddev / renorm_stddev).clip(rmin, rmax) d = ((mean - renorm_mean) / renorm_stddev).clip(-dmax, dmax) y_train = (((x - mean) / stddev * r + d) * adj_scale_val + adj_bias_val) * gamma + beta renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum) renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum) moving_mean += (mean - moving_mean) * (1. - momentum) moving_stddev += (stddev - moving_stddev) * (1. 
- momentum) y_test = ((x - moving_mean) / (moving_stddev * moving_stddev)**0.5 * gamma) + beta self.assertAllClose(y_train, yt_val_train, atol=1e-5) self.assertAllClose(y_test, yt_val_test, atol=1e-5) def testGhostBNNegativeVirtualBatch(self): shape = [6, 5, 4, 3] inp = tf.random.uniform(shape, seed=1) with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, virtual_batch_size=-1) def testGhostBNVirtualBatchFull(self): shape = [6, 5, 4, 3] inp = tf.random.uniform(shape, seed=1) out1 = normalization_layers.batch_normalization(inp) out2 = normalization_layers.batch_normalization( inp, virtual_batch_size=6) self.assertListEqual( out1.shape.as_list(), out2.shape.as_list()) with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) x = np.random.random(shape) y1, y2 = sess.run([out1, out2], feed_dict={inp: x}) self.assertAllClose(y1, y2, atol=1e-5) def testGhostBNInputOutputShapesMatch(self): shape = [6, 4, 3] inp = tf.random.uniform(shape, seed=1) out = normalization_layers.batch_normalization( inp, virtual_batch_size=3) self.assertListEqual(out.shape.as_list(), shape) def testGhostBNUnknownBatchSize(self): np_shape = [10, 5, 4] tf_shape = [None, 5, 4] inp = tf.compat.v1.placeholder(tf.float32, tf_shape) out = normalization_layers.batch_normalization( inp, virtual_batch_size=2) with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) x = np.random.random(np_shape) y = sess.run(out, feed_dict={inp: x}) self.assertListEqual(list(y.shape), np_shape) def testGhostBN2Dims(self): shape = [6, 2] virtual_batch_size = 3 beta = 2. gamma = 3. 
momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([2, 2], dtype=np.float32) moving_vars = np.ones([2, 2], dtype=np.float32) inp = tf.compat.v1.placeholder(tf.float32, shape) is_training = tf.compat.v1.placeholder(tf.bool) bn = normalization_layers.BatchNormalization( momentum=momentum, epsilon=epsilon, beta_initializer=tf.compat.v1.constant_initializer(beta), gamma_initializer=tf.compat.v1.constant_initializer(gamma), virtual_batch_size=virtual_batch_size) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size, shape[1]]) with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=0, keepdims=True) variances = np.var(sub_batched, axis=0, keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. - momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-5) self.assertAllClose(y_test, y_val_test, atol=1e-5) def testGhostBN4DimsAxis3(self): shape = [6, 10, 10, 3] virtual_batch_size = 2 beta = 2. gamma = 3. 
momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 1, 1, 3], dtype=np.float32) moving_vars = np.ones([1, 1, 1, 1, 3], dtype=np.float32) inp = tf.compat.v1.placeholder(tf.float32, shape) is_training = tf.compat.v1.placeholder(tf.bool) bn = normalization_layers.BatchNormalization( axis=3, momentum=momentum, epsilon=epsilon, beta_initializer=tf.compat.v1.constant_initializer(beta), gamma_initializer=tf.compat.v1.constant_initializer(gamma), virtual_batch_size=virtual_batch_size) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 2, 3), keepdims=True) variances = np.var(sub_batched, axis=(0, 2, 3), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. - momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) def testGhostBN4DimsAxis1(self): shape = [6, 3, 10, 10] virtual_batch_size = 2 beta = 2. gamma = 3. 
momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 3, 1, 1], dtype=np.float32) moving_vars = np.ones([1, 1, 3, 1, 1], dtype=np.float32) inp = tf.compat.v1.placeholder(tf.float32, shape) is_training = tf.compat.v1.placeholder(tf.bool) bn = normalization_layers.BatchNormalization( axis=1, momentum=momentum, epsilon=epsilon, beta_initializer=tf.compat.v1.constant_initializer(beta), gamma_initializer=tf.compat.v1.constant_initializer(gamma), virtual_batch_size=virtual_batch_size, fused=False) # NCHW is unsupported by CPU fused batch norm out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True) variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. 
- momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) def testMultiAxisInvalid(self): shape = [6, 5, 4, 3] inp = tf.random.uniform(shape, seed=1) with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, axis=[1, 4]) # out of bounds with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, axis=[-5, 1]) # out of bounds with self.assertRaises(ValueError): normalization_layers.batch_normalization( inp, axis=[1, 2, 1]) # duplicate def test3DInputMultiAxis12(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=[1, 2], epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 4, 3)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. 
moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=0, keepdims=True) std = np.std(np_inputs, axis=0, keepdims=True) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test5DInputMultiAxis123(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=[1, 2, 3], epsilon=epsilon, momentum=0.9) inputs = tf.Variable( np.random.random((5, 3, 4, 4, 3)) + 100, dtype=tf.float32) training = tf.compat.v1.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(tf.compat.v1.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. 
moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 4), keepdims=True) std = np.std(np_inputs, axis=(0, 4), keepdims=True) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testGhostBN5DimsMultiAxis14(self): shape = [6, 3, 10, 10, 4] virtual_batch_size = 3 beta = 2. gamma = 3. momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 3, 1, 1, 4], dtype=np.float32) moving_vars = np.ones([1, 1, 3, 1, 1, 4], dtype=np.float32) inp = tf.compat.v1.placeholder(tf.float32, shape) is_training = tf.compat.v1.placeholder(tf.bool) bn = normalization_layers.BatchNormalization( axis=[1, 4], momentum=momentum, epsilon=epsilon, beta_initializer=tf.compat.v1.constant_initializer(beta), gamma_initializer=tf.compat.v1.constant_initializer(gamma), virtual_batch_size=virtual_batch_size, fused=False) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session() as sess: self.evaluate(tf.compat.v1.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True) variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. 
- momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) if __name__ == '__main__': tf.test.main()
59,542
40.349306
87
py
keras
keras-master/keras/legacy_tf_layers/convolutional_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.convolutional."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v2 as tf

import numpy as np

from keras.legacy_tf_layers import convolutional as conv_layers


class ConvTest(tf.test.TestCase):
  """Tests for the v1 `tf.layers` Conv1D/2D/3D layers and functional aliases."""

  def testInvalidDataFormat(self):
    # Anything other than 'channels_first'/'channels_last' must be rejected.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'data_format'):
      conv_layers.conv2d(images, 32, 3, data_format='invalid')

  def testInvalidStrides(self):
    # Strides must be an int or a 2-tuple for a 2D convolution.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))

    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=None)

  def testInvalidKernelSize(self):
    # Kernel size must be an int or a 2-tuple for a 2D convolution.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, (1, 2, 3))

    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, None)

  def testCreateConv2D(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], activation=tf.nn.relu)
    output = layer.apply(images)
    if not tf.executing_eagerly():
      # Op names are only deterministic when building a graph.
      self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testConv2DFloat16(self):
    # float16 inputs should be accepted without explicit casting.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4), dtype='float16')
    output = conv_layers.conv2d(images, 32, [3, 3], activation=tf.nn.relu)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])

  def testCreateConv2DIntegerKernelSize(self):
    # A scalar kernel size is expanded to a square kernel.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateConv2DChannelsFirst(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, 4, height, width))
      layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height - 2, width - 2])
      self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
      self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testUnknownInputChannels(self):
    # The channel dimension must be statically known to build the kernel.
    with tf.Graph().as_default():
      images = tf.compat.v1.placeholder(tf.float32, (5, 7, 9, None))
      layer = conv_layers.Conv2D(32, [3, 3], activation=tf.nn.relu)
      with self.assertRaisesRegex(
          ValueError, 'The channel dimension of the inputs '
          'should be defined. The input_shape received is'):
        _ = layer.apply(images)

      images = tf.compat.v1.placeholder(tf.float32, (5, None, 7, 9))
      layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
      with self.assertRaisesRegex(
          ValueError, 'The channel dimension of the inputs '
          'should be defined. The input_shape received is'):
        _ = layer.apply(images)

  def testConv2DPaddingSame(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 32), seed=1)
    layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])

  def testCreateConvWithStrides(self):
    height, width = 6, 8
    # Test strides tuple
    images = tf.random.uniform((5, height, width, 3), seed=1)
    layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])

    # Test strides integer
    layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])

    # Test unequal strides
    layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width, 32])

  def testCreateConv1D(self):
    width = 7
    data = tf.random.uniform((5, width, 4))
    layer = conv_layers.Conv1D(32, 3, activation=tf.nn.relu)
    output = layer.apply(data)
    if not tf.executing_eagerly():
      self.assertEqual(output.op.name, 'conv1d/Relu')
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testConv1DFloat16(self):
    # float16 inputs should be accepted without explicit casting.
    width = 7
    data = tf.random.uniform((5, width, 4), dtype='float16')
    output = conv_layers.conv1d(data, 32, 3, activation=tf.nn.relu)
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])

  def testCreateConv1DChannelsFirst(self):
    with tf.Graph().as_default():
      width = 7
      data = tf.random.uniform((5, 4, width))
      layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
      output = layer.apply(data)
      self.assertListEqual(output.get_shape().as_list(), [5, 32, width - 2])
      self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
      self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testUnknownInputChannelsConv1D(self):
    # The channel dimension must be statically known to build the kernel.
    with tf.Graph().as_default():
      data = tf.compat.v1.placeholder(tf.float32, (5, 4, None))
      layer = conv_layers.Conv1D(32, 3, activation=tf.nn.relu)
      with self.assertRaisesRegex(
          ValueError, 'The channel dimension of the inputs '
          'should be defined. The input_shape received is'):
        _ = layer.apply(data)

      data = tf.compat.v1.placeholder(tf.float32, (5, None, 4))
      layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
      with self.assertRaisesRegex(
          ValueError, 'The channel dimension of the inputs '
          'should be defined. The input_shape received is'):
        _ = layer.apply(data)

  def testCreateConv3D(self):
    depth, height, width = 6, 7, 9
    volumes = tf.random.uniform((5, depth, height, width, 4))
    layer = conv_layers.Conv3D(32, [3, 3, 3], activation=tf.nn.relu)
    output = layer.apply(volumes)
    if not tf.executing_eagerly():
      # Op names are only deterministic when building a graph.
      self.assertEqual(output.op.name, 'conv3d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth - 2, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testUnknownInputChannelsConv3D(self):
    with tf.Graph().as_default():
      volumes = tf.compat.v1.placeholder(tf.float32, (5, 6, 7, 9, None))
      layer = conv_layers.Conv3D(32, [3, 3, 3], activation=tf.nn.relu)
      with self.assertRaisesRegex(
          ValueError, 'The channel dimension of the inputs '
          'should be defined. The input_shape received is'):
        _ = layer.apply(volumes)

  def testConv2DKernelRegularizer(self):
    # The regularization loss should be tracked both on the layer and in the
    # v1 REGULARIZATION_LOSSES collection, with identical values.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.Conv2D(32, [3, 3], kernel_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testConv2DBiasRegularizer(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testConv2DNoBias(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(
        32, [3, 3], activation=tf.nn.relu, use_bias=False)
    output = layer.apply(images)
    if not tf.executing_eagerly():
      self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertEqual(layer.bias, None)

  def testDilatedConv2D(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

    # Test tuple dilation rate
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height - 2, 3, 32])

  def testFunctionalConv2DReuse(self):
    # reuse=True must re-bind the existing variables, not create new ones.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 3), seed=1)
      conv_layers.conv2d(images, 32, [3, 3], name='conv1')
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
      conv_layers.conv2d(images, 32, [3, 3], name='conv1', reuse=True)
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)

  def testFunctionalConv2DReuseFromScope(self):
    with tf.Graph().as_default():
      with tf.compat.v1.variable_scope('scope'):
        height, width = 7, 9
        images = tf.random.uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d(images, 32, [3, 3], name='conv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
      with tf.compat.v1.variable_scope('scope', reuse=True):
        conv_layers.conv2d(images, 32, [3, 3], name='conv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)

  def testFunctionalConv2DInitializerFromScope(self):
    # The enclosing variable_scope's initializer should apply to the kernel
    # but not override the bias's default zeros initializer.
    with tf.Graph().as_default(), self.cached_session():
      with tf.compat.v1.variable_scope(
          'scope', initializer=tf.compat.v1.ones_initializer()):
        height, width = 7, 9
        images = tf.random.uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d(images, 32, [3, 3], name='conv1')
        weights = tf.compat.v1.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('kernel' in weights[0].name)
        self.assertTrue('bias' in weights[1].name)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        weights = self.evaluate(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[1], np.zeros((32)))

  def testFunctionalConv2DNoReuse(self):
    # Without reuse, each functional call creates a fresh kernel and bias.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 3), seed=1)
      conv_layers.conv2d(images, 32, [3, 3])
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
      conv_layers.conv2d(images, 32, [3, 3])
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 4)

  def testConstraints(self):
    # Constraint callables should be stored verbatim on the layer.
    # Conv1D
    k_constraint = lambda x: x / tf.reduce_sum(x)
    b_constraint = lambda x: x / tf.reduce_max(x)
    conv1d = conv_layers.Conv1D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = tf.random.uniform((5, 3, 5), seed=1)
    conv1d(inputs)
    self.assertEqual(conv1d.kernel_constraint, k_constraint)
    self.assertEqual(conv1d.bias_constraint, b_constraint)

    # Conv2D
    k_constraint = lambda x: x / tf.reduce_sum(x)
    b_constraint = lambda x: x / tf.reduce_max(x)
    conv2d = conv_layers.Conv2D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = tf.random.uniform((5, 3, 3, 5), seed=1)
    conv2d(inputs)
    self.assertEqual(conv2d.kernel_constraint, k_constraint)
    self.assertEqual(conv2d.bias_constraint, b_constraint)

    # Conv3D
    k_constraint = lambda x: x / tf.reduce_sum(x)
    b_constraint = lambda x: x / tf.reduce_max(x)
    conv3d = conv_layers.Conv3D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = tf.random.uniform((5, 3, 3, 3, 5), seed=1)
    conv3d(inputs)
    self.assertEqual(conv3d.kernel_constraint, k_constraint)
    self.assertEqual(conv3d.bias_constraint, b_constraint)

  def testConv3DChannelsFirst(self):
    # Test case for GitHub issue 15655
    with tf.Graph().as_default():
      images = tf.compat.v1.placeholder(
          dtype=tf.float32, shape=[None, 1, 32, 32, 32])
      conv_layers.conv3d(images, 32, 9, data_format='channels_first')


class SeparableConv1DTest(tf.test.TestCase):
  """Tests for the v1 SeparableConv1D layer and `separable_conv1d` alias."""

  def testInvalidDataFormat(self):
    length = 9
    data = tf.random.uniform((5, length, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'data_format'):
      conv_layers.separable_conv1d(data, 32, 3, data_format='invalid')

  def testInvalidStrides(self):
    length = 9
    data = tf.random.uniform((5, length, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.separable_conv1d(data, 32, 3, strides=(1, 2))

    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.separable_conv1d(data, 32, 3, strides=None)

  def testInvalidKernelSize(self):
    length = 9
    data = tf.random.uniform((5, length, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.separable_conv1d(data, 32, (1, 2))

    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.separable_conv1d(data, 32, None)

  def testCreateSeparableConv1D(self):
    length = 9
    data = tf.random.uniform((5, length, 4))
    layer = conv_layers.SeparableConv1D(32, 3, activation=tf.nn.relu)
    output = layer.apply(data)
    if not tf.executing_eagerly():
      # Op names are only deterministic when building a graph.
      self.assertEqual(output.op.name, 'separable_conv1d/Relu')
    self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
    self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
    self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
    self.assertEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv1DDepthMultiplier(self):
    # depth_multiplier expands the depthwise output (and pointwise input)
    # channel count.
    length = 9
    data = tf.random.uniform((5, length, 4))
    layer = conv_layers.SeparableConv1D(32, 3, depth_multiplier=2)
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
    self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 2])
    self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 8, 32])
    self.assertEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv1DChannelsFirst(self):
    with tf.Graph().as_default():
      length = 9
      data = tf.random.uniform((5, 4, length))
      layer = conv_layers.SeparableConv1D(32, 3, data_format='channels_first')
      output = layer.apply(data)
      self.assertEqual(output.get_shape().as_list(), [5, 32, length - 2])
      self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
      self.assertEqual(layer.pointwise_kernel.get_shape().as_list(),
                       [1, 4, 32])
      self.assertEqual(layer.bias.get_shape().as_list(), [32])

  def testSeparableConv1DPaddingSame(self):
    length = 9
    data = tf.random.uniform((5, length, 32), seed=1)
    layer = conv_layers.SeparableConv1D(64, length, padding='same')
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, length, 64])

  def testCreateSeparableConv1DWithStrides(self):
    length = 10
    data = tf.random.uniform((5, length, 3), seed=1)
    layer = conv_layers.SeparableConv1D(32, 3, strides=2, padding='same')
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, length // 2, 32])

  def testCreateSeparableConv1DWithStridesChannelsFirst(self):
    with tf.Graph().as_default():
      data_format = 'channels_first'
      length = 10
      data = tf.random.uniform((5, 3, length), seed=1)
      layer = conv_layers.SeparableConv1D(
          32, 3, strides=2, padding='same', data_format=data_format)
      output = layer.apply(data)
      self.assertEqual(output.get_shape().as_list(), [5, 32, length // 2])

  def testFunctionalConv1DReuse(self):
    # A separable conv owns 3 variables: depthwise, pointwise, bias.
    with tf.Graph().as_default():
      length = 10
      data = tf.random.uniform((5, length, 3), seed=1)
      conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
      conv_layers.separable_conv1d(data, 32, 3, name='sepconv1', reuse=True)
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)

  def testFunctionalConv1DReuseFromScope(self):
    with tf.Graph().as_default():
      with tf.compat.v1.variable_scope('scope'):
        length = 10
        data = tf.random.uniform((5, length, 3), seed=1)
        conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
      with tf.compat.v1.variable_scope('scope', reuse=True):
        conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)

  def testFunctionalConv1DNoReuse(self):
    with tf.Graph().as_default():
      length = 10
      data = tf.random.uniform((5, length, 3), seed=1)
      conv_layers.separable_conv1d(data, 32, 3)
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
      conv_layers.separable_conv1d(data, 32, 3)
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 6)

  def testSeparableConv1DDepthwiseRegularizer(self):
    # The regularization loss should appear both on the layer and in the v1
    # REGULARIZATION_LOSSES collection, with identical values.
    with tf.Graph().as_default():
      length = 9
      data = tf.random.uniform((5, length, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.SeparableConv1D(32, 3, depthwise_regularizer=reg)
      layer.apply(data)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testSeparableConv1DPointwiseRegularizer(self):
    with tf.Graph().as_default():
      length = 9
      data = tf.random.uniform((5, length, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.SeparableConv1D(32, 3, pointwise_regularizer=reg)
      layer.apply(data)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testSeparableConv1DBiasRegularizer(self):
    with tf.Graph().as_default():
      length = 9
      data = tf.random.uniform((5, length, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.SeparableConv1D(32, 3, bias_regularizer=reg)
      layer.apply(data)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testSeparableConv1DNoBias(self):
    with tf.Graph().as_default():
      length = 9
      data = tf.random.uniform((5, length, 4))
      layer = conv_layers.SeparableConv1D(
          32, 3, activation=tf.nn.relu, use_bias=False)
      output = layer.apply(data)
      self.assertEqual(output.op.name, 'separable_conv1d/Relu')
      self.assertEqual(layer.bias, None)

  def testConstraints(self):
    # Constraint callables should be stored verbatim on the layer.
    d_constraint = lambda x: x / tf.reduce_sum(x)
    p_constraint = lambda x: x / tf.reduce_sum(x)
    b_constraint = lambda x: x / tf.reduce_max(x)
    layer = conv_layers.SeparableConv1D(2, 3,
                                        depthwise_constraint=d_constraint,
                                        pointwise_constraint=p_constraint,
                                        bias_constraint=b_constraint)
    inputs = tf.random.uniform((5, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.depthwise_constraint, d_constraint)
    self.assertEqual(layer.pointwise_constraint, p_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)


class SeparableConv2DTest(tf.test.TestCase):
  """Tests for the v1 SeparableConv2D layer and `separable_conv2d` alias."""

  def testInvalidDataFormat(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'data_format'):
      conv_layers.separable_conv2d(images, 32, 3, data_format='invalid')

  def testInvalidStrides(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))

    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.separable_conv2d(images, 32, 3, strides=None)

  def testInvalidKernelSize(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.separable_conv2d(images, 32, (1, 2, 3))

    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.separable_conv2d(images, 32, None)

  def testCreateSeparableConv2D(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, [3, 3], activation=tf.nn.relu)
    output = layer.apply(images)
    if not tf.executing_eagerly():
      # Op names are only deterministic when building a graph.
      self.assertEqual(output.op.name, 'separable_conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv2DDepthMultiplier(self):
    # depth_multiplier expands the depthwise output (and pointwise input)
    # channel count.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, [3, 3], depth_multiplier=2)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 2])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 8, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv2DIntegerKernelSize(self):
    # A scalar kernel size is expanded to a square kernel.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv2DChannelsFirst(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, 4, height, width))
      layer = conv_layers.SeparableConv2D(
          32, [3, 3], data_format='channels_first')
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height - 2, width - 2])
      self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                           [3, 3, 4, 1])
      self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                           [1, 1, 4, 32])
      self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testSeparableConv2DPaddingSame(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 32), seed=1)
    layer = conv_layers.SeparableConv2D(
        64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])

  def testCreateSeparableConvWithStrides(self):
    with tf.Graph().as_default():
      height, width = 6, 8
      # Test strides tuple
      images = tf.random.uniform((5, height, width, 3), seed=1)
      layer = conv_layers.SeparableConv2D(
          32, [3, 3], strides=(2, 2), padding='same')
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height / 2, width / 2, 32])

      # Test strides integer
      layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2,
                                          padding='same')
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height / 2, width / 2, 32])

      # Test unequal strides
      layer = conv_layers.SeparableConv2D(
          32, [3, 3], strides=(2, 1), padding='same')
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height / 2, width, 32])

  def testCreateSeparableConvWithStridesChannelsFirst(self):
    with tf.Graph().as_default():
      data_format = 'channels_first'
      height, width = 6, 8
      # Test strides tuple
      images = tf.random.uniform((5, 3, height, width), seed=1)
      layer = conv_layers.SeparableConv2D(
          32, [3, 3], strides=(2, 2), padding='same', data_format=data_format)
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height / 2, width / 2])

      # Test strides integer
      layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2,
                                          padding='same',
                                          data_format=data_format)
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height / 2, width / 2])

      # Test unequal strides
      layer = conv_layers.SeparableConv2D(
          32, [3, 3], strides=(2, 1), padding='same', data_format=data_format)
      output = layer.apply(images)
      self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height / 2, width])

  def testFunctionalConv2DReuse(self):
    # A separable conv owns 3 variables: depthwise, pointwise, bias.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 3), seed=1)
      conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
      conv_layers.separable_conv2d(
          images, 32, [3, 3], name='sepconv1', reuse=True)
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)

  def testFunctionalConv2DReuseFromScope(self):
    with tf.Graph().as_default():
      with tf.compat.v1.variable_scope('scope'):
        height, width = 7, 9
        images = tf.random.uniform((5, height, width, 3), seed=1)
        conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
      with tf.compat.v1.variable_scope('scope', reuse=True):
        conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)

  def testFunctionalConv2DInitializerFromScope(self):
    # The enclosing variable_scope's initializer should apply to both kernels
    # but not override the bias's default zeros initializer.
    with tf.Graph().as_default(), self.cached_session():
      with tf.compat.v1.variable_scope(
          'scope', initializer=tf.compat.v1.ones_initializer()):
        height, width = 7, 9
        images = tf.random.uniform((5, height, width, 3), seed=1)
        conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
        weights = tf.compat.v1.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('depthwise_kernel' in weights[0].name)
        self.assertTrue('pointwise_kernel' in weights[1].name)
        self.assertTrue('bias' in weights[2].name)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        weights = self.evaluate(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 1)))
        self.assertAllClose(weights[1], np.ones((1, 1, 3, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[2], np.zeros((32)))

  def testFunctionalConv2DNoReuse(self):
    # Without reuse, each functional call creates a fresh variable triple.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 3), seed=1)
      conv_layers.separable_conv2d(images, 32, [3, 3])
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
      conv_layers.separable_conv2d(images, 32, [3, 3])
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 6)

  def testSeparableConv2DDepthwiseRegularizer(self):
    # The regularization loss should appear both on the layer and in the v1
    # REGULARIZATION_LOSSES collection, with identical values.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.SeparableConv2D(32, [3, 3],
                                          depthwise_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testSeparableConv2DPointwiseRegularizer(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.SeparableConv2D(32, [3, 3],
                                          pointwise_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testSeparableConv2DBiasRegularizer(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.SeparableConv2D(32, [3, 3], bias_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testSeparableConv2DNoBias(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      layer = conv_layers.SeparableConv2D(
          32, [3, 3], activation=tf.nn.relu, use_bias=False)
      output = layer.apply(images)
      self.assertEqual(output.op.name, 'separable_conv2d/Relu')
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height - 2, width - 2, 32])
      self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                           [3, 3, 4, 1])
      self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                           [1, 1, 4, 32])
      self.assertEqual(layer.bias, None)

  def testConstraints(self):
    # Constraint callables should be stored verbatim on the layer.
    d_constraint = lambda x: x / tf.reduce_sum(x)
    p_constraint = lambda x: x / tf.reduce_sum(x)
    b_constraint = lambda x: x / tf.reduce_max(x)
    layer = conv_layers.SeparableConv2D(2, 3,
                                        depthwise_constraint=d_constraint,
                                        pointwise_constraint=p_constraint,
                                        bias_constraint=b_constraint)
    inputs = tf.random.uniform((5, 3, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.depthwise_constraint, d_constraint)
    self.assertEqual(layer.pointwise_constraint, p_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)


class Conv2DTransposeTest(tf.test.TestCase):
  """Tests for the v1 Conv2DTranspose layer and `conv2d_transpose` alias."""

  def testInvalidDataFormat(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'data_format'):
      conv_layers.conv2d_transpose(images, 32, 3, data_format='invalid')

  def testInvalidStrides(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))

    with self.assertRaisesRegex(ValueError, 'strides'):
      conv_layers.conv2d_transpose(images, 32, 3, strides=None)

  def testInvalidKernelSize(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.conv2d_transpose(images, 32, (1, 2, 3))

    with self.assertRaisesRegex(ValueError, 'kernel_size'):
      conv_layers.conv2d_transpose(images, 32, None)

  def testCreateConv2DTranspose(self):
    # A transposed 'valid' conv grows the spatial dims by kernel_size - 1.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.Conv2DTranspose(32, [3, 3], activation=tf.nn.relu)
    output = layer.apply(images)
    if not tf.executing_eagerly():
      # Op names are only deterministic when building a graph.
      self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])
    # Note the transposed kernel layout: (h, w, filters, in_channels).
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testConv2DTransposeFloat16(self):
    # float16 inputs should be accepted without explicit casting.
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4), dtype='float16')
    output = conv_layers.conv2d_transpose(images, 32, [3, 3],
                                          activation=tf.nn.relu)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])

  def testCreateConv2DTransposeIntegerKernelSize(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 4))
    layer = conv_layers.Conv2DTranspose(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateConv2DTransposeChannelsFirst(self):
    height, width = 7, 9
    images = tf.random.uniform((5, 4, height, width))
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height + 2, width + 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testConv2DTransposePaddingSame(self):
    height, width = 7, 9
    images = tf.random.uniform((5, height, width, 32), seed=1)
    layer = conv_layers.Conv2DTranspose(
        64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])

  def testCreateConv2DTransposeWithStrides(self):
    height, width = 6, 8
    # Test strides tuple
    images = tf.random.uniform((5, height, width, 3), seed=1)
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height * 2, width * 2, 32])

    # Test strides integer
    layer = conv_layers.Conv2DTranspose(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height * 2, width * 2, 32])

    # Test unequal strides
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height * 2, width, 32])

  def testConv2DTransposeKernelRegularizer(self):
    # The regularization loss should appear both on the layer and in the v1
    # REGULARIZATION_LOSSES collection, with identical values.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.Conv2DTranspose(32, [3, 3], kernel_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testConv2DTransposeBiasRegularizer(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      reg = lambda x: 0.1 * tf.reduce_sum(x)
      layer = conv_layers.Conv2DTranspose(32, [3, 3], bias_regularizer=reg)
      layer.apply(images)
      loss_keys = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(loss_keys), 1)
      self.evaluate([v.initializer for v in layer.variables])
      self.assertListEqual(
          self.evaluate(layer.losses), self.evaluate(loss_keys))

  def testConv2DTransposeNoBias(self):
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 4))
      layer = conv_layers.Conv2DTranspose(
          32, [3, 3], activation=tf.nn.relu, use_bias=False)
      output = layer.apply(images)
      self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height + 2, width + 2, 32])
      self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
      self.assertEqual(layer.bias, None)

  def testFunctionalConv2DTransposeReuse(self):
    # reuse=True must re-bind the existing variables, not create new ones.
    with tf.Graph().as_default():
      height, width = 7, 9
      images = tf.random.uniform((5, height, width, 3), seed=1)
      conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
      conv_layers.conv2d_transpose(
          images, 32, [3, 3], name='deconv1', reuse=True)
      self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)

  def testFunctionalConv2DTransposeReuseFromScope(self):
    with tf.Graph().as_default():
      with tf.compat.v1.variable_scope('scope'):
        height, width = 7, 9
        images = tf.random.uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
      with tf.compat.v1.variable_scope('scope', reuse=True):
        conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
        self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)

  def testFunctionalConv2DTransposeInitializerFromScope(self):
    # The enclosing variable_scope's initializer should apply to the kernel
    # but not override the bias's default zeros initializer.
    with tf.Graph().as_default(), self.cached_session():
      with tf.compat.v1.variable_scope(
          'scope', initializer=tf.compat.v1.ones_initializer()):
        height, width = 7, 9
        images = tf.random.uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
        weights = tf.compat.v1.trainable_variables()
        # Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name) self.assertTrue('bias' in weights[1].name) self.evaluate(tf.compat.v1.global_variables_initializer()) weights = self.evaluate(weights) # Check that the kernel weights got initialized to ones (from scope) self.assertAllClose(weights[0], np.ones((3, 3, 32, 3))) # Check that the bias still got initialized to zeros. self.assertAllClose(weights[1], np.zeros((32))) def testFunctionalConv2DTransposeNoReuse(self): with tf.Graph().as_default(): height, width = 7, 9 images = tf.random.uniform((5, height, width, 3), seed=1) conv_layers.conv2d_transpose(images, 32, [3, 3]) self.assertEqual(len(tf.compat.v1.trainable_variables()), 2) conv_layers.conv2d_transpose(images, 32, [3, 3]) self.assertEqual(len(tf.compat.v1.trainable_variables()), 4) def testConstraints(self): k_constraint = lambda x: x / tf.reduce_sum(x) b_constraint = lambda x: x / tf.reduce_max(x) layer = conv_layers.Conv2DTranspose(2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint) inputs = tf.random.uniform((5, 3, 3, 5), seed=1) layer(inputs) self.assertEqual(layer.kernel_constraint, k_constraint) self.assertEqual(layer.bias_constraint, b_constraint) class Conv3DTransposeTest(tf.test.TestCase): def testInvalidDataFormat(self): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32), seed=1) with self.assertRaisesRegex(ValueError, 'data_format'): conv_layers.conv3d_transpose(volumes, 4, 3, data_format='invalid') def testInvalidStrides(self): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32), seed=1) with self.assertRaisesRegex(ValueError, 'strides'): conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2)) with self.assertRaisesRegex(ValueError, 'strides'): conv_layers.conv3d_transpose(volumes, 4, 3, strides=None) def testInvalidKernelSize(self): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32), seed=1) with self.assertRaisesRegex(ValueError, 
'kernel_size'): conv_layers.conv3d_transpose(volumes, 4, (1, 2)) with self.assertRaisesRegex(ValueError, 'kernel_size'): conv_layers.conv3d_transpose(volumes, 4, None) def testCreateConv3DTranspose(self): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32)) layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], activation=tf.nn.relu) output = layer.apply(volumes) if not tf.executing_eagerly(): self.assertEqual(output.op.name, 'conv3d_transpose/Relu') self.assertListEqual(output.get_shape().as_list(), [5, depth + 2, height + 2, width + 2, 4]) self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]) self.assertListEqual(layer.bias.get_shape().as_list(), [4]) def testCreateConv3DTransposeIntegerKernelSize(self): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32)) layer = conv_layers.Conv3DTranspose(4, 3) output = layer.apply(volumes) self.assertListEqual(output.get_shape().as_list(), [5, depth + 2, height + 2, width + 2, 4]) self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]) self.assertListEqual(layer.bias.get_shape().as_list(), [4]) def testCreateConv3DTransposeChannelsFirst(self): with tf.Graph().as_default(): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, 32, depth, height, width)) layer = conv_layers.Conv3DTranspose( 4, [3, 3, 3], data_format='channels_first') output = layer.apply(volumes) self.assertListEqual(output.get_shape().as_list(), [5, 4, depth + 2, height + 2, width + 2]) self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]) self.assertListEqual(layer.bias.get_shape().as_list(), [4]) def testConv3DTransposePaddingSame(self): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 64), seed=1) layer = conv_layers.Conv3DTranspose( 32, volumes.get_shape()[1:4], padding='same') output = layer.apply(volumes) self.assertListEqual(output.get_shape().as_list(), [5, depth, height, width, 
32]) def testCreateConv3DTransposeWithStrides(self): depth, height, width = 4, 6, 8 # Test strides tuple. volumes = tf.random.uniform((5, depth, height, width, 32), seed=1) layer = conv_layers.Conv3DTranspose( 4, [3, 3, 3], strides=(2, 2, 2), padding='same') output = layer.apply(volumes) self.assertListEqual(output.get_shape().as_list(), [5, depth * 2, height * 2, width * 2, 4]) # Test strides integer. layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], strides=2, padding='same') output = layer.apply(volumes) self.assertListEqual(output.get_shape().as_list(), [5, depth * 2, height * 2, width * 2, 4]) # Test unequal strides. layer = conv_layers.Conv3DTranspose( 4, [3, 3, 3], strides=(2, 1, 1), padding='same') output = layer.apply(volumes) self.assertListEqual(output.get_shape().as_list(), [5, depth * 2, height, width, 4]) def testConv3DTransposeKernelRegularizer(self): with tf.Graph().as_default(): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32)) reg = lambda x: 0.1 * tf.reduce_sum(x) layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], kernel_regularizer=reg) layer.apply(volumes) loss_keys = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(len(loss_keys), 1) self.evaluate([v.initializer for v in layer.variables]) self.assertListEqual( self.evaluate(layer.losses), self.evaluate(loss_keys)) def testConv3DTransposeBiasRegularizer(self): with tf.Graph().as_default(): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32)) reg = lambda x: 0.1 * tf.reduce_sum(x) layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], bias_regularizer=reg) layer.apply(volumes) loss_keys = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(len(loss_keys), 1) self.evaluate([v.initializer for v in layer.variables]) self.assertListEqual( self.evaluate(layer.losses), self.evaluate(loss_keys)) def testConv3DTransposeNoBias(self): with 
tf.Graph().as_default(): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32)) layer = conv_layers.Conv3DTranspose( 4, [3, 3, 3], activation=tf.nn.relu, use_bias=False) output = layer.apply(volumes) self.assertEqual(output.op.name, 'conv3d_transpose/Relu') self.assertListEqual(output.get_shape().as_list(), [5, depth + 2, height + 2, width + 2, 4]) self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]) self.assertEqual(layer.bias, None) def testFunctionalConv3DTransposeReuse(self): with tf.Graph().as_default(): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32), seed=1) conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1') self.assertEqual(len(tf.compat.v1.trainable_variables()), 2) conv_layers.conv3d_transpose( volumes, 4, [3, 3, 3], name='deconv1', reuse=True) self.assertEqual(len(tf.compat.v1.trainable_variables()), 2) def testFunctionalConv3DTransposeReuseFromScope(self): with tf.Graph().as_default(): with tf.compat.v1.variable_scope('scope'): depth, height, width = 5, 7, 9 volumes = tf.random.uniform( (5, depth, height, width, 32), seed=1) conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1') self.assertEqual(len(tf.compat.v1.trainable_variables()), 2) with tf.compat.v1.variable_scope('scope', reuse=True): conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1') self.assertEqual(len(tf.compat.v1.trainable_variables()), 2) def testFunctionalConv3DTransposeInitializerFromScope(self): with tf.Graph().as_default(), self.cached_session(): with tf.compat.v1.variable_scope( 'scope', initializer=tf.compat.v1.ones_initializer()): depth, height, width = 5, 7, 9 volumes = tf.random.uniform( (5, depth, height, width, 32), seed=1) conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1') weights = tf.compat.v1.trainable_variables() # Check the names of weights in order. 
self.assertTrue('kernel' in weights[0].name) self.assertTrue('bias' in weights[1].name) self.evaluate(tf.compat.v1.global_variables_initializer()) weights = self.evaluate(weights) # Check that the kernel weights got initialized to ones (from scope) self.assertAllClose(weights[0], np.ones((3, 3, 3, 4, 32))) # Check that the bias still got initialized to zeros. self.assertAllClose(weights[1], np.zeros((4))) def testFunctionalConv3DTransposeNoReuse(self): with tf.Graph().as_default(): depth, height, width = 5, 7, 9 volumes = tf.random.uniform((5, depth, height, width, 32), seed=1) conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3]) self.assertEqual(len(tf.compat.v1.trainable_variables()), 2) conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3]) self.assertEqual(len(tf.compat.v1.trainable_variables()), 4) def testConstraints(self): k_constraint = lambda x: x / tf.reduce_sum(x) b_constraint = lambda x: x / tf.reduce_max(x) layer = conv_layers.Conv3DTranspose(2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint) inputs = tf.random.uniform((5, 3, 3, 3, 5), seed=1) layer(inputs) self.assertEqual(layer.kernel_constraint, k_constraint) self.assertEqual(layer.bias_constraint, b_constraint) if __name__ == '__main__': tf.test.main()
52,865
44.107509
91
py
keras
keras-master/keras/legacy_tf_layers/base_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.base.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import copy from absl.testing import parameterized import numpy as np from keras import backend from keras import combinations from keras.engine import base_layer as keras_base_layer from keras.engine import input_spec from keras.legacy_tf_layers import base as base_layers from keras.legacy_tf_layers import core as core_layers class BaseLayerTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testLayerProperties(self): layer = base_layers.Layer(name='my_layer') self.assertEqual(layer.variables, []) self.assertEqual(layer.trainable_variables, []) self.assertEqual(layer.non_trainable_variables, []) if not tf.executing_eagerly(): # updates, losses only supported in GRAPH mode self.assertEqual(layer.updates, []) self.assertEqual(layer.losses, []) self.assertEqual(layer.built, False) layer = base_layers.Layer(name='my_layer', trainable=False) self.assertEqual(layer.trainable, False) # Assert that the layer was not instrumented as a Keras layer self.assertFalse(layer._instrumented_keras_api) # Assert this was instrumented as a legacy layer self.assertTrue( 
keras_base_layer.keras_api_gauge.get_cell('legacy_layer').value()) keras_base_layer.keras_api_gauge.get_cell('legacy_layer').set(False) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInt64Layer(self): layer = base_layers.Layer(name='my_layer', dtype='int64') layer.add_variable('my_var', [2, 2]) self.assertEqual(layer.name, 'my_layer') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testKerasStyleAddWeight(self): keras_layer = keras_base_layer.Layer(name='keras_layer') with backend.name_scope('foo'): keras_variable = keras_layer.add_variable( 'my_var', [2, 2], initializer=tf.compat.v1.zeros_initializer()) self.assertEqual(keras_variable.name, 'foo/my_var:0') with backend.name_scope('baz'): old_style_layer = base_layers.Layer(name='my_layer') # Test basic variable creation. variable = old_style_layer.add_variable( 'my_var', [2, 2], initializer=tf.compat.v1.zeros_initializer()) self.assertEqual(variable.name, 'my_layer/my_var:0') with base_layers.keras_style_scope(): layer = base_layers.Layer(name='my_layer') # Assert that the layer was not instrumented as a Keras layer self.assertFalse(layer._instrumented_keras_api) # Test basic variable creation. with backend.name_scope('bar'): variable = layer.add_variable( 'my_var', [2, 2], initializer=tf.compat.v1.zeros_initializer()) self.assertEqual(variable.name, 'bar/my_var:0') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testAddWeight(self): layer = base_layers.Layer(name='my_layer') # Test basic variable creation. 
variable = layer.add_variable( 'my_var', [2, 2], initializer=tf.compat.v1.zeros_initializer()) self.assertEqual(variable.name, 'my_layer/my_var:0') self.assertEqual(layer.variables, [variable]) self.assertEqual(layer.trainable_variables, [variable]) self.assertEqual(layer.non_trainable_variables, []) if not tf.executing_eagerly(): self.assertEqual( layer.variables, tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)) # Test non-trainable variable creation. # layer.add_variable should work even outside `build` and `call`. variable_2 = layer.add_variable( 'non_trainable_var', [2, 2], initializer=tf.compat.v1.zeros_initializer(), trainable=False) self.assertEqual(layer.variables, [variable, variable_2]) self.assertEqual(layer.trainable_variables, [variable]) self.assertEqual(layer.non_trainable_variables, [variable_2]) if not tf.executing_eagerly(): self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 1) regularizer = lambda x: tf.reduce_sum(x) * 1e-3 _ = layer.add_variable( 'reg_var', [2, 2], initializer=tf.compat.v1.zeros_initializer(), regularizer=regularizer) self.assertEqual(len(layer.losses), 1) added_variable = [False] # Test that sync `ON_READ` variables are defaulted to be non-trainable. 
variable_3 = layer.add_variable( 'sync_on_read_var', [2, 2], initializer=tf.compat.v1.zeros_initializer(), synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.compat.v1.VariableAggregation.SUM) self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3]) @tf.function def function_adds_weight(): if not added_variable[0]: layer.add_variable( 'reg_var_from_function', [2, 2], initializer=tf.compat.v1.zeros_initializer(), regularizer=regularizer) added_variable[0] = True function_adds_weight() self.assertEqual(len(layer.losses), 2) def testInvalidTrainableSynchronizationCombination(self): layer = base_layers.Layer(name='my_layer') with self.assertRaisesRegex( ValueError, 'Synchronization value can be set to ' 'VariableSynchronization.ON_READ only for non-trainable variables. ' 'You have specified trainable=True and ' 'synchronization=VariableSynchronization.ON_READ.'): _ = layer.add_variable( 'v', [2, 2], initializer=tf.compat.v1.zeros_initializer(), synchronization=tf.VariableSynchronization.ON_READ, trainable=True) def testReusePartitionedVariablesAndRegularizers(self): with tf.Graph().as_default(): regularizer = lambda x: tf.reduce_sum(x) * 1e-3 partitioner = tf.compat.v1.fixed_size_partitioner(3) for reuse in [False, True]: with tf.compat.v1.variable_scope( tf.compat.v1.get_variable_scope(), partitioner=partitioner, reuse=reuse): layer = base_layers.Layer(name='my_layer') _ = layer.add_variable( 'reg_part_var', [4, 4], initializer=tf.compat.v1.zeros_initializer(), regularizer=regularizer) self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)), 3) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testCall(self): class MyLayer(base_layers.Layer): def call(self, inputs): return tf.square(inputs) layer = MyLayer(name='my_layer') inputs = tf.random.uniform((5,), seed=1) outputs = layer.apply(inputs) self.assertEqual(layer.built, True) if not tf.executing_eagerly(): # op is only 
supported in GRAPH mode self.assertEqual(outputs.op.name, 'my_layer/Square') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testDeepCopy(self): class MyLayer(base_layers.Layer): def call(self, inputs): return tf.square(inputs) layer = MyLayer(name='my_layer') layer._private_tensor = tf.random.uniform(()) inputs = tf.random.uniform((5,), seed=1) outputs = layer.apply(inputs) self.assertEqual(layer.built, True) if not tf.executing_eagerly(): # op only supported in GRAPH mode. self.assertEqual(outputs.op.name, 'my_layer/Square') layer_copy = copy.deepcopy(layer) self.assertEqual(layer_copy.name, layer.name) self.assertEqual(layer_copy._scope.name, layer._scope.name) self.assertEqual(layer_copy._private_tensor, layer._private_tensor) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testScopeNaming(self): class PrivateLayer(base_layers.Layer): def call(self, inputs): return inputs inputs = tf.random.uniform((5,)) default_layer = PrivateLayer() _ = default_layer.apply(inputs) self.assertEqual(default_layer._scope.name, 'private_layer') default_layer1 = PrivateLayer() default_layer1.apply(inputs) self.assertEqual(default_layer1._scope.name, 'private_layer_1') my_layer = PrivateLayer(name='my_layer') my_layer.apply(inputs) self.assertEqual(my_layer._scope.name, 'my_layer') my_layer1 = PrivateLayer(name='my_layer') my_layer1.apply(inputs) self.assertEqual(my_layer1._scope.name, 'my_layer_1') my_layer2 = PrivateLayer(name='my_layer') my_layer2.apply(inputs) self.assertEqual(my_layer2._scope.name, 'my_layer_2') # Name scope shouldn't affect names. 
with backend.name_scope('some_name_scope'): default_layer2 = PrivateLayer() default_layer2.apply(inputs) self.assertEqual(default_layer2._scope.name, 'private_layer_2') my_layer3 = PrivateLayer(name='my_layer') my_layer3.apply(inputs) self.assertEqual(my_layer3._scope.name, 'my_layer_3') other_layer = PrivateLayer(name='other_layer') other_layer.apply(inputs) self.assertEqual(other_layer._scope.name, 'other_layer') # Variable scope gets added to scope names. with tf.compat.v1.variable_scope('var_scope'): default_layer_scoped = PrivateLayer() default_layer_scoped.apply(inputs) self.assertEqual(default_layer_scoped._scope.name, 'var_scope/private_layer') my_layer_scoped = PrivateLayer(name='my_layer') my_layer_scoped.apply(inputs) self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer') my_layer_scoped1 = PrivateLayer(name='my_layer') my_layer_scoped1.apply(inputs) self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputSpecNdimCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(ndim=2) def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegex(ValueError, r'expected ndim=2'): layer.apply(tf.constant([1])) # Note that we re-create the layer since in Eager mode, input spec checks # only happen on first call. 
# Works layer = CustomerLayer() layer.apply(tf.constant([[1], [2]])) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputSpecMinNdimCheck(self): class CustomLayer(base_layers.Layer): def __init__(self): super(CustomLayer, self).__init__() self.input_spec = input_spec.InputSpec(min_ndim=2) def call(self, inputs): return inputs layer = CustomLayer() with self.assertRaisesRegex(ValueError, r'expected min_ndim=2'): layer.apply(tf.constant([1])) # Works layer = CustomLayer() layer.apply(tf.constant([[1], [2]])) layer = CustomLayer() layer.apply(tf.constant([[[1], [2]]])) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputSpecMaxNdimCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(max_ndim=2) def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegex(ValueError, r'expected max_ndim=2'): layer.apply(tf.constant([[[1], [2]]])) # Works layer = CustomerLayer() layer.apply(tf.constant([1])) layer = CustomerLayer() layer.apply(tf.constant([[1], [2]])) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputSpecDtypeCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(dtype='float32') def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegex(ValueError, r'expected dtype=float32'): layer.apply(tf.constant(1, dtype=tf.int32)) # Works layer = CustomerLayer() layer.apply(tf.constant(1.0, dtype=tf.float32)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputSpecAxesCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(axes={-1: 2}) def call(self, inputs): return inputs layer = CustomerLayer() with 
self.assertRaisesRegex(ValueError, r'expected axis'): layer.apply(tf.constant([1, 2, 3])) # Works layer = CustomerLayer() layer.apply(tf.constant([1, 2])) layer = CustomerLayer() layer.apply(tf.constant([[1, 2], [3, 4], [5, 6]])) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputSpecShapeCheck(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = input_spec.InputSpec(shape=(None, 3)) def call(self, inputs): return inputs layer = CustomerLayer() with self.assertRaisesRegex(ValueError, r'expected shape'): layer.apply(tf.constant([[1, 2]])) # Works layer = CustomerLayer() layer.apply(tf.constant([[1, 2, 3], [4, 5, 6]])) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoInputSpec(self): class CustomerLayer(base_layers.Layer): def __init__(self): super(CustomerLayer, self).__init__() self.input_spec = None def call(self, inputs): return inputs layer = CustomerLayer() layer.apply(tf.constant(1)) # Works if not tf.executing_eagerly(): layer.apply(tf.compat.v1.placeholder('int32')) layer.apply(tf.compat.v1.placeholder('int32', shape=(2, 3))) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_count_params(self): dense = core_layers.Dense(16) dense.build((None, 4)) self.assertEqual(dense.count_params(), 16 * 4 + 16) dense = core_layers.Dense(16) with self.assertRaises(ValueError): dense.count_params() @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testDictInputOutput(self): class DictLayer(base_layers.Layer): def call(self, inputs): return {'l' + key: inputs[key] for key in inputs} layer = DictLayer() if tf.executing_eagerly(): i1 = tf.constant(3) i2 = tf.constant(4.0) result = layer.apply({'abel': i1, 'ogits': i2}) self.assertTrue(isinstance(result, dict)) self.assertEqual(set(['label', 'logits']), set(result.keys())) self.assertEqual(3, result['label'].numpy()) self.assertEqual(4.0, 
result['logits'].numpy()) else: i1 = tf.compat.v1.placeholder('int32') i2 = tf.compat.v1.placeholder('float32') result = layer.apply({'abel': i1, 'ogits': i2}) self.assertTrue(isinstance(result, dict)) self.assertEqual(set(['label', 'logits']), set(result.keys())) def testActivityRegularizer(self): with tf.Graph().as_default(): regularizer = tf.reduce_sum layer = base_layers.Layer(activity_regularizer=regularizer) x = tf.compat.v1.placeholder('int32') layer.apply(x) self.assertEqual(len(layer.get_losses_for(x)), 1) def testNameScopeIsConsistentWithVariableScope(self): # Github issue 13429. class MyLayer(base_layers.Layer): def build(self, input_shape): self.my_var = self.add_variable('my_var', (), tf.float32) self.built = True def call(self, inputs): return tf.multiply(inputs, self.my_var, name='my_op') def _gen_layer(x, name=None): layer = MyLayer(name=name) out = layer.apply(x) return layer, out # unnamed layer with tf.Graph().as_default(): x = tf.compat.v1.placeholder(tf.float32, (), 'x') layer, op = _gen_layer(x) layer1, op1 = _gen_layer(op) layer2, op2 = _gen_layer(op1) self.assertEqual(layer.my_var.name, 'my_layer/my_var:0') self.assertEqual(op.name, 'my_layer/my_op:0') self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0') self.assertEqual(op1.name, 'my_layer_1/my_op:0') self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0') self.assertEqual(op2.name, 'my_layer_2/my_op:0') # name starts from zero with tf.Graph().as_default(): x = tf.compat.v1.placeholder(tf.float32, (), 'x') layer, op = _gen_layer(x, name='name') layer1, op1 = _gen_layer(op, name='name_1') layer2, op2 = _gen_layer(op1, name='name_2') self.assertEqual(layer.my_var.name, 'name/my_var:0') self.assertEqual(op.name, 'name/my_op:0') self.assertEqual(layer1.my_var.name, 'name_1/my_var:0') self.assertEqual(op1.name, 'name_1/my_op:0') self.assertEqual(layer2.my_var.name, 'name_2/my_var:0') self.assertEqual(op2.name, 'name_2/my_op:0') # name starts from one with tf.Graph().as_default(): x = 
tf.compat.v1.placeholder(tf.float32, (), 'x') layer, op = _gen_layer(x, name='name_1') layer1, op1 = _gen_layer(op, name='name_2') layer2, op2 = _gen_layer(op1, name='name_3') self.assertEqual(layer.my_var.name, 'name_1/my_var:0') self.assertEqual(op.name, 'name_1/my_op:0') self.assertEqual(layer1.my_var.name, 'name_2/my_var:0') self.assertEqual(op1.name, 'name_2/my_op:0') self.assertEqual(layer2.my_var.name, 'name_3/my_var:0') self.assertEqual(op2.name, 'name_3/my_op:0') def testVariablesAreLiftedFromFunctionBuildingGraphs(self): class MyLayer(base_layers.Layer): def build(self, input_shape): self.my_var = self.add_variable('my_var', (), tf.float32) self.built = True def call(self, inputs): return inputs outer_graph = tf.compat.v1.get_default_graph() function_building_graph = tf.Graph() function_building_graph._building_function = True with outer_graph.as_default(): with function_building_graph.as_default(): layer = MyLayer() # Create a variable by invoking build through __call__ and assert that # it is both tracked and lifted into the outer graph. 
inputs = tf.compat.v1.placeholder(tf.float32, (), 'inputs') layer.apply(inputs) self.assertEqual(len(layer.variables), 1) self.assertEqual(len(layer.trainable_variables), 1) self.assertEqual(layer.variables[0].graph, outer_graph) def testGetUpdateFor(self): class MyLayer(base_layers.Layer): def build(self, input_shape): self.a = self.add_variable('a', (), tf.float32, trainable=False) self.b = self.add_variable('b', (), tf.float32, trainable=False) self.add_update(tf.compat.v1.assign_add(self.a, 1., name='b_update')) self.built = True def call(self, inputs): self.add_update(tf.compat.v1.assign_add(self.a, inputs, name='a_update'), inputs=True) return inputs + 1 with tf.Graph().as_default(): layer = MyLayer() inputs = tf.compat.v1.placeholder(tf.float32, (), 'inputs') intermediate_inputs = inputs + 1 outputs = layer.apply(intermediate_inputs) self.assertEqual(len(layer.updates), 2) self.assertEqual(len(layer.get_updates_for(None)), 1) self.assertEqual(len(layer.get_updates_for([inputs])), 1) self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1) self.assertEqual(len(layer.get_updates_for([outputs])), 0) # Call same layer on new input, creating one more conditional update inputs = tf.compat.v1.placeholder(tf.float32, (), 'inputs') intermediate_inputs = inputs + 1 outputs = layer.apply(intermediate_inputs) self.assertEqual(len(layer.updates), 3) self.assertEqual(len(layer.get_updates_for(None)), 1) # Check that we are successfully filtering out irrelevant updates self.assertEqual(len(layer.get_updates_for([inputs])), 1) self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1) self.assertEqual(len(layer.get_updates_for([outputs])), 0) def testGetLossesFor(self): class MyLayer(base_layers.Layer): def build(self, input_shape): self.a = self.add_variable('a', (), tf.float32, trainable=False) self.b = self.add_variable('b', (), tf.float32, trainable=False) self.add_loss(self.a) self.built = True def call(self, inputs): self.add_loss(inputs, 
inputs=True) return inputs + 1 with tf.Graph().as_default(): layer = MyLayer() inputs = tf.compat.v1.placeholder(tf.float32, (), 'inputs') intermediate_inputs = inputs + 1 outputs = layer.apply(intermediate_inputs) self.assertEqual(len(layer.losses), 2) self.assertEqual(len(layer.get_losses_for(None)), 1) self.assertEqual(len(layer.get_losses_for([inputs])), 1) self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1) self.assertEqual(len(layer.get_losses_for([outputs])), 0) # Call same layer on new input, creating one more conditional loss inputs = tf.compat.v1.placeholder(tf.float32, (), 'inputs') intermediate_inputs = inputs + 1 outputs = layer.apply(intermediate_inputs) self.assertEqual(len(layer.losses), 3) self.assertEqual(len(layer.get_losses_for(None)), 1) # Check that we are successfully filtering out irrelevant losses self.assertEqual(len(layer.get_losses_for([inputs])), 1) self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1) self.assertEqual(len(layer.get_losses_for([outputs])), 0) class IdentityLayer(base_layers.Layer): """A layer returns the identity of it's input.""" def call(self, inputs): return inputs @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class DTypeTest(tf.test.TestCase, parameterized.TestCase): def _const(self, dtype): return tf.constant(1, dtype=dtype) def test_dtype_inferred_from_input(self): # Test with Tensor input layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(self._const('float64')) self.assertEqual(layer.dtype, 'float64') # Test with Numpy input layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(np.array(1., dtype='float64')) self.assertEqual(layer.dtype, 'float64') # Test with integer input layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(self._const('int32')) self.assertEqual(layer.dtype, 'int32') # Test layer dtype doesn't change when passed a new dtype layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(self._const('float64')) 
self.assertEqual(layer.dtype, 'float64') layer(self._const('float16')) self.assertEqual(layer.dtype, 'float64') # Test layer dtype inferred from first input layer = IdentityLayer() layer([self._const('float32'), self._const('float64')]) self.assertEqual(layer.dtype, 'float32') def test_passing_dtype_to_constructor(self): layer = IdentityLayer(dtype='float64') layer(self._const('float32')) self.assertEqual(layer.dtype, 'float64') layer = IdentityLayer(dtype='int32') layer(self._const('float32')) self.assertEqual(layer.dtype, 'int32') layer = IdentityLayer(dtype=tf.float64) layer(self._const('float32')) self.assertEqual(layer.dtype, 'float64') def test_inputs_not_casted(self): layer = IdentityLayer(dtype='float32') self.assertEqual(layer(self._const('float64')).dtype, 'float64') if __name__ == '__main__': tf.test.main()
25,100
35.116547
92
py
keras
keras-master/keras/legacy_tf_layers/variable_scope_shim.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=g-classes-have-attributes """Contains a shim to allow using TF1 get_variable code in TF2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import functools from keras.engine import base_layer from keras.utils import tf_inspect from keras.utils import layer_utils from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export # pylint: disable=g-direct-tensorflow-import def as_shape(shape): """Converts the given object to a TensorShape.""" if isinstance(shape, tf.TensorShape): return shape else: return tf.TensorShape(shape) def _is_callable_object(obj): return hasattr(obj, "__call__") and tf_inspect.ismethod(obj.__call__) def _has_kwargs(fn): """Returns whether the passed callable has **kwargs in its signature. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `bool`: if `fn` has **kwargs in its signature. Raises: `TypeError`: If fn is not a Function, or function-like object. 
""" if isinstance(fn, functools.partial): fn = fn.func elif _is_callable_object(fn): fn = fn.__call__ elif not callable(fn): raise TypeError( "fn should be a function-like object, but is of type {}.".format( type(fn))) return tf_inspect.getfullargspec(fn).varkw is not None def fn_args(fn): """Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `tuple` of string argument names. Raises: ValueError: if partial function has positionally bound arguments """ if isinstance(fn, functools.partial): args = fn_args(fn.func) args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])] else: if hasattr(fn, "__call__") and tf_inspect.ismethod(fn.__call__): fn = fn.__call__ args = tf_inspect.getfullargspec(fn).args if _is_bound_method(fn) and args: # If it's a bound method, it may or may not have a self/cls first # argument; for example, self could be captured in *args. # If it does have a positional argument, it is self/cls. 
args.pop(0) return tuple(args) def _is_bound_method(fn): _, fn = tf.__internal__.decorator.unwrap(fn) return tf_inspect.ismethod(fn) and (fn.__self__ is not None) def validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name): """Given user-provided variable properties, sets defaults and validates.""" if aggregation is None: aggregation = tf.compat.v1.VariableAggregation.NONE else: if not isinstance(aggregation, (tf.compat.v1.VariableAggregation, tf.VariableAggregation)): try: aggregation = tf.VariableAggregation(aggregation) except ValueError: raise ValueError( "Invalid variable aggregation mode: {} for variable: {}".format( aggregation, name)) if synchronization is None: synchronization = tf.VariableSynchronization.AUTO else: try: synchronization = tf.VariableSynchronization(synchronization) except ValueError: raise ValueError( "Invalid variable synchronization mode: {} for variable: {}".format( synchronization, name)) if trainable is None: trainable = synchronization != tf.VariableSynchronization.ON_READ return synchronization, aggregation, trainable class _EagerVariableStore(tf.Module): """TF2-compatible VariableStore that avoids collections & tracks regularizers. New variable names and new variables can be created; all stored variables are initialized with the initializer passed to __init__. All variables get created in `tf.init_scope.` to avoid a bad interaction between `tf.function` `FuncGraph` internals, Keras Functional Models, and TPUStrategy variable initialization. Also, it always acts as if reuse is set to either "TRUE" or tf.compat.v1.AUTO_REUSE Attributes: vars: a dictionary with string names (same as passed in GetVar) as keys and the corresponding TensorFlow Variables as values. """ def __init__(self): """Create a variable store.""" self._vars = {} # A dictionary of the stored TensorFlow variables. self._regularizers = {} # A dict mapping var names to their regularizers. 
self._store_eager_variables = True def get_variable( self, name, shape=None, dtype=tf.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.compat.v1.VariableAggregation.NONE): """Gets an existing variable with these parameters or create a new one. If a variable with the given name is already stored, we return the stored variable. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want variables to be created if they don't exist or returned if they do. In this shim, `reuse` of `False` will be treated as auto-reuse. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If a partitioner is provided, a `PartitionedVariable` is returned. Accessing this object as a `Tensor` returns the shards concatenated along the partition axis. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of variables. When eager execution is enabled this argument is always forced to be False. 
trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable` defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys to add the `Variable` to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the `Variable` reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and dtype of the `Variable` to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates instead an experimental ResourceVariable which has well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be true. custom_getter: Callable that takes as a first argument the true getter, and allows overwriting the internal get_variable method. The signature of `custom_getter` should match that of this method, but the most future-proof version will allow for changes: `def custom_getter(getter, *args, **kwargs)`. Direct access to all `get_variable` parameters is also allowed: `def custom_getter(getter, name, *args, **kwargs)`. 
A simple identity custom getter that simply creates variables with modified names is: ```python def custom_getter(getter, name, *args, **kwargs): return getter(name + '_suffix', *args, **kwargs) ``` constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, or when violating reuse during variable creation. RuntimeError: when eager execution is enabled and not called from an EagerVariableStore. """ if custom_getter is not None and not callable(custom_getter): raise ValueError("Passed a custom_getter which is not callable: %s" % custom_getter) with tf.init_scope(): if tf.executing_eagerly(): # Variable creation and initialization takes place in `init_scope`s; # as such, if an `init_scope` lifts us into the eager context, then we # need to use `ResourceVariable`s. 
use_resource = True # Note that it's fine to reuse eager variables whose initialization was # lifted from a function-building graph into the eager context (that's why # the following clause is not wrapped in an `init_scope`); lifted variables # are tracked by the graph's `VariableStore`. if not reuse: reuse = tf.compat.v1.AUTO_REUSE # If a *_ref type is passed in an error would be triggered further down the # stack. We prevent this using base_dtype to get a non-ref version of the # type, before doing anything else. When _ref types are removed in favor of # resources, this line can be removed. try: dtype = dtype.base_dtype except AttributeError: # .base_dtype not existing means that we will try and use the raw dtype # which was passed in - this might be a NumPy type which is valid. pass # This is the main logic of get_variable. However, custom_getter # may override this logic. So we save it as a callable and pass # it to custom_getter. # Note: the parameters of _true_getter, and their documentation, match # *exactly* item-for-item with the docstring of this method. def _true_getter( # pylint: disable=missing-docstring name, shape=None, dtype=tf.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, # pylint: disable=unused-argument caching_device=None, partitioner=None, validate_shape=True, use_resource=None, # pylint: disable=unused-argument constraint=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.compat.v1.VariableAggregation.NONE): # Partitioned variable currently unsupported w/ the shim if partitioner is not None: raise ValueError( "`partitioner` arg for `get_variable` is unsupported in TF2." "File a bug if you need help. You passed %s" % partitioner) # Single variable case if "%s/part_0" % name in self._vars: raise ValueError( "No partitioner was provided, but a partitioned version of the " "variable was found: %s/part_0. Perhaps a variable of the same " "name was already created with partitioning?" 
% name) return self._get_single_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, caching_device=caching_device, validate_shape=validate_shape, constraint=constraint, synchronization=synchronization, aggregation=aggregation) synchronization, aggregation, trainable = ( validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name)) if custom_getter is not None: # Handle backwards compatibility with getter arguments that were added # to the API after users started writing custom getters. custom_getter_kwargs = { "getter": _true_getter, "name": name, "shape": shape, "dtype": dtype, "initializer": initializer, "regularizer": regularizer, "reuse": reuse, "trainable": trainable, "collections": collections, "caching_device": caching_device, "partitioner": partitioner, "validate_shape": validate_shape, "use_resource": use_resource, "synchronization": synchronization, "aggregation": aggregation, } # `fn_args` and `has_kwargs` can handle functions, `functools.partial`, # `lambda`. if ("constraint" in fn_args(custom_getter) or _has_kwargs(custom_getter)): custom_getter_kwargs["constraint"] = constraint return custom_getter(**custom_getter_kwargs) else: return _true_getter( name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) def _get_single_variable( self, name, shape=None, dtype=tf.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, caching_device=None, validate_shape=True, constraint=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.compat.v1.VariableAggregation.NONE): """Get or create a single Variable (e.g. 
a shard or entire variable). See the documentation of get_variable above (ignore partitioning components) for details. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. initializer: see get_variable. regularizer: see get_variable. partition_info: _PartitionInfo object. reuse: see get_variable. trainable: see get_variable. caching_device: see get_variable. validate_shape: see get_variable. constraint: see get_variable. synchronization: see get_variable. aggregation: see get_variable. Returns: A Variable. See documentation of get_variable above. Raises: ValueError: See documentation of get_variable above. """ # Set to true if initializer is a constant. initializing_from_value = False if initializer is not None and not callable(initializer): initializing_from_value = True if shape is not None and initializing_from_value: raise ValueError("If initializer is a constant, do not specify shape.") dtype = tf.as_dtype(dtype) shape = as_shape(shape) if name in self._vars: # Here we handle the case when returning an existing variable. found_var = self._vars[name] if not shape.is_compatible_with(found_var.get_shape()): raise ValueError("Trying to share variable %s, but specified shape %s" " and found shape %s." % (name, shape, found_var.get_shape())) if not dtype.is_compatible_with(found_var.dtype): dtype_str = dtype.name found_type_str = found_var.dtype.name raise ValueError("Trying to share variable %s, but specified dtype %s" " and found dtype %s." % (name, dtype_str, found_type_str)) return found_var # The code below handles only the case of creating a new variable. if reuse is True: # pylint: disable=g-bool-id-comparison raise ValueError("Variable %s does not exist, or was not created with " "tf.get_variable(). Did you mean to set " "reuse=tf.AUTO_REUSE in VarScope?" % name) # Create the tensor to initialize the variable with default value. 
if initializer is None: initializer, initializing_from_value = self._get_default_initializer( name=name, shape=shape, dtype=dtype) # Enter an init scope when creating the initializer. with tf.init_scope(): if initializing_from_value: init_val = initializer variable_dtype = None else: # Instantiate initializer if provided initializer is a type object. if tf_inspect.isclass(initializer): initializer = initializer() if shape.is_fully_defined(): if "partition_info" in tf_inspect.getargspec(initializer).args: init_val = functools.partial(initializer, shape.as_list(), dtype=dtype, partition_info=partition_info) else: init_val = functools.partial(initializer, shape.as_list(), dtype=dtype) variable_dtype = dtype.base_dtype else: init_val = initializer variable_dtype = None # Create the variable (Always eagerly as a workaround for a strange # tpu / funcgraph / keras functional model interaction ) with tf.init_scope(): v = tf.Variable( initial_value=init_val, name=name, trainable=trainable, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, synchronization=synchronization, aggregation=aggregation) self._vars[name] = v logging.vlog(1, "Created variable %s with shape %s and init %s", v.name, format(shape), initializer) # Run the regularizer if requested and save the resulting loss. if regularizer: self.add_regularizer(v, regularizer) return v def add_regularizer(self, var, regularizer): self._regularizers[var.name] = functools.partial(regularizer, var) # Initialize variable when no initializer provided def _get_default_initializer(self, name, shape=None, dtype=tf.float32): """Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype. 
""" del shape # If dtype is DT_FLOAT, provide a uniform unit scaling initializer if dtype.is_floating: initializer = tf.compat.v1.glorot_uniform_initializer() initializing_from_value = False # If dtype is DT_INT/DT_UINT, provide a default value `zero` # If dtype is DT_BOOL, provide a default value `FALSE` elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or dtype == tf.string): initializer = tf.compat.v1.zeros_initializer() initializing_from_value = False # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here? else: raise ValueError("An initializer for variable %s of %s is required" % (name, dtype.base_dtype)) return initializer, initializing_from_value @keras_export(v1=["keras.utils.track_tf1_style_variables"]) def track_tf1_style_variables(method): """Wrap layer & module methods in this decorator to capture tf1-style weights. Decorating a `tf.keras.Layer`'s or `tf.Module`'s methods with this decorator will cause the layer/module to track weights created/used via `tf.compat.v1.get_variable` (and by extension `tf.compat.v1.layers`) inside the decorated method. In addition to tracking the weights themselves under the standard `layer.variable`/`module.variable`/etc. properties, if the method belongs to a `tf.keras.Layer` then any regularization losses specified via the `get_variable` or `tf.compat.v1.layers` regularizer arguments will get tracked by the layer under the standard `layer.losses` property. This tracking enables using large classes of TF1-style model-forward-pass code inside of Keras layers or `tf.Modules` in TF2 with TF2 behaviors enabled. 
Example of capturing tf.compat.v1.layer-based modeling code as a Keras layer: ```python class WrappedDoubleDenseLayer(tf.keras.layers.Layer): def __init__(self, units, *args, **kwargs): super().__init__(*args, **kwargs) self.units = units @tf.compat.v1.keras.utils.track_tf1_style_variables def call(self, inputs): with tf.compat.v1.variable_scope("double_dense_layer"): out = tf.compat.v1.layers.dense( inputs, self.units, name="dense_one", kernel_initializer=tf.compat.v1.random_normal_initializer, kernel_regularizer="l2") out = tf.compat.v1.layers.dense( out, self.units, name="dense_two", kernel_initializer=tf.compat.v1.random_normal_initializer(), kernel_regularizer="l2") return out # Create a layer that can be used as a standard keras layer layer = WrappedDoubleDenseLayer(10) # call the layer on inputs layer(...) # Variables created/used within the scope will be tracked by the layer layer.weights layer.trainable_variables # Regularization losses will be captured in layer.losses after a call, # just like any other Keras layer reg_losses = layer.losses ``` Example of capturing tf.compat.v1.get_variable-based modeling code as a Keras layer: ```python class WrappedDoubleDenseLayer(tf.keras.layers.Layer): def __init__(self, units, *args, **kwargs): super().__init__(*args, **kwargs) self.units = units @tf.compat.v1.keras.utils.track_tf1_style_variables def call(self, inputs): out = inputs with tf.compat.v1.variable_scope("double_dense_layer"): with tf.compat.v1.variable_scope("dense_one"): # The weights are created with a `regularizer`, # so the layer should track their regularization losses kernel = tf.compat.v1.get_variable( shape=[out.shape[-1], self.units], regularizer=regularizers.L2(), initializer=init_ops.ones_initializer(), name="kernel") bias = tf.compat.v1.get_variable( shape=[self.units,], initializer=init_ops.zeros_initializer(), name="bias") out = tf.compat.v1.math.matmul(out, kernel) out = tf.compat.v1.nn.bias_add(out, bias) with 
tf.compat.v1.variable_scope("dense_two"): kernel = tf.compat.v1.get_variable( shape=[out.shape[-1], self.units], regularizer=regularizers.L2(), initializer=init_ops.ones_initializer(), name="kernel") bias = tf.compat.v1.get_variable( shape=[self.units,], initializer=init_ops.zeros_initializer(), name="bias") out = tf.compat.v1.math.matmul(out, kernel) out = tf.compat.v1.nn.bias_add(out, bias) return out # Create a layer that can be used as a standard keras layer layer = WrappedDoubleDenseLayer(10) # call the layer on inputs layer(...) # Variables created/used within the scope will be tracked by the layer layer.weights layer.trainable_variables # Regularization losses will be captured in layer.losses after a call, # just like any other Keras layer reg_losses = layer.losses ``` Regularization losses: Any regularizers specified in the `get_variable` calls or `compat.v1.layer` creations will get captured if they occur in your decorated method and the method belongs to a `tf.keras.Layer`/`tf.keras.Module`. Regularization losses are accessible in `layer.losses` after a call just like in a standard Keras layer, and will be captured by any model that includes this layer. Regularization losses attached to Keras layers/models set as attributes of your layer will also get captured in the standard Keras regularization loss tracking. (While Modules have no `losses` property, no-arg callables to compute the regularization losses may be tracked as dict values in a private `module._tf1_style_var_store._regularizers` property, but only for `tf.compat.v1.layers` and `get_variable` weights and not for any other nested Keras layers/tf.Modules) Variable scope / variable reuse: variable-scope based reuse in your decorated method will be respected, and work like variable-scope based reuse in TF1. 
Variable Names/Pre-trained checkpoint loading: Variable naming from get_variable and `compat.v1.layer` layers will match the TF1 names, so you should be able to re-use your old name-based checkpoints. Variable naming for Keras layers/models or for variables created by `tf.Variable` may change when going to eager execution. Training Arg if you decorate `layer.call`: Keras will pass a `training` arg to this layer if `call` contains a `training` arg or a `**kwargs` varargs in its call signature, similarly to how keras passes `training` to other layers in TF2 that have similar signatures in their `call` implementations. See more details in the docs on `tf.keras.layers.Layer` to understand what will be passed and when. Note: tf.compat.v1.layers are usually not called with `training=None`, so the training arg to `forward_pass` might not feed through to them unless you pass it to their calls explicitly. Caveats: * TF2 will not prune unused variable updates (or unused outputs). You may need to adjust your forward pass code to avoid computations or variable updates that you don't intend to use. * Avoid Nesting variable creation in tf.function inside of methods decorated with `track_tf1_style_variables` While the method may safely be used from inside a `tf.function`, using a function inside of a decorated method may break the variable scoping. * This decorator only adds implicit tracking for legacy tf1-style get_variable / compat.v1.layers usage. If you would like to use nested Keras layers/models inside the decorated method, you need to assign them as attributes of your layer so that Keras/Module's standard object-oriented weights (and loss tracking for layers) will kick in. See the intro to modules, layers, and models [guide](https://www.tensorflow.org/guide/intro_to_modules) for more info Args: method: The method to decorate. This should belong to a custom tf.Module, tf.keras.layers.Layer, or tf.keras.Model. Returns: The decorated method. 
""" def _method_wrapper(self, *args, **kwargs): var_store = getattr(self, "_tf1_style_var_store", None) if not var_store: if not isinstance(self, tf.Module): # Raise an error if you incorrectly decorate a method # that is not a method of a Module, Layer, or Model: raise ValueError( "`@tf.compat.v1.keras.utils.track_tf1_layers_and_variables` must " "be applied to a method of a subclassed `tf.Module`, " "`tf.keras.layers.Layer`, or `tf.keras.Model` and which takes " "`self` as the first argument. But, the first argument passed " "to the decorated method was {}, which does not " "extend Module, Layer, or Model.".format(self)) var_store = _EagerVariableStore() self._tf1_style_var_store = var_store # pylint: disable=protected-access existing_regularized_variables = set(var_store._regularizers.keys()) # pylint: disable=protected-access with vs.with_variable_store(var_store): out = method(self, *args, **kwargs) # If this is a layer method, add the regularization losses # to the layer for any newly-created regularized variables if isinstance(self, base_layer.Layer): for var_name, regularizer in var_store._regularizers.items(): # pylint: disable=protected-access if var_name not in existing_regularized_variables: self.add_loss(regularizer) return out return tf.__internal__.decorator.make_decorator( target=method, decorator_func=_method_wrapper) class VariableScopeLayer(base_layer.Layer): """Wrapper Layer to capture `compat.v1.get_variable` and `compat.v1.layers`. This shim layer allows using large sets of TF1 model-forward-pass code as a Keras layer that works in TF2 with TF2 behaviors enabled. It will capture both weights and regularization losses of your forward-pass code. To use it, override this class and put your TF1 model's forward pass inside your implementation for `forward_pass`. (Unlike standard custom Keras layers, do not override `call`.) Below are some examples, and then more details on the functionality of this shim layer to wrap TF1 model forward passes. 
Example of capturing tf.compat.v1.layer-based modeling code as a Keras layer: ```python class WrappedDoubleDenseLayer(variable_scope_shim.VariableScopeLayer): def __init__(self, units, *args, **kwargs): super().__init__(*args, **kwargs) self.units = units def forward_pass(self, inputs): with variable_scope.variable_scope("double_dense_layer"): out = tf.compat.v1.layers.dense( inputs, self.units, name="dense_one", kernel_initializer=tf.compat.v1.random_normal_initializer, kernel_regularizer="l2") out = tf.compat.v1.layers.dense( out, self.units, name="dense_two", kernel_initializer=tf.compat.v1.random_normal_initializer(), kernel_regularizer="l2") return out # Create a layer that can be used as a standard keras layer layer = WrappedDoubleDenseLayer(10) # call the layer on inputs layer(...) # Variables created/used within the scope will be tracked by the layer layer.weights layer.trainable_variables # Regularization losses will be captured in layer.losses after a call, # just like any other Keras layer reg_losses = layer.losses ``` Example of capturing tf.compat.v1.get_variable-based modeling code as a Keras layer: ```python class WrappedDoubleDenseLayer(variable_scope_shim.VariableScopeLayer): def __init__(self, units, *args, **kwargs): super().__init__(*args, **kwargs) self.units = units def forward_pass(self, inputs): out = inputs with tf.compat.v1.variable_scope("double_dense_layer"): with tf.compat.v1.variable_scope("dense_one"): # The weights are created with a `regularizer`, # so the layer should track their regularization losses kernel = tf.compat.v1.get_variable( shape=[out.shape[-1], self.units], regularizer=regularizers.L2(), initializer=init_ops.ones_initializer(), name="kernel") bias = tf.compat.v1.get_variable( shape=[self.units,], initializer=init_ops.zeros_initializer(), name="bias") out = tf.compat.v1.math.matmul(out, kernel) out = tf.compat.v1.nn.bias_add(out, bias) with tf.compat.v1.variable_scope("dense_two"): kernel = tf.compat.v1.get_variable( 
shape=[out.shape[-1], self.units], regularizer=regularizers.L2(), initializer=init_ops.ones_initializer(), name="kernel") bias = tf.compat.v1.get_variable( shape=[self.units,], initializer=init_ops.zeros_initializer(), name="bias") out = tf.compat.v1.math.matmul(out, kernel) out = tf.compat.v1.nn.bias_add(out, bias) return out # Create a layer that can be used as a standard keras layer layer = WrappedDoubleDenseLayer(10) # call the layer on inputs layer(...) # Variables created/used within the scope will be tracked by the layer layer.weights layer.trainable_variables # Regularization losses will be captured in layer.losses after a call, # just like any other Keras layer reg_losses = layer.losses ``` Regularization losses: Any regularizers specified in the `get_variable` calls or `compat.v1.layer` creations will get captured by this wrapper layer. Regularization losses are accessible in `layer.losses` after a call just like in a standard Keras layer, and will be captured by any model that includes this layer. Regularization losses attached to Keras layers/models set as attributes of your layer will also get captured in the standard Keras regularization loss tracking. Variable scope / variable reuse: variable-scope based reuse in the `forward_pass` will be respected, and work like variable-scope based reuse in TF1. Variable Names/Pre-trained checkpoint loading: Variable naming from get_variable and `compat.v1.layer` layers will match the TF1 names, so you should be able to re-use your old name-based checkpoints. Variable naming for Keras layers/models or for variables created by `tf.Variable` may change when going to eager execution. Training Arg in `forward_pass`: Keras will pass a `training` arg to this layer if `forward_pass` contains a `training` arg or a `**kwargs` varargs in its call signature, similarly to how keras passes `training` to other layers in TF2 that have similar signatures in their `call` implementations. 
See more details in the docs on `tf.keras.layers.Layer` to understand what will be passed and when. Note: tf.compat.v1.layers are usually not called with `training=None`, so the training arg to `forward_pass` might not feed through to them unless you pass it to their calls explicitly. Call signature of the forward pass: The semantics of the forward pass signature match the standard Keras layer `call` signature, including how Keras decides when to pass in a `training` arg., and the semantics applied to the first positional arg in the call signature. Caveats: * TF2 will not prune unused variable updates (or unused outputs). You may need to adjust your forward pass code to avoid computations or variable updates that you don't intend to use. (E.g. by adding a flag to the `forward_pass` call signature and branching on it). * Avoid Nesting variable creation in tf.function inside of `forward_pass` While the layer may safely be used from inside a `tf.function`, using a function inside of `forward_pass` will break the variable scoping. * If you would like to nest Keras layers/models or other `VariableScopeLayer`s directly in `forward_pass`, you need to assign them as attributes of your layer so that Keras's standard object-oriented weights and loss tracking will kick in. See the intro to modules, layers, and models [guide](https://www.tensorflow.org/guide/intro_to_modules) for more info """ @property @layer_utils.cached_per_instance def _call_full_argspec(self): # Argspec inspection is expensive and the call spec is used often, so it # makes sense to cache the result. return tf_inspect.getfullargspec(self.forward_pass) def forward_pass(self, *args, **kwargs): """Implement this method. It should include your model forward pass.""" raise NotImplementedError @track_tf1_style_variables def call(self, *args, **kwargs): return self.forward_pass(*args, **kwargs)
38,332
40.262648
108
py
keras
keras-master/keras/legacy_tf_layers/__init__.py
0
0
0
py
keras
keras-master/keras/legacy_tf_layers/core_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.core.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import collections import platform from absl.testing import parameterized import numpy as np from tensorflow.python.framework import test_util from keras import combinations from keras.legacy_tf_layers import core as core_layers from tensorflow.python.ops import variable_scope class DenseTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testDenseProperties(self): dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense') self.assertEqual(dense.units, 2) self.assertEqual(dense.activation, tf.nn.relu) self.assertEqual(dense.kernel_regularizer, None) self.assertEqual(dense.bias_regularizer, None) self.assertEqual(dense.activity_regularizer, None) self.assertEqual(dense.use_bias, True) # Test auto-naming dense = core_layers.Dense(2, activation=tf.nn.relu) dense.apply(tf.random.uniform((5, 2))) self.assertEqual(dense.name, 'dense_1') dense = core_layers.Dense(2, activation=tf.nn.relu) dense.apply(tf.random.uniform((5, 2))) self.assertEqual(dense.name, 'dense_2') @test_util.run_deprecated_v1 def testVariableInput(self): with self.cached_session(): 
v = tf.compat.v1.get_variable( 'X', initializer=tf.compat.v1.zeros_initializer(), shape=(1, 1)) x = core_layers.Dense(1)(v) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual(x, [[0.0]]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testCall(self): dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense') inputs = tf.random.uniform((5, 4), seed=1) outputs = dense(inputs) self.assertListEqual([5, 2], outputs.get_shape().as_list()) self.assertListEqual(dense.variables, [dense.kernel, dense.bias]) self.assertListEqual(dense.trainable_variables, [dense.kernel, dense.bias]) self.assertListEqual(dense.non_trainable_variables, []) if not tf.executing_eagerly(): self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 2) self.assertEqual(dense.kernel.name, 'my_dense/kernel:0') self.assertEqual(dense.bias.name, 'my_dense/bias:0') @test_util.assert_no_new_pyobjects_executing_eagerly def testNoEagerLeak(self): # Tests that repeatedly constructing and building a Layer does not leak # Python objects. 
inputs = tf.random.uniform((5, 4), seed=1) core_layers.Dense(5)(inputs) core_layers.Dense(2, activation=tf.nn.relu, name='my_dense')(inputs) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testCallTensorDot(self): dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense') inputs = tf.random.uniform((5, 4, 3), seed=1) outputs = dense(inputs) self.assertListEqual([5, 4, 2], outputs.get_shape().as_list()) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoBias(self): dense = core_layers.Dense(2, use_bias=False, name='my_dense') inputs = tf.random.uniform((5, 2), seed=1) _ = dense(inputs) self.assertListEqual(dense.variables, [dense.kernel]) self.assertListEqual(dense.trainable_variables, [dense.kernel]) self.assertListEqual(dense.non_trainable_variables, []) if not tf.executing_eagerly(): self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 1) self.assertEqual(dense.kernel.name, 'my_dense/kernel:0') self.assertEqual(dense.bias, None) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNonTrainable(self): dense = core_layers.Dense(2, trainable=False, name='my_dense') inputs = tf.random.uniform((5, 2), seed=1) _ = dense(inputs) self.assertListEqual(dense.variables, [dense.kernel, dense.bias]) self.assertListEqual(dense.non_trainable_variables, [dense.kernel, dense.bias]) self.assertListEqual(dense.trainable_variables, []) if not tf.executing_eagerly(): self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testOutputShape(self): dense = core_layers.Dense(7, activation=tf.nn.relu, name='my_dense') inputs = tf.random.uniform((5, 3), seed=1) outputs = dense.apply(inputs) self.assertEqual(outputs.get_shape().as_list(), [5, 7]) inputs = tf.random.uniform((5, 2, 3), seed=1) outputs = dense(inputs) 
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7]) inputs = tf.random.uniform((1, 2, 4, 3), seed=1) outputs = dense.apply(inputs) self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7]) @test_util.run_deprecated_v1 def testCallOnPlaceHolder(self): inputs = tf.compat.v1.placeholder(dtype=tf.float32) dense = core_layers.Dense(4, name='my_dense') with self.assertRaises(ValueError): dense(inputs) inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None]) dense = core_layers.Dense(4, name='my_dense') with self.assertRaises(ValueError): dense(inputs) inputs = tf.compat.v1.placeholder( dtype=tf.float32, shape=[None, None, None]) dense = core_layers.Dense(4, name='my_dense') with self.assertRaises(ValueError): dense(inputs) inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3]) dense = core_layers.Dense(4, name='my_dense') dense(inputs) inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None, 3]) dense = core_layers.Dense(4, name='my_dense') dense(inputs) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testActivation(self): dense = core_layers.Dense(2, activation=tf.nn.relu, name='dense1') inputs = tf.random.uniform((5, 3), seed=1) outputs = dense(inputs) if not tf.executing_eagerly(): self.assertEqual(outputs.op.name, 'dense1/Relu') dense = core_layers.Dense(2, name='dense2') inputs = tf.random.uniform((5, 3), seed=1) outputs = dense(inputs) if not tf.executing_eagerly(): self.assertEqual(outputs.op.name, 'dense2/BiasAdd') @test_util.run_deprecated_v1 def testActivityRegularizer(self): regularizer = lambda x: tf.reduce_sum(x) * 1e-3 dense = core_layers.Dense( 2, name='my_dense', activity_regularizer=regularizer) inputs = tf.random.uniform((5, 3), seed=1) _ = dense(inputs) loss_keys = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(len(loss_keys), 1) self.assertListEqual(dense.losses, loss_keys) @test_util.run_deprecated_v1 def 
testKernelRegularizer(self): regularizer = lambda x: tf.reduce_sum(x) * 1e-3 dense = core_layers.Dense( 2, name='my_dense', kernel_regularizer=regularizer) inputs = tf.random.uniform((5, 3), seed=1) _ = dense(inputs) loss_keys = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(len(loss_keys), 1) self.evaluate([v.initializer for v in dense.variables]) self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys)) @test_util.run_deprecated_v1 def testKernelRegularizerWithReuse(self): regularizer = lambda x: tf.reduce_sum(x) * 1e-3 inputs = tf.random.uniform((5, 3), seed=1) _ = core_layers.dense( inputs, 2, name='my_dense', kernel_regularizer=regularizer) self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)), 1) _ = core_layers.dense( inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True) self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)), 1) @test_util.run_deprecated_v1 def testBiasRegularizer(self): regularizer = lambda x: tf.reduce_sum(x) * 1e-3 dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer) inputs = tf.random.uniform((5, 3), seed=1) _ = dense(inputs) loss_keys = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(len(loss_keys), 1) self.evaluate([v.initializer for v in dense.variables]) self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys)) @test_util.run_deprecated_v1 def testFunctionalDense(self): with self.cached_session(): inputs = tf.random.uniform((5, 3), seed=1) outputs = core_layers.dense( inputs, 2, activation=tf.nn.relu, name='my_dense') self.assertEqual( len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)), 2) self.assertEqual(outputs.op.name, 'my_dense/Relu') @test_util.run_deprecated_v1 def testFunctionalDenseTwice(self): inputs = tf.random.uniform((5, 3), seed=1) 
core_layers.dense(inputs, 2) vars1 = _get_variable_dict_from_varstore().values() core_layers.dense(inputs, 2) vars2 = _get_variable_dict_from_varstore().values() self.assertEqual(len(vars1), 2) self.assertEqual(len(vars2), 4) # TODO(alive): get this to work in eager mode. def testFunctionalDenseTwiceReuse(self): with self.cached_session(): inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2, name='my_dense') vars1 = tf.compat.v1.trainable_variables() core_layers.dense(inputs, 2, name='my_dense', reuse=True) vars2 = tf.compat.v1.trainable_variables() self.assertEqual(vars1, vars2) # TODO(alive): get this to work in eager mode. def testFunctionalDenseTwiceReuseFromScope(self): with self.cached_session(): with tf.compat.v1.variable_scope('scope'): inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2, name='my_dense') vars1 = tf.compat.v1.trainable_variables() with tf.compat.v1.variable_scope('scope', reuse=True): core_layers.dense(inputs, 2, name='my_dense') vars2 = tf.compat.v1.trainable_variables() self.assertEqual(vars1, vars2) @test_util.run_deprecated_v1 def testFunctionalDenseInitializerFromScope(self): with tf.compat.v1.variable_scope( 'scope', initializer=tf.compat.v1.ones_initializer()), self.cached_session(): inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2) self.evaluate(tf.compat.v1.global_variables_initializer()) weights = _get_variable_dict_from_varstore() self.assertEqual(len(weights), 2) # Check that the matrix weights got initialized to ones (from scope). self.assertAllClose(weights['scope/dense/kernel'].read_value(), np.ones((3, 2))) # Check that the bias still got initialized to zeros. 
self.assertAllClose(weights['scope/dense/bias'].read_value(), np.zeros( (2))) def testFunctionalDenseWithCustomGetter(self): called = [0] def custom_getter(getter, *args, **kwargs): called[0] += 1 return getter(*args, **kwargs) with tf.compat.v1.variable_scope('test', custom_getter=custom_getter): inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2) self.assertEqual(called[0], 2) @test_util.run_deprecated_v1 def testFunctionalDenseInScope(self): with self.cached_session(): with tf.compat.v1.variable_scope('test'): inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2, name='my_dense') var_dict = _get_variable_dict_from_varstore() var_key = 'test/my_dense/kernel' self.assertEqual(var_dict[var_key].name, '%s:0' % var_key) with tf.compat.v1.variable_scope('test1') as scope: inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2, name=scope) var_dict = _get_variable_dict_from_varstore() var_key = 'test1/kernel' self.assertEqual(var_dict[var_key].name, '%s:0' % var_key) with tf.compat.v1.variable_scope('test2'): inputs = tf.random.uniform((5, 3), seed=1) core_layers.dense(inputs, 2) var_dict = _get_variable_dict_from_varstore() var_key = 'test2/dense/kernel' self.assertEqual(var_dict[var_key].name, '%s:0' % var_key) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testComputeOutputShape(self): dense = core_layers.Dense(2, activation=tf.nn.relu, name='dense1') ts = tf.TensorShape # pylint: disable=protected-access with self.assertRaises(ValueError): dense.compute_output_shape(ts(None)) with self.assertRaises(ValueError): dense.compute_output_shape(ts([])) with self.assertRaises(ValueError): dense.compute_output_shape(ts([1])) self.assertEqual( [None, 2], dense.compute_output_shape((None, 3)).as_list()) self.assertEqual( [None, 2], dense.compute_output_shape(ts([None, 3])).as_list()) self.assertEqual( [None, 4, 2], dense.compute_output_shape(ts([None, 4, 3])).as_list()) # pylint: 
enable=protected-access @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testConstraints(self): k_constraint = lambda x: x / tf.reduce_sum(x) b_constraint = lambda x: x / tf.reduce_max(x) dense = core_layers.Dense(2, kernel_constraint=k_constraint, bias_constraint=b_constraint) inputs = tf.random.uniform((5, 3), seed=1) dense(inputs) self.assertEqual(dense.kernel_constraint, k_constraint) self.assertEqual(dense.bias_constraint, b_constraint) def _get_variable_dict_from_varstore(): var_dict = variable_scope._get_default_variable_store()._vars # pylint: disable=protected-access sorted_var_dict = collections.OrderedDict( sorted(var_dict.items(), key=lambda t: t[0])) return sorted_var_dict class DropoutTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testDropoutProperties(self): dp = core_layers.Dropout(0.5, name='dropout') self.assertEqual(dp.rate, 0.5) self.assertEqual(dp.noise_shape, None) dp.apply(tf.ones(())) self.assertEqual(dp.name, 'dropout') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testBooleanLearningPhase(self): dp = core_layers.Dropout(0.5) inputs = tf.ones((5, 3)) dropped = dp.apply(inputs, training=True) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.global_variables_initializer()) np_output = self.evaluate(dropped) self.assertAlmostEqual(0., np_output.min()) dropped = dp.apply(inputs, training=False) np_output = self.evaluate(dropped) self.assertAllClose(np.ones((5, 3)), np_output) @test_util.run_deprecated_v1 def testDynamicLearningPhase(self): with self.cached_session() as sess: dp = core_layers.Dropout(0.5, seed=1) inputs = tf.ones((5, 5)) training = tf.compat.v1.placeholder(dtype='bool') dropped = dp.apply(inputs, training=training) self.evaluate(tf.compat.v1.global_variables_initializer()) np_output = sess.run(dropped, feed_dict={training: True}) self.assertAlmostEqual(0., np_output.min()) np_output = 
sess.run(dropped, feed_dict={training: False}) self.assertAllClose(np.ones((5, 5)), np_output) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testDynamicNoiseShape(self): inputs = tf.ones((5, 3, 2)) noise_shape = [None, 1, None] dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1) dropped = dp.apply(inputs, training=True) self.evaluate(tf.compat.v1.global_variables_initializer()) np_output = self.evaluate(dropped) self.assertAlmostEqual(0., np_output.min()) self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :]) def testCustomNoiseShape(self): inputs = tf.ones((5, 3, 2)) noise_shape = [5, 1, 2] dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1) dropped = dp.apply(inputs, training=True) self.evaluate(tf.compat.v1.global_variables_initializer()) np_output = self.evaluate(dropped) self.assertAlmostEqual(0., np_output.min()) self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :]) @test_util.run_deprecated_v1 def testFunctionalDropout(self): with self.cached_session(): inputs = tf.ones((5, 5)) dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1) self.evaluate(tf.compat.v1.global_variables_initializer()) np_output = self.evaluate(dropped) self.assertAlmostEqual(0., np_output.min()) dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1) np_output = self.evaluate(dropped) self.assertAllClose(np.ones((5, 5)), np_output) @test_util.run_deprecated_v1 def testDynamicRate(self): with self.cached_session() as sess: rate = tf.compat.v1.placeholder(dtype='float32', name='rate') dp = core_layers.Dropout(rate, name='dropout') inputs = tf.ones((5, 5)) dropped = dp.apply(inputs, training=True) self.evaluate(tf.compat.v1.global_variables_initializer()) np_output = sess.run(dropped, feed_dict={rate: 0.5}) self.assertAlmostEqual(0., np_output.min()) np_output = sess.run(dropped, feed_dict={rate: 0.0}) self.assertAllClose(np.ones((5, 5)), np_output) class FlattenTest(tf.test.TestCase): 
@test_util.run_deprecated_v1 def testCreateFlatten(self): with self.cached_session() as sess: x = tf.compat.v1.placeholder(shape=(None, 2, 3), dtype='float32') y = core_layers.Flatten()(x) np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))}) self.assertEqual(list(np_output.shape), [3, 6]) self.assertEqual(y.get_shape().as_list(), [None, 6]) x = tf.compat.v1.placeholder(shape=(1, 2, 3, 2), dtype='float32') y = core_layers.Flatten()(x) np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))}) self.assertEqual(list(np_output.shape), [1, 12]) self.assertEqual(y.get_shape().as_list(), [1, 12]) def testComputeShape(self): shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2)) self.assertEqual(shape.as_list(), [1, 12]) shape = core_layers.Flatten().compute_output_shape((None, 3, 2)) self.assertEqual(shape.as_list(), [None, 6]) shape = core_layers.Flatten().compute_output_shape((None, 3, None)) self.assertEqual(shape.as_list(), [None, None]) @test_util.run_deprecated_v1 def testDataFormat5d(self): np_input_channels_last = np.arange( 120, dtype='float32').reshape([1, 5, 4, 3, 2]) with self.test_session() as sess: x = tf.compat.v1.placeholder(shape=(1, 5, 4, 3, 2), dtype='float32') y = core_layers.Flatten(data_format='channels_last')(x) np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last}) x = tf.compat.v1.placeholder(shape=(1, 2, 5, 4, 3), dtype='float32') y = core_layers.Flatten(data_format='channels_first')(x) np_input_channels_first = np.transpose(np_input_channels_last, [0, 4, 1, 2, 3]) np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first}) self.assertAllEqual(np_output_cl, np_output_cf) @test_util.run_deprecated_v1 def testDataFormat4d(self): np_input_channels_last = np.arange( 24, dtype='float32').reshape([1, 4, 3, 2]) with self.test_session() as sess: x = tf.compat.v1.placeholder(shape=(1, 4, 3, 2), dtype='float32') y = core_layers.Flatten(data_format='channels_last')(x) np_output_cl = sess.run(y, feed_dict={x: 
np_input_channels_last}) x = tf.compat.v1.placeholder(shape=(1, 2, 4, 3), dtype='float32') y = core_layers.Flatten(data_format='channels_first')(x) np_input_channels_first = np.transpose(np_input_channels_last, [0, 3, 1, 2]) np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first}) self.assertAllEqual(np_output_cl, np_output_cf) @test_util.run_deprecated_v1 def testFunctionalFlatten(self): x = tf.compat.v1.placeholder(shape=(None, 2, 3), dtype='float32') y = core_layers.flatten(x, name='flatten') self.assertEqual(y.get_shape().as_list(), [None, 6]) @test_util.run_deprecated_v1 def testFlatten0D(self): x = tf.compat.v1.placeholder(shape=(None,), dtype='float32') y = core_layers.Flatten()(x) with self.cached_session() as sess: np_output = sess.run(y, feed_dict={x: np.zeros((5,))}) self.assertEqual(list(np_output.shape), [5, 1]) self.assertEqual(y.shape.as_list(), [None, 1]) @test_util.run_deprecated_v1 def testFlattenUnknownAxes(self): with self.cached_session() as sess: x = tf.compat.v1.placeholder(shape=(5, None, None), dtype='float32') y = core_layers.Flatten()(x) np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))}) self.assertEqual(list(np_output.shape), [5, 6]) self.assertEqual(y.get_shape().as_list(), [5, None]) x = tf.compat.v1.placeholder(shape=(5, None, 2), dtype='float32') y = core_layers.Flatten()(x) np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))}) self.assertEqual(list(np_output.shape), [5, 6]) self.assertEqual(y.get_shape().as_list(), [5, None]) @test_util.run_deprecated_v1 def testFlattenLargeDim(self): if any(platform.win32_ver()): self.skipTest('values are truncated on windows causing test failures') x = tf.compat.v1.placeholder(shape=(None, 21316, 21316, 80), dtype='float32') y = core_layers.Flatten()(x) self.assertEqual(y.shape.as_list(), [None, 21316 * 21316 * 80]) @test_util.run_deprecated_v1 def testFlattenLargeBatchDim(self): batch_size = np.iinfo(np.int32).max + 10 x = tf.compat.v1.placeholder( shape=(batch_size, 
None, None, 1), dtype='float32') y = core_layers.Flatten()(x) self.assertEqual(y.shape.as_list(), [batch_size, None]) if __name__ == '__main__': tf.test.main()
23,137
40.765343
99
py
keras
keras-master/keras/legacy_tf_layers/normalization.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=g-classes-have-attributes """Contains the normalization layer classes and their functional aliases.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import warnings from keras.layers.normalization import batch_normalization_v1 from keras.legacy_tf_layers import base from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util.tf_export import tf_export @keras_export(v1=['keras.__internal__.legacy.layers.BatchNormalization']) @tf_export(v1=['layers.BatchNormalization']) class BatchNormalization(batch_normalization_v1.BatchNormalization, base.Layer): """Batch Normalization layer from (Ioffe et al., 2015). Keras APIs handle BatchNormalization updates to the moving_mean and moving_variance as part of their `fit()` and `evaluate()` loops. However, if a custom training loop is used with an instance of `Model`, these updates need to be explicitly included. Here's a simple example of how it can be done: ```python # model is an instance of Model that contains BatchNormalization layer. 
update_ops = model.get_updates_for(None) + model.get_updates_for(features) train_op = optimizer.minimize(loss) train_op = tf.group([train_op, update_ops]) ``` Args: axis: An `int` or list of `int`, the axis or axes that should be normalized, typically the features axis/axes. For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a list of axes is provided, each axis in `axis` will be normalized simultaneously. Default is `-1` which uses the last axis. Note: when using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and `moving_variance` variables are the same rank as the input Tensor, with dimension size 1 in all reduced (non-axis) dimensions). momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling can be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: An optional projection function to be applied to the `beta` weight after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. gamma_constraint: An optional projection function to be applied to the `gamma` weight after being updated by an `Optimizer`. 
renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `None` or `True`, use a faster, fused implementation if possible. If `False`, use the system recommended implementation. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform "Ghost Batch Normalization", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. 
For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. name: A string, the name of the layer. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf)) Batch Renormalization - Towards Reducing Minibatch Dependence in Batch-Normalized Models: [Ioffe, 2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models) ([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf)) @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.BatchNormalization`. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python bn = tf.compat.v1.layers.BatchNormalization() ``` After: ```python bn = tf.keras.layers.BatchNormalization() ``` #### How to Map Arguments TF1 Arg Name | TF2 Arg Name | Note :------------------------ | :------------------------ | :--------------- `name` | `name` | Layer base class `trainable` | `trainable` | Layer base class `axis` | `axis` | - `momentum` | `momentum` | - `epsilon` | `epsilon` | - `center` | `center` | - `scale` | `scale` | - `beta_initializer` | `beta_initializer` | - `gamma_initializer` | `gamma_initializer` | - `moving_mean_initializer` | `moving_mean_initializer` | - `beta_regularizer` | `beta_regularizer' | - `gamma_regularizer` | `gamma_regularizer' | - `beta_constraint` | `beta_constraint' | - `gamma_constraint` | `gamma_constraint' | - `renorm` | Not supported | - `renorm_clipping` | Not supported | - `renorm_momentum` | Not supported | - `fused` | Not supported | - `virtual_batch_size` | Not supported | - `adjustment` | Not supported | - @end_compatibility """ def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True, beta_initializer=tf.compat.v1.zeros_initializer(), gamma_initializer=tf.compat.v1.ones_initializer(), moving_mean_initializer=tf.compat.v1.zeros_initializer(), moving_variance_initializer=tf.compat.v1.ones_initializer(), beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99, fused=None, trainable=True, virtual_batch_size=None, adjustment=None, name=None, **kwargs): super(BatchNormalization, self).__init__( axis=axis, momentum=momentum, epsilon=epsilon, center=center, scale=scale, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer, beta_regularizer=beta_regularizer, gamma_regularizer=gamma_regularizer, beta_constraint=beta_constraint, gamma_constraint=gamma_constraint, 
renorm=renorm, renorm_clipping=renorm_clipping, renorm_momentum=renorm_momentum, fused=fused, trainable=trainable, virtual_batch_size=virtual_batch_size, adjustment=adjustment, name=name, **kwargs) def call(self, inputs, training=False): return super(BatchNormalization, self).call(inputs, training=training) @keras_export(v1=['keras.__internal__.legacy.layers.batch_normalization']) @tf_export(v1=['layers.batch_normalization']) def batch_normalization(inputs, axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True, beta_initializer=tf.compat.v1.zeros_initializer(), gamma_initializer=tf.compat.v1.ones_initializer(), moving_mean_initializer=tf.compat.v1.zeros_initializer(), moving_variance_initializer=tf.compat.v1.ones_initializer(), beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, training=False, trainable=True, name=None, reuse=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99, fused=None, virtual_batch_size=None, adjustment=None): """Functional interface for the batch normalization layer from_config(Ioffe et al., 2015). Note: when training, the moving_mean and moving_variance need to be updated. By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they need to be executed alongside the `train_op`. Also, be sure to add any batch_normalization ops before getting the update_ops collection. Otherwise, update_ops will be empty, and training/inference will not work properly. For example: ```python x_norm = tf.compat.v1.layers.batch_normalization(x, training=training) # ... update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = optimizer.minimize(loss) train_op = tf.group([train_op, update_ops]) ``` Args: inputs: Tensor input. axis: An `int`, the axis that should be normalized (typically the features axis). For instance, after a `Convolution2D` layer with `data_format="channels_first"`, set `axis=1` in `BatchNormalization`. momentum: Momentum for the moving average. 
epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling can be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: An optional projection function to be applied to the `beta` weight after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. gamma_constraint: An optional projection function to be applied to the `gamma` weight after being updated by an `Optimizer`. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (normalized with statistics of the current batch) or in inference mode (normalized with moving statistics). **NOTE**: make sure to set this parameter correctly, or else your training/inference will not work properly. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra variables during training. The inference is the same for either value of this parameter. 
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `None` or `True`, use a faster, fused implementation if possible. If `False`, use the system recommended implementation. virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform "Ghost Batch Normalization", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. 
References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf)) Batch Renormalization - Towards Reducing Minibatch Dependence in Batch-Normalized Models: [Ioffe, 2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models) ([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf)) @compatibility(TF2) This API is not compatible with eager execution or `tf.function`. Please refer to [tf.layers section of the migration guide] (https://www.tensorflow.org/guide/migrate#models_based_on_tflayers) to migrate a TensorFlow v1 model to Keras. The corresponding TensorFlow v2 layer is `tf.keras.layers.BatchNormalization`. The batch updating pattern with `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` should not be used in native TF2. Consult the `tf.keras.layers.BatchNormalization` documentation for further information. #### Structural Mapping to Native TF2 None of the supported arguments have changed name. 
Before: ```python x_norm = tf.compat.v1.layers.batch_normalization(x) ``` After: To migrate code using TF1 functional layers use the [Keras Functional API] (https://www.tensorflow.org/guide/keras/functional): ```python x = tf.keras.Input(shape=(28, 28, 1),) y = tf.keras.layers.BatchNormalization()(x) model = tf.keras.Model(x, y) ``` #### How to Map Arguments TF1 Arg Name | TF2 Arg Name | Note :------------------------ | :------------------------ | :--------------- `name` | `name` | Layer base class `trainable` | `trainable` | Layer base class `axis` | `axis` | - `momentum` | `momentum` | - `epsilon` | `epsilon` | - `center` | `center` | - `scale` | `scale` | - `beta_initializer` | `beta_initializer` | - `gamma_initializer` | `gamma_initializer` | - `moving_mean_initializer` | `moving_mean_initializer` | - `beta_regularizer` | `beta_regularizer' | - `gamma_regularizer` | `gamma_regularizer' | - `beta_constraint` | `beta_constraint' | - `gamma_constraint` | `gamma_constraint' | - `renorm` | Not supported | - `renorm_clipping` | Not supported | - `renorm_momentum` | Not supported | - `fused` | Not supported | - `virtual_batch_size` | Not supported | - `adjustment` | Not supported | - @end_compatibility """ warnings.warn( '`tf.layers.batch_normalization` is deprecated and ' 'will be removed in a future version. ' 'Please use `tf.keras.layers.BatchNormalization` instead. 
' 'In particular, `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` ' 'should not be used (consult the `tf.keras.layers.BatchNormalization` ' 'documentation).') layer = BatchNormalization( axis=axis, momentum=momentum, epsilon=epsilon, center=center, scale=scale, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer, beta_regularizer=beta_regularizer, gamma_regularizer=gamma_regularizer, beta_constraint=beta_constraint, gamma_constraint=gamma_constraint, renorm=renorm, renorm_clipping=renorm_clipping, renorm_momentum=renorm_momentum, fused=fused, trainable=trainable, virtual_batch_size=virtual_batch_size, adjustment=adjustment, name=name, _reuse=reuse, _scope=name) return layer.apply(inputs, training=training) # Aliases BatchNorm = BatchNormalization batch_norm = batch_normalization
21,872
46.446855
138
py
keras
keras-master/keras/legacy_tf_layers/variable_scope_shim_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for variable store.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf import gc import threading from absl.testing import parameterized import numpy from tensorflow.python.framework import test_util from keras import combinations from keras import regularizers from keras.engine import input_layer as input_layer_module from keras.engine import training as training_module from keras.layers import core from keras.legacy_tf_layers import core as core_layers from keras.legacy_tf_layers import variable_scope_shim from tensorflow.python.ops import variable_scope def run_inside_wrap_function_in_eager_mode(graph_function): """Decorator to execute the same graph code in eager and graph modes. In graph mode, we just execute the graph_function passed as argument. In eager mode, we wrap the function using wrap_function and then execute the wrapped result. 
Args: graph_function: python function containing graph code to be wrapped Returns: decorated function """ def wrap_and_execute(self): store = variable_scope_shim._EagerVariableStore() with variable_scope.with_variable_store(store): # use the original function graph_function(self) return wrap_and_execute class VariableScopeTest(tf.test.TestCase): def tearDown(self): gc.collect() # This will only contain uncollectable garbage, i.e. reference cycles # involving objects with __del__ defined. self.assertEqual(0, len(gc.garbage)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVar(self): vs = variable_scope._get_default_variable_store() v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) self.assertIs(v, v1) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNameExists(self): vs = variable_scope._get_default_variable_store() # No check by default, so we can both create and get existing names. v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) self.assertIs(v, v1) self.assertIsNot(v, vs.get_variable("u", [1], reuse=False)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNamelessStore(self): vs = variable_scope._get_default_variable_store() vs.get_variable("v1", [2]) vs.get_variable("v2", [2]) expected_names = ["%s:0" % name for name in ["v1", "v2"]] self.assertEqual( set(expected_names), set(v.name for v in vs._vars.values())) # TODO(mihaimaruseac): Not converted to use wrap_function because of # TypeError: Expected tf.group() expected Tensor arguments not 'None' with # type '<type 'NoneType'>' @test_util.run_in_graph_and_eager_modes def testVarScopeInitializer(self): init = tf.compat.v1.constant_initializer(0.3) with tf.compat.v1.variable_scope("tower0") as tower: with tf.compat.v1.variable_scope("foo", initializer=init): v = tf.compat.v1.get_variable("v", []) self.evaluate(tf.compat.v1.variables_initializer([v])) 
self.assertAllClose(self.evaluate(v.value()), 0.3) with tf.compat.v1.variable_scope(tower, initializer=init): w = tf.compat.v1.get_variable("w", []) self.evaluate(tf.compat.v1.variables_initializer([w])) self.assertAllClose(self.evaluate(w.value()), 0.3) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeConstraint(self): constraint = lambda x: 0. * x with tf.compat.v1.variable_scope("tower1") as tower: with tf.compat.v1.variable_scope("foo", constraint=constraint): v = tf.compat.v1.get_variable("v", []) self.assertIsNotNone(v.constraint) with tf.compat.v1.variable_scope(tower, constraint=constraint): w = tf.compat.v1.get_variable("w", []) self.assertIsNotNone(w.constraint) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeDType(self): with tf.compat.v1.variable_scope("tower2") as tower: with tf.compat.v1.variable_scope("foo", dtype=tf.float16): v = tf.compat.v1.get_variable("v", []) self.assertEqual(v.dtype.base_dtype, tf.float16) with tf.compat.v1.variable_scope(tower, dtype=tf.float16): w = tf.compat.v1.get_variable("w", []) self.assertEqual(w.dtype.base_dtype, tf.float16) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testInitFromNonTensorValue(self): v = tf.compat.v1.get_variable("v4", initializer=4, dtype=tf.int32) self.evaluate(tf.compat.v1.variables_initializer([v])) self.assertAllClose(self.evaluate(v.value()), 4) w = tf.compat.v1.get_variable( "w4", initializer=numpy.array([1, 2, 3]), dtype=tf.int64) self.evaluate(tf.compat.v1.variables_initializer([w])) self.assertAllClose(self.evaluate(w.value()), [1, 2, 3]) # A quirk to be revisited? 
error = ValueError if tf.executing_eagerly() else TypeError with self.assertRaises(error): tf.compat.v1.get_variable("x4", initializer={}) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testInitFromNonInitializer(self): # Test various dtypes with zeros initializer as following: types = [ tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32, tf.int64, tf.bool ] # Use different variable_name to distinguish various dtypes for (i, dtype) in enumerate(types): x = tf.compat.v1.get_variable( name="xx%d" % i, shape=(3, 4), dtype=dtype) y = tf.compat.v1.get_variable( name="yy%d" % i, shape=(3, 4), dtype=dtype, initializer=tf.compat.v1.zeros_initializer(dtype=dtype)) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value())) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeRegularizer(self): init = tf.compat.v1.constant_initializer(0.3) def regularizer1(v): return tf.reduce_mean(v) + 0.1 def regularizer2(v): return tf.reduce_mean(v) + 0.2 with tf.compat.v1.variable_scope( "tower3", regularizer=regularizer1) as tower: with tf.compat.v1.variable_scope("foo", initializer=init): v = tf.compat.v1.get_variable("v", []) self.evaluate(tf.compat.v1.variables_initializer([v])) with tf.compat.v1.variable_scope(tower, initializer=init) as vs: tf.compat.v1.get_variable("u", []) vs.set_regularizer(regularizer2) tf.compat.v1.get_variable("w", []) # Next 3 variable not regularized to test disabling regularization. 
tf.compat.v1.get_variable( "x", [], regularizer=tf.compat.v1.no_regularizer) with tf.compat.v1.variable_scope( "baz", regularizer=tf.compat.v1.no_regularizer): tf.compat.v1.get_variable("y", []) vs.set_regularizer(tf.compat.v1.no_regularizer) tf.compat.v1.get_variable("z", []) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testInitializeFromValue(self): init = tf.constant(0.1) w = tf.compat.v1.get_variable("v", initializer=init) self.evaluate(tf.compat.v1.variables_initializer([w])) self.assertAllClose(self.evaluate(w.value()), 0.1) with self.assertRaisesRegex(ValueError, "shape"): # We disallow explicit shape specification when initializer is constant. tf.compat.v1.get_variable("u", [1], initializer=init) with tf.compat.v1.variable_scope("foo", initializer=init): # Constant initializer can be passed through scopes if needed. v = tf.compat.v1.get_variable("v") self.evaluate(tf.compat.v1.variables_initializer([v])) self.assertAllClose(self.evaluate(v.value()), 0.1) # Check that non-float32 initializer creates a non-float32 variable. init = tf.constant(1, dtype=tf.int32) t = tf.compat.v1.get_variable("t", initializer=init) self.assertEqual(t.dtype.base_dtype, tf.int32) # Raise error if `initializer` dtype and `dtype` are not identical. with self.assertRaisesRegex(ValueError, "don't match"): tf.compat.v1.get_variable("s", initializer=init, dtype=tf.float64) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeGetOrCreateReuse(self): with self.cached_session(): def test_value(value): x = tf.constant(value) with tf.compat.v1.variable_scope( "testVarScopeGetOrCreateReuse_bar", reuse=tf.compat.v1.AUTO_REUSE): _ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x) with tf.compat.v1.variable_scope( "testVarScopeGetOrCreateReuse_bar", reuse=tf.compat.v1.AUTO_REUSE): _ = tf.compat.v1.get_variable("var", []) self.assertEqual(value, self.evaluate(x)) test_value(42.) # Variable is created. 
test_value(13.) # Variable is reused hereafter. test_value(17.) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeGetOrCreateReuseIgnoreFalse(self): with self.cached_session(): def test_value(value): x = tf.constant(value) with tf.compat.v1.variable_scope( "testVarScopeGetOrCreateReuse_bar", reuse=False): _ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x) # We need to ignore reuse=False in the shim, because the # code is expected to get rerun each time the user calls the shim. with tf.compat.v1.variable_scope( "testVarScopeGetOrCreateReuse_bar", reuse=False): _ = tf.compat.v1.get_variable("var", []) self.assertEqual(value, self.evaluate(x)) test_value(42.) # Variable is created. test_value(13.) # Variable is reused hereafter. test_value(17.) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScope(self): with self.cached_session(): with tf.name_scope("testVarOpScope1"): with tf.compat.v1.variable_scope("tower", "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "tower/w:0") with tf.name_scope("testVarOpScope2"): with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "default/w:0") with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "default_1/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self): with self.cached_session(): with tf.compat.v1.variable_scope(None, "defaultScope1"): with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "defaultScope1/layer/w:0") with tf.compat.v1.variable_scope(None, "defaultScope1"): with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "defaultScope1_1/layer/w:0") with 
tf.compat.v1.variable_scope(None, "defaultScope"): with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "defaultScope/layer/w:0") with tf.compat.v1.variable_scope(None, "defaultScope1"): with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "defaultScope1_2/layer/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeUniqueNamesWithJump(self): with self.cached_session(): with tf.compat.v1.variable_scope("default") as default: with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "default/layer/w:0") with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "default/layer_1/w:0") with tf.compat.v1.variable_scope(default): pass # No matter the jump in the middle, unique numbering continues. with tf.compat.v1.variable_scope(None, "layer"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "default/layer_2/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeReuse(self): with self.cached_session(): with tf.compat.v1.variable_scope("outer") as outer: with tf.compat.v1.variable_scope("tower", "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0") with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") with tf.compat.v1.variable_scope(outer, reuse=True) as outer: with tf.compat.v1.variable_scope("tower", "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0") with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeGetVar(self): 
with self.cached_session(): with tf.compat.v1.variable_scope("root"): with tf.compat.v1.variable_scope("towerA") as tower_a: va = tf.compat.v1.get_variable("v", [1]) self.assertEqual(va.name, "root/towerA/v:0") with tf.compat.v1.variable_scope(tower_a, reuse=True): va2 = tf.compat.v1.get_variable("v", [1]) self.assertIs(va2, va) with tf.compat.v1.variable_scope("towerB"): vb = tf.compat.v1.get_variable("v", [1]) self.assertEqual(vb.name, "root/towerB/v:0") with tf.compat.v1.variable_scope("towerA", reuse=True): va2 = tf.compat.v1.get_variable("v", [1]) self.assertIs(va2, va) with tf.compat.v1.variable_scope("foo"): with tf.compat.v1.variable_scope("bar"): v = tf.compat.v1.get_variable("v", [1]) self.assertEqual(v.name, "root/foo/bar/v:0") with tf.compat.v1.variable_scope(tower_a, reuse=True): va3 = tf.compat.v1.get_variable("v", [1]) self.assertIs(va, va3) with self.assertRaises(ValueError) as exc: with tf.compat.v1.variable_scope(tower_a, reuse=True): tf.compat.v1.get_variable("v", [2]) # Different shape. 
self.assertEqual("shape" in str(exc.exception), True) with self.assertRaises(ValueError) as exc: with tf.compat.v1.variable_scope(tower_a, reuse=True): tf.compat.v1.get_variable("v", [1], dtype=tf.int32) self.assertEqual("dtype" in str(exc.exception), True) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeOuterScope(self): with self.cached_session(): with tf.compat.v1.variable_scope("outer") as outer: pass with tf.compat.v1.variable_scope(outer): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope("default"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") with tf.compat.v1.variable_scope(outer, reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope("default", reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeNestedOuterScope(self): with self.cached_session(): with tf.compat.v1.variable_scope("outer") as outer: with tf.compat.v1.variable_scope(outer): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope("default"): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") with tf.compat.v1.variable_scope(outer, reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope("default", reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeReuseParam(self): with self.cached_session(): with tf.compat.v1.variable_scope("outer") as outer: with tf.compat.v1.variable_scope("tower", "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0") with 
tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") with tf.compat.v1.variable_scope(outer) as outer: with tf.compat.v1.variable_scope("tower", "default", reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0") outer.reuse_variables() with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeReuseError(self): with self.cached_session(): with self.assertRaises(ValueError): with tf.compat.v1.variable_scope(None, "default", reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeOuterScope(self): with self.cached_session(): with tf.compat.v1.variable_scope("outer") as outer: pass with tf.compat.v1.variable_scope(outer, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") with tf.compat.v1.variable_scope(outer, "default", reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") outer.reuse_variables() with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarOpScopeNestedOuterScope(self): with self.cached_session(): with tf.compat.v1.variable_scope("outer") as outer: with tf.compat.v1.variable_scope(outer, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", 
[]).name, "outer/default/w:0") with tf.compat.v1.variable_scope(outer, "default", reuse=True): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/w:0") with tf.compat.v1.variable_scope(None, "default", []): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testBasicWhenAuxiliaryNameScopeIsFalse(self): with self.cached_session(): with tf.compat.v1.variable_scope( "scope", auxiliary_name_scope=False) as scope: self.assertEqual( tf.compat.v1.get_variable("w", []).name, "scope/w:0") with tf.compat.v1.variable_scope(scope, auxiliary_name_scope=False): self.assertEqual( tf.compat.v1.get_variable("w1", []).name, "scope/w1:0") with tf.compat.v1.variable_scope("outer"): with tf.compat.v1.variable_scope( "inner", auxiliary_name_scope=False) as inner: self.assertEqual(inner.original_name_scope, "outer/") self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0") with tf.compat.v1.variable_scope( inner, auxiliary_name_scope=False) as inner1: self.assertEqual(inner1.original_name_scope, "outer/") self.assertEqual( tf.compat.v1.get_variable("w1", []).name, "outer/inner/w1:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self): with self.cached_session(): with tf.compat.v1.variable_scope( None, default_name="default", auxiliary_name_scope=False): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "default/w:0") with tf.compat.v1.variable_scope("outer"): with tf.compat.v1.variable_scope( None, default_name="default", auxiliary_name_scope=False) as inner: self.assertEqual(inner.original_name_scope, "outer/") self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/default/w:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self): with 
self.cached_session(): root_scope = tf.compat.v1.get_variable_scope() with tf.compat.v1.variable_scope( root_scope, auxiliary_name_scope=False): self.assertEqual(tf.compat.v1.get_variable("w", []).name, "w:0") with tf.compat.v1.variable_scope("outer"): with tf.compat.v1.variable_scope( root_scope, auxiliary_name_scope=False) as inner: self.assertEqual(inner.original_name_scope, "") self.assertEqual(tf.compat.v1.get_variable("w1", []).name, "w1:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testAuxiliaryNameScopeIsInvalid(self): with self.cached_session(): with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"): with tf.compat.v1.variable_scope( None, default_name="scope", auxiliary_name_scope="invalid"): pass with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"): with tf.compat.v1.variable_scope( "scope", auxiliary_name_scope="invalid"): pass with tf.compat.v1.variable_scope("scope") as scope: pass with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"): with tf.compat.v1.variable_scope( scope, auxiliary_name_scope="invalid"): pass @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testReuseScopeWithoutNameScopeCollision(self): # Github issue: #13429 with self.cached_session(): with tf.compat.v1.variable_scope("outer"): with tf.compat.v1.variable_scope("inner") as inner: pass with tf.compat.v1.variable_scope( inner, auxiliary_name_scope=False) as scope: with tf.name_scope(scope.original_name_scope): self.assertEqual( tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0") with tf.compat.v1.variable_scope("another"): with tf.compat.v1.variable_scope( inner, auxiliary_name_scope=False) as scope1: with tf.name_scope(scope1.original_name_scope): self.assertEqual( tf.compat.v1.get_variable("w1", []).name, "outer/inner/w1:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVarWithDevice(self): g = tf.Graph() varname_type = [] def 
device_func(op): if op.type in ["Variable", "VariableV2", "VarHandleOp"]: varname_type.append((op.name, op.get_attr("dtype"))) return "/device:GPU:0" with g.as_default(): with tf.compat.v1.device(device_func): _ = tf.compat.v1.get_variable("x", (100, 200)) _ = tf.compat.v1.get_variable( "y", dtype=tf.int64, initializer=numpy.arange(73)) self.assertEqual(varname_type[0], ("x", tf.float32)) self.assertEqual(varname_type[1], ("y", tf.int64)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithRefDtype(self): v = tf.compat.v1.get_variable("v", shape=[3, 4], dtype=tf.float32) # Ensure it is possible to do get_variable with a _ref dtype passed in. _ = tf.compat.v1.get_variable("w", shape=[5, 6], dtype=v.dtype) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithInitializerWhichTakesNoArgs(self): v = tf.compat.v1.get_variable("foo", initializer=lambda: [2]) self.assertEqual(v.name, "foo:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithInitializerWhichTakesOptionalArgs(self): v = tf.compat.v1.get_variable("foo", initializer=lambda x=True: [2]) self.assertEqual(v.name, "foo:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testTwoGraphs(self): def f(): g1 = tf.Graph() g2 = tf.Graph() with g1.as_default(): with g2.as_default(): with tf.compat.v1.variable_scope("_"): pass self.assertRaisesRegex(ValueError, "'_' is not a valid scope name", f) class VariableScopeWithCustomGetterTest(tf.test.TestCase): @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNonCallableGetterFails(self): with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"): with tf.compat.v1.variable_scope("scope0", custom_getter=3): tf.compat.v1.get_variable("name0") with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"): 
tf.compat.v1.get_variable("name0", custom_getter=3) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNoSideEffectsWithIdentityCustomGetter(self): called = [0] def custom_getter(getter, *args, **kwargs): called[0] += 1 return getter(*args, **kwargs) with tf.compat.v1.variable_scope( "scope", custom_getter=custom_getter) as scope: v = tf.compat.v1.get_variable("v", [1]) with tf.compat.v1.variable_scope(scope, reuse=True): v2 = tf.compat.v1.get_variable("v", [1]) with tf.compat.v1.variable_scope("new_scope") as new_scope: v3 = tf.compat.v1.get_variable("v3", [1]) with tf.compat.v1.variable_scope( new_scope, reuse=True, custom_getter=custom_getter): v4 = tf.compat.v1.get_variable("v3", [1]) self.assertIs(v, v2) self.assertIs(v3, v4) self.assertEqual(3, called[0]) # skipped one in the first new_scope @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testSynchronizationAndAggregationWithCustomGetter(self): called = [0] synchronization = tf.VariableSynchronization.AUTO aggregation = tf.compat.v1.VariableAggregation.NONE def custom_getter(getter, *args, **kwargs): called[0] += 1 # Verify synchronization and aggregation kwargs are as expected. 
      # Tail of a custom_getter defined above this chunk: verify that the
      # synchronization/aggregation kwargs reach the getter unchanged.
      self.assertEqual(kwargs["synchronization"], synchronization)
      self.assertEqual(kwargs["aggregation"], aggregation)
      return getter(*args, **kwargs)

    with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
      tf.compat.v1.get_variable("v", [1])
    self.assertEqual(1, called[0])

    with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
      # Non-default synchronization/aggregation must also be forwarded.
      synchronization = tf.VariableSynchronization.ON_READ
      aggregation = tf.compat.v1.VariableAggregation.MEAN
      tf.compat.v1.get_variable(
          "v1", [1],
          synchronization=synchronization,
          aggregation=aggregation)
    self.assertEqual(2, called[0])

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreator(self):
    """Variable-creator scopes nest, rewrite names, and see sync kwargs."""
    variable_names = []

    def creator_a(next_creator, **kwargs):
      # Outer creator: records the (possibly rewritten) variable name.
      variable_names.append(kwargs.get("name", ""))
      return next_creator(**kwargs)

    def creator_b(next_creator, **kwargs):
      # Inner creator runs first, so creator_a observes this forced name.
      kwargs["name"] = "forced_name"
      return next_creator(**kwargs)

    with tf.variable_creator_scope(creator_a):
      with tf.variable_creator_scope(creator_b):
        tf.compat.v1.Variable(1.0, name="one_name")

    self.assertEqual(variable_names[0], "forced_name")

    called = [False]

    def creater_c(next_creator, **kwargs):
      called[0] = True
      # Creators receive the synchronization/aggregation passed to
      # get_variable.
      self.assertEqual(kwargs["synchronization"],
                       tf.VariableSynchronization.ON_WRITE)
      self.assertEqual(kwargs["aggregation"],
                       tf.compat.v1.VariableAggregation.MEAN)
      return next_creator(**kwargs)

    with tf.variable_creator_scope(creater_c):
      tf.compat.v1.get_variable(
          "v", [],
          synchronization=tf.VariableSynchronization.ON_WRITE,
          aggregation=tf.compat.v1.VariableAggregation.MEAN)
    self.assertTrue(called[0])

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreatorNestingError(self):
    """Exiting a creator scope while a nested scope is active must raise."""

    def creator(next_creator, **kwargs):
      return next_creator(**kwargs)

    # Save the state so we can clean up at the end.
    graph = tf.compat.v1.get_default_graph()
    old_creator_stack = graph._variable_creator_stack
    try:
      scope = tf.variable_creator_scope(creator)
      scope.__enter__()
      with tf.variable_creator_scope(creator):
        # Out-of-order __exit__ while the inner scope is open is an error.
        with self.assertRaises(RuntimeError):
          scope.__exit__(None, None, None)
    finally:
      # Restore the graph's creator stack even if the test fails midway.
      graph._variable_creator_stack = old_creator_stack


class VariableScopeMultithreadedTest(tf.test.TestCase):
  """Variable scopes captured in one thread can be re-entered in another."""

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterMainScope(self):

    def thread_fn(graph, main_thread_scope):
      with graph.as_default():
        # Variable created with main scope will have prefix "main".
        with tf.compat.v1.variable_scope(main_thread_scope):
          with tf.compat.v1.variable_scope("foo"):
            v = tf.compat.v1.get_variable("v", [])
            self.assertEqual("main/foo/v:0", v.name)

        # Variable created outside main scope will not have prefix "main".
        with tf.compat.v1.variable_scope("bar"):
          v = tf.compat.v1.get_variable("v", [])
          self.assertEqual("bar/v:0", v.name)

    graph = tf.compat.v1.get_default_graph()
    with tf.compat.v1.variable_scope("main") as main_thread_scope:
      thread = threading.Thread(
          target=thread_fn, args=(graph, main_thread_scope))
      thread.start()
      thread.join()


class CompatV1TemplateScaleByY(variable_scope_shim.VariableScopeLayer):
  """Shim layer scaling its input by a `make_template`-shared scalar `y`."""

  def __init__(self, **kwargs):
    super().__init__(**kwargs)

    def my_op(x, scalar_name):
      # `y` is created once (initialized to 1.5) with an L2 regularizer and
      # reused on every subsequent template call.
      var1 = tf.compat.v1.get_variable(
          scalar_name,
          shape=[],
          regularizer=regularizers.L2(),
          initializer=tf.compat.v1.constant_initializer(1.5))
      return x * var1

    self.scale_by_y = tf.compat.v1.make_template(
        "scale_by_y", my_op, scalar_name="y")

  def forward_pass(self, inputs):
    with tf.compat.v1.variable_scope("foo"):
      return self.scale_by_y(inputs)


class VariableScopeModule(tf.Module):
  """Module that uses the shim."""

  @variable_scope_shim.track_tf1_style_variables
  def __call__(self, *args, **kwargs):
    with self.name_scope:
      return self.forward_pass(*args, **kwargs)

  def get_compat_v1_regularization_losses(self):
    """Dict w/ regularization losses from `get_variable`&`compat.v1.layers`."""
    return {name: regularizer() for name, regularizer
            in self._tf1_style_var_store._regularizers.items()}  # pylint: disable=protected-access


@combinations.generate(combinations.combine(mode=["eager"]))
class TF1VariableScopeLayerTest(tf.test.TestCase, parameterized.TestCase):
  """End-to-end tests of the TF1 variable-scope shim layer and module."""

  def test_get_variable(self):
    # Test the shim when using `get_variable` (and regularizers) directly

    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units

      def forward_pass(self, inputs, training=None):
        out = inputs
        with tf.compat.v1.variable_scope("dense_one"):
          # The weights are created with a `regularizer`,
          # so the layer should track their regularization losses
          kernel = tf.compat.v1.get_variable(
              shape=[out.shape[-1], self.units],
              regularizer=regularizers.L2(),
              initializer=tf.compat.v1.ones_initializer(),
              name="kernel")
          bias = tf.compat.v1.get_variable(
              shape=[self.units,],
              initializer=tf.compat.v1.zeros_initializer(),
              name="bias")
          out = tf.matmul(out, kernel)
          out = tf.nn.bias_add(out, bias)
        with tf.compat.v1.variable_scope("nested_scope"):
          with tf.compat.v1.variable_scope("dense_two"):
            kernel = tf.compat.v1.get_variable(
                shape=[out.shape[-1], self.units],
                regularizer=regularizers.L2(),
                initializer=tf.compat.v1.ones_initializer(),
                name="kernel")
            bias = tf.compat.v1.get_variable(
                shape=[self.units,],
                initializer=tf.compat.v1.zeros_initializer(),
                name="bias")
            out = tf.matmul(out, kernel)
            out = tf.nn.bias_add(out, bias)
        return out

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, regularization losses, + variables were made
    self.assertEqual(weights.keys(),
                     {"dense_one/bias:0",
                      "dense_one/kernel:0",
                      "nested_scope/dense_two/bias:0",
                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(tf.add_n(layer.losses), 1.5)

    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(tf.add_n(layer.losses), 6)

  def test_compat_v1_layer(self):
    # Test the shim when using `compat.v1` layers

    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units

      def forward_pass(self, inputs, training=None):
        out = core_layers.dense(
            inputs,
            self.units,
            name="dense_one",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        with tf.compat.v1.variable_scope("nested_scope"):
          out = core_layers.dense(
              out,
              self.units,
              name="dense_two",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
        return out

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(),
                     {"dense_one/bias:0",
                      "dense_one/kernel:0",
                      "nested_scope/dense_two/bias:0",
                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(tf.add_n(layer.losses), 1.5)

    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(tf.add_n(layer.losses), 6)

  def test_shim_exporting(self):
    # A shim-wrapped layer must be exportable via tf.saved_model.save.

    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units

      def forward_pass(self, inputs, training=None):
        out = core_layers.dense(
            inputs,
            self.units,
            name="dense_one",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        with tf.compat.v1.variable_scope("nested_scope"):
          out = core_layers.dense(
              out,
              self.units,
              name="dense_two",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
        return out

    layer = WrappedDenseLayer(10)
    layer(tf.ones(shape=(5, 5)))

    tmp_dir = self.get_temp_dir()
    tf.saved_model.save(layer, tmp_dir)

  def test_module_get_variable(self):
    # Test the module shim when using `get_variable` (and regularizers) directly

    class WrappedDenseLayer(VariableScopeModule):

      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units

      def forward_pass(self, inputs, training=None):
        out = inputs
        with tf.compat.v1.variable_scope("dense_one"):
          # The weights are created with a `regularizer`,
          # so the layer should track their regularization losses
          kernel = tf.compat.v1.get_variable(
              shape=[out.shape[-1], self.units],
              regularizer=regularizers.L2(),
              initializer=tf.compat.v1.ones_initializer(),
              name="kernel")
          bias = tf.compat.v1.get_variable(
              shape=[self.units,],
              initializer=tf.compat.v1.zeros_initializer(),
              name="bias")
          out = tf.matmul(out, kernel)
          out = tf.nn.bias_add(out, bias)
        with tf.compat.v1.variable_scope("nested_scope"):
          with tf.compat.v1.variable_scope("dense_two"):
            kernel = tf.compat.v1.get_variable(
                shape=[out.shape[-1], self.units],
                regularizer=regularizers.L2(),
                initializer=tf.compat.v1.ones_initializer(),
                name="kernel")
            bias = tf.compat.v1.get_variable(
                shape=[self.units,],
                initializer=tf.compat.v1.zeros_initializer(),
                name="bias")
            out = tf.matmul(out, kernel)
            out = tf.nn.bias_add(out, bias)
        return out

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, regularization losses, + variables were made
    self.assertEqual(weights.keys(),
                     {"dense_one/bias:0",
                      "dense_one/kernel:0",
                      "nested_scope/dense_two/bias:0",
                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    # NOTE(review): tf.add_n is called on a dict `.values()` view here —
    # confirm add_n accepts non-list iterables in the TF version under test.
    self.assertAllEqual(
        tf.add_n(layer.get_compat_v1_regularization_losses().values()), 1.5)

    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(
        tf.add_n(layer.get_compat_v1_regularization_losses().values()), 6)

  def test_module_compat_v1_layer(self):
    # Test the module shim when using `compat.v1` layers

    class WrappedDenseLayer(VariableScopeModule):

      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units

      def forward_pass(self, inputs, training=None):
        out = core_layers.dense(
            inputs,
            self.units,
            name="dense_one",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        with tf.compat.v1.variable_scope("nested_scope"):
          out = core_layers.dense(
              out,
              self.units,
              name="dense_two",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
        return out

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(),
                     {"dense_one/bias:0",
                      "dense_one/kernel:0",
                      "nested_scope/dense_two/bias:0",
                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(tf.add_n(
        layer.get_compat_v1_regularization_losses().values()), 1.5)

    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(tf.add_n(
        layer.get_compat_v1_regularization_losses().values()), 6)

  def test_shim_nesting(self):
    # Test that nesting the shim in itself works

    class NestedLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, name, *args, **kwargs):
        super().__init__(*args, name=name, **kwargs)
        self.units = units

      def forward_pass(self, inputs):
        out = inputs
        with tf.compat.v1.variable_scope(self.name):
          # The weights are created with a `regularizer`,
          # so the layer should track their regularization losses
          kernel = tf.compat.v1.get_variable(
              shape=[out.shape[-1], self.units],
              regularizer=regularizers.L2(1.0),
              initializer=tf.compat.v1.ones_initializer(),
              name="kernel")
          bias = tf.compat.v1.get_variable(
              shape=[self.units,],
              initializer=tf.compat.v1.initializers.zeros,
              name="bias")
          out = tf.linalg.matmul(out, kernel)
          out = tf.compat.v1.nn.bias_add(out, bias)
        return out

    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.dense_layer_a = None
        self.dense_layer_b = None

      def forward_pass(self, inputs):
        # Only create the nested tf.variable/module/layer/model if it has not
        # already been created!
        if not self.dense_layer_a:
          self.dense_layer_a = NestedLayer(self.units * 2, "dense_one")
        out = self.dense_layer_a(inputs)
        if not self.dense_layer_b:
          self.dense_layer_b = NestedLayer(self.units, "dense_two")
        out = self.dense_layer_b(out)
        return out

    layer = WrappedDenseLayer(5)
    out = layer(tf.ones(shape=(1, 3)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, losses, + variables were made
    # (Specifically: no double-counting of any weights or reg. losses
    # between nested components!)
    self.assertEqual({var.name for var in layer.trainable_weights},
                     {"dense_one/bias:0",
                      "dense_one/kernel:0",
                      "dense_two/bias:0",
                      "dense_two/kernel:0"})
    self.assertEqual({var.name for var in layer.dense_layer_a.weights},
                     {"dense_one/bias:0",
                      "dense_one/kernel:0"})
    self.assertEqual({var.name for var in layer.dense_layer_b.weights},
                     {"dense_two/bias:0",
                      "dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 30)
    self.assertAllEqual(tf.add_n(layer.dense_layer_a.losses), 30)
    self.assertAllEqual(tf.add_n(layer.dense_layer_b.losses), 50)
    self.assertAllEqual(tf.add_n(layer.losses), 80)

    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(3, 10)) * 2)
    weights["dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 5)) * 2)
    out = layer(tf.ones(shape=(1, 3)))
    self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 120)
    self.assertAllEqual(tf.add_n(layer.losses), 320)

  def test_compat_v1_make_template_in_shim_eager(self):
    # Test the shim when using `compat.v1.make_template`
    # Verify it works correctly in eager
    layer = CompatV1TemplateScaleByY()
    for _ in range(3):
      # Use multiple calls to verify that no new weights get created
      self.assertAllEqual(layer(tf.ones(shape=(2, 3))),
                          tf.constant(1.5, shape=(2, 3)))
    self.assertAllEqual({var.name: var for var in layer.weights},
                        {"foo/scale_by_y/y:0": 1.5})
    self.assertAllEqual(tf.add_n(layer.losses),
                        regularizers.L2()(layer.weights[0]))

  def test_compat_v1_make_template_in_shim_tf_function(self):
    # Test the shim when using `compat.v1.make_template`
    # Verify it works correctly in a tf.function
    # when made outside the function
    layer = CompatV1TemplateScaleByY()

    @tf.function
    def foo(x):
      return layer(x), tf.add_n(layer.losses)

    for _ in range(3):
      # Use multiple calls to verify that no new weights get created
      out, loss = foo(tf.ones(shape=(2, 3)))
      self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
      self.assertAllEqual(loss, regularizers.L2()(layer.weights[0]))
    self.assertAllEqual({var.name: var for var in layer.weights},
                        {"foo/scale_by_y/y:0": 1.5})

  def test_compat_v1_make_template_in_trace_in_shim(self):
    # Test the shim when using `compat.v1.make_template`
    # Verify it works correctly when the make_template/layer/shim
    # is created on the first tf.function trace!
    layers = {}

    @tf.function
    def bar(x):
      if "layer" not in layers:
        layers["layer"] = CompatV1TemplateScaleByY()
      layer = layers["layer"]
      return layer(x), tf.add_n(layer.losses)

    for _ in range(3):
      # Use multiple calls to verify that no new weights get created
      out, loss = bar(tf.ones(shape=(2, 3)))
      self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
      self.assertAllEqual(loss, regularizers.L2()(layers["layer"].weights[0]))
    self.assertAllEqual({var.name: var for var in layers["layer"].weights},
                        {"foo/scale_by_y/y:0": 1.5})

  def test_only_track_get_variable(self):
    # Test the shim does not try tracking or reusing variables
    # that were not created by get_variable. These variables/modules/layers
    # need to be tracked separately

    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self._dense_model = None

      def forward_pass(self, inputs):
        # A fresh Keras Dense layer per call: the shim must NOT capture it.
        dense_layer = core.Dense(
            self.units, name="dense",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        return dense_layer(inputs)

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))

    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
    self.assertEmpty(layer.weights)

  def test_embedded_keras_model(self):
    # Test the shim when embedding a Keras model inside of it
    # And assigning the model to an attribute

    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self._dense_model = None

      def forward_pass(self, inputs):
        # Build the inner functional model lazily on first call only.
        if not self._dense_model:
          inp = input_layer_module.Input(shape=inputs.shape)
          dense_layer = core.Dense(
              self.units, name="dense",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
          self._dense_model = training_module.Model(
              inputs=inp, outputs=dense_layer(inp))
        return self._dense_model(inputs)

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(), {"dense/bias:0", "dense/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
    self.assertAllEqual(tf.add_n(layer.losses), 0.5)

    # Verify reuse by updating the variables then re-running
    weights["dense/kernel:0"].assign(
        tf.ones(shape=(5, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
    self.assertAllEqual(tf.add_n(layer.losses), 2)

  def test_embedded_keras_model_in_module(self):
    # Test the module shim when embedding a Keras model inside of it
    # And assigning the model to an attribute

    class WrappedDenseLayer(VariableScopeModule):

      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self._dense_model = None

      def forward_pass(self, inputs):
        # Build the inner functional model lazily on first call only.
        if not self._dense_model:
          inp = input_layer_module.Input(shape=inputs.shape)
          dense_layer = core.Dense(
              self.units, name="dense",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
          self._dense_model = training_module.Model(
              inputs=inp, outputs=dense_layer(inp))
        return self._dense_model(inputs)

    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(), {"dense/bias:0", "dense/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
    # The module shim will only track regularization losses made by
    # compat.v1.layers and compat.v1.get_variable. Other regularization
    # losses must be tracked by separate user-created mechanisms.
    self.assertEmpty(layer.get_compat_v1_regularization_losses())

    # Verify reuse by updating the variables then re-running
    weights["dense/kernel:0"].assign(
        tf.ones(shape=(5, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
    # The module shim will only track regularization losses made by
    # compat.v1.layers and compat.v1.get_variable. Other regularization
    # losses must be tracked by separate user-created mechanisms.
    self.assertEmpty(layer.get_compat_v1_regularization_losses())

  def test_training_arg(self):
    # Test the shim when passing in a Keras `training` arg

    class TrainingCheckLayer(variable_scope_shim.VariableScopeLayer):

      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units

      def forward_pass(self, inputs, training=None):
        # Creates differently-named variables per branch so the test can
        # observe which branch ran from the tracked weight names.
        if training:
          out = core_layers.dense(inputs, self.units, name="dense_training")
        else:
          out = core_layers.dense(inputs, self.units, name="dense_no_training")
        return out

    layer = TrainingCheckLayer(10)
    layer(tf.ones(shape=(5, 5)), training=True)
    weights = {x.name: x for x in layer.variables}

    # Verify the correct variables were made
    self.assertEqual(weights.keys(),
                     {"dense_training/bias:0", "dense_training/kernel:0"})

    layer = TrainingCheckLayer(10)
    layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}

    # Verify the correct variables were made
    self.assertEqual(weights.keys(),
                     {"dense_no_training/bias:0", "dense_no_training/kernel:0"})

  def test_incorrect_decoration(self):
    # Raise an error if you incorrectly decorate a method
    # that is not a method of a Module, layer, or model:

    @variable_scope_shim.track_tf1_style_variables
    def foo(x):
      return x * 2

    with self.assertRaisesRegex(ValueError, "does not extend"):
      foo(tf.ones(shape=(4, 4)))


if __name__ == "__main__":
  tf.test.main()
54,685
39.239882
99
py