repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
keras
keras-master/keras/layers/preprocessing/benchmarks/category_crossing_benchmark.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras categorical_encoding preprocessing layer."""

import tensorflow as tf

import itertools
import time
import numpy as np

import keras
from keras.layers.preprocessing import category_crossing

tf.compat.v1.enable_v2_behavior()


# NOTE: the original comment here described a "word_gen" string generator;
# this module only generates integer pairs, documented below.
def int_gen():
  """Yield an endless stream of pairs of shape-(1,) random int arrays.

  The first element is drawn from [0, 5), the second from [0, 7), giving
  35 possible crossed combinations per pair.
  """
  for _ in itertools.count(1):
    yield (np.random.randint(0, 5, (1,)), np.random.randint(0, 7, (1,)))


def _make_dataset(batch_size, num_batches):
  """Build the shuffled, batched input pipeline shared by both benchmarks."""
  ds = tf.data.Dataset.from_generator(
      int_gen, (tf.int64, tf.int64),
      (tf.TensorShape([1]), tf.TensorShape([1])))
  ds = ds.shuffle(batch_size * 100)
  ds = ds.batch(batch_size)
  ds = ds.take(num_batches)
  ds = ds.prefetch(num_batches)
  return ds


class BenchmarkLayer(tf.test.Benchmark):
  """Benchmark the layer forward pass."""

  def run_dataset_implementation(self, batch_size):
    """Time raw `tf.sparse.cross`; returns average seconds per batch."""
    num_repeats = 5
    num_batches = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = _make_dataset(batch_size, num_batches)
      starts.append(time.time())
      # Benchmarked code begins here.
      for i in ds:
        _ = tf.sparse.cross([i[0], i[1]])
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
    return avg_time

  def bm_layer_implementation(self, batch_size):
    """Time the CategoryCrossing layer and report it against the baseline."""
    input_1 = keras.Input(shape=(1,), dtype=tf.int64, name="word")
    input_2 = keras.Input(shape=(1,), dtype=tf.int64, name="int")
    layer = category_crossing.CategoryCrossing()
    # Build the layer once outside the timed region.
    _ = layer([input_1, input_2])

    num_repeats = 5
    num_batches = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = _make_dataset(batch_size, num_batches)
      starts.append(time.time())
      # Benchmarked code begins here.
      for i in ds:
        _ = layer([i[0], i[1]])
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
    name = "category_crossing|batch_%s" % batch_size
    baseline = self.run_dataset_implementation(batch_size)
    extras = {
        "dataset implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)

  def benchmark_vocab_size_by_batch(self):
    for batch in [32, 64, 256]:
      self.bm_layer_implementation(batch_size=batch)


if __name__ == "__main__":
  tf.test.main()
3,482
31.858491
80
py
keras
keras-master/keras/layers/preprocessing/benchmarks/category_cross_hash_dense_benchmark.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of categorical cross hash columns with dense inputs."""

import tensorflow as tf

import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import category_crossing
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm

# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()

NUM_REPEATS = 10
BATCH_SIZES = [32, 256]


def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Runs the same cross+hash workload through a Keras preprocessing model and
  through the equivalent `tf.feature_column.crossed_column`, returning the
  average step time of each.
  """
  # Data and constants.
  num_buckets = 10000
  vocab = fc_bm.create_vocabulary(32768)
  ragged_features = {
      name: fc_bm.create_string_data(
          max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.0)
      for name in ("data_a", "data_b")
  }

  # Keras implementation
  input_1 = keras.Input(shape=(None,), name="data_a", dtype=tf.string)
  input_2 = keras.Input(shape=(None,), name="data_b", dtype=tf.string)
  crossed = category_crossing.CategoryCrossing()([input_1, input_2])
  hashed = hashing.Hashing(num_buckets)(crossed)
  model = keras.Model([input_1, input_2], hashed)

  # FC implementation
  fc = tf.feature_column.crossed_column(["data_a", "data_b"], num_buckets)

  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)

  # Benchmark runs: each run gets its own densified copy of the ragged data,
  # padded to a fixed (batch_size, max_length) shape.
  def dense_inputs():
    return {
        name: ragged.to_tensor(
            default_value="", shape=(batch_size, max_length))
        for name, ragged in ragged_features.items()
    }

  k_avg_time = fc_bm.run_keras(dense_inputs(), model, batch_size, NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc(dense_inputs(), fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time


class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    for batch in BATCH_SIZES:
      name = "cross_hash|dense|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)


if __name__ == "__main__":
  tf.test.main()
3,266
35.3
98
py
keras
keras-master/keras/layers/preprocessing/benchmarks/bucketized_column_dense_benchmark.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of bucketized columns with dense inputs."""

import tensorflow as tf

import numpy as np
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import discretization
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm

# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()

NUM_REPEATS = 10  # The number of times to run each benchmark.
BATCH_SIZES = [32, 256]


### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Buckets dense float data with the Keras Discretization layer and with the
  equivalent `tf.feature_column.bucketized_column`; returns the average step
  time of each implementation.
  """
  # Data and constants: bin edges at 1.0, 2.0, ..., 24.0.
  max_value = 25.0
  bins = np.arange(1.0, max_value)
  ragged_data = fc_bm.create_data(
      max_length, batch_size * NUM_REPEATS, 100000, dtype=float)

  # Keras implementation
  model = keras.Sequential([
      keras.Input(shape=(max_length,), name="data", dtype=tf.float32),
      discretization.Discretization(bins),
  ])

  # FC implementation
  fc = tf.feature_column.bucketized_column(
      tf.feature_column.numeric_column("data"), boundaries=list(bins))

  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)

  # Benchmark runs: densify the ragged data (pad with 0.0) for each run.
  k_avg_time = fc_bm.run_keras(
      {"data": ragged_data.to_tensor(default_value=0.0)}, model, batch_size,
      NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc(
      {"data": ragged_data.to_tensor(default_value=0.0)}, fc_fn, batch_size,
      NUM_REPEATS)
  return k_avg_time, fc_avg_time


class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    for batch in BATCH_SIZES:
      name = "bucketized|dense|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)


if __name__ == "__main__":
  tf.test.main()
2,761
34.410256
98
py
keras
keras-master/keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_dense_benchmark.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns + indicator from lists with dense inputs."""

import tensorflow as tf

import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm

# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()

NUM_REPEATS = 10
BATCH_SIZES = [32, 256]


def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Encodes string data to per-vocab-entry counts with Keras
  StringLookup + CategoryEncoding and with the equivalent
  `tf.feature_column` indicator column; returns the average step time of
  each implementation.
  """
  # Data and constants. 15% of tokens are out-of-vocabulary.
  vocab_size = 32768
  vocab = fc_bm.create_vocabulary(vocab_size)
  ragged_data = fc_bm.create_string_data(
      max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)

  # Keras implementation. num_tokens is vocab_size + 1 to leave room for the
  # single OOV index produced by StringLookup.
  model = keras.Sequential([
      keras.Input(shape=(max_length,), name="data", dtype=tf.string),
      string_lookup.StringLookup(vocabulary=vocab, mask_token=None),
      category_encoding.CategoryEncoding(
          num_tokens=vocab_size + 1, output_mode="count"),
  ])

  # FC implementation
  fc = tf.feature_column.indicator_column(
      tf.feature_column.categorical_column_with_vocabulary_list(
          key="data", vocabulary_list=vocab, num_oov_buckets=1))

  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)

  # Benchmark runs: each implementation gets its own densified copy, padded
  # to a fixed (batch_size, max_length) shape.
  def dense_inputs():
    return {
        "data": ragged_data.to_tensor(
            default_value="", shape=(batch_size, max_length))
    }

  k_avg_time = fc_bm.run_keras(dense_inputs(), model, batch_size, NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc(dense_inputs(), fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time


class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    for batch in BATCH_SIZES:
      name = "vocab_list_indicator|dense|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)


if __name__ == "__main__":
  tf.test.main()
3,038
35.178571
102
py
keras
keras-master/keras/premade/linear.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in linear model classes."""

import tensorflow.compat.v2 as tf

from keras import activations
from keras import initializers
from keras import regularizers
from keras.engine import base_layer
from keras.engine import input_spec
from keras.engine import training
from keras.layers import core
from tensorflow.python.util import deprecation  # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util.tf_export import keras_export


@keras_export(
    'keras.experimental.LinearModel',
    v1=['keras.experimental.LinearModel', 'keras.models.LinearModel'])
@deprecation.deprecated_endpoints('keras.experimental.LinearModel')
class LinearModel(training.Model):
  r"""Linear Model for regression and classification problems.

  This model approximates the following function:
  $$y = \beta + \sum_{i=1}^{N} w_{i} * x_{i}$$
  where $$\beta$$ is the bias and $$w_{i}$$ is the weight for each feature.

  Example:

  ```python
  model = LinearModel()
  model.compile(optimizer='sgd', loss='mse')
  model.fit(x, y, epochs=epochs)
  ```

  This model accepts sparse float inputs as well:

  Example:

  ```python
  model = LinearModel()
  opt = tf.keras.optimizers.Adam()
  loss_fn = tf.keras.losses.MeanSquaredError()
  with tf.GradientTape() as tape:
    output = model(sparse_input)
    loss = tf.reduce_mean(loss_fn(target, output))
  grads = tape.gradient(loss, model.weights)
  opt.apply_gradients(zip(grads, model.weights))
  ```

  """

  def __init__(self,
               units=1,
               activation=None,
               use_bias=True,
               kernel_initializer='zeros',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               **kwargs):
    """Create a Linear Model.

    Args:
      units: Positive integer, output dimension without the batch size.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied.
      use_bias: whether to calculate the bias/intercept for this model. If set
        to False, no bias/intercept will be used in calculations, e.g., the data
        is already centered.
      kernel_initializer: Initializer for the `kernel` weights matrices.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: regularizer for kernel vectors.
      bias_regularizer: regularizer for bias vector.
      **kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
    """
    self.units = units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    super(LinearModel, self).__init__(**kwargs)
    # Record premade-model usage for Keras telemetry.
    base_layer.keras_premade_model_gauge.get_cell('Linear').set(True)

  def build(self, input_shape):
    """Create one bias-free Dense layer per input feature.

    `input_shape` may be a dict of name->shape (one Dense per key, sorted by
    name), a tuple/list of TensorShapes (one Dense per shape), or a single
    shape (one Dense total). The shared bias, if any, is added here rather
    than inside the Dense layers.
    """
    if isinstance(input_shape, dict):
      # Sort keys so that layer order (and hence weight order) is
      # deterministic regardless of dict ordering.
      names = sorted(list(input_shape.keys()))
      self.input_specs = []
      self.dense_layers = []
      for name in names:
        shape = input_shape[name]
        layer = core.Dense(
            units=self.units,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            name=name)
        layer.build(shape)
        self.input_specs.append(
            input_spec.InputSpec(shape=shape, name=name))
        self.dense_layers.append(layer)
    elif isinstance(input_shape, (tuple, list)) and all(
        isinstance(shape, tf.TensorShape) for shape in input_shape):
      self.dense_layers = []
      for shape in input_shape:
        layer = core.Dense(
            units=self.units,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer)
        layer.build(shape)
        self.dense_layers.append(layer)
    else:
      # input_shape can be a single TensorShape or a tuple of ints.
      layer = core.Dense(
          units=self.units,
          use_bias=False,
          kernel_initializer=self.kernel_initializer,
          kernel_regularizer=self.kernel_regularizer)
      layer.build(input_shape)
      self.dense_layers = [layer]

    if self.use_bias:
      # A single bias shared across all per-feature Dense layers.
      self.bias = self.add_weight(
          'bias',
          shape=self.units,
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    """Sum the per-feature Dense outputs, then add bias/activation."""
    result = None
    if isinstance(inputs, dict):
      # Map dict inputs onto the dense layers created in build(); the layer
      # names are the (sorted) input keys.
      names = [layer.name for layer in self.dense_layers]
      different_keys = set(names) - set(inputs.keys())
      if different_keys:
        raise ValueError(
            'The input dictionary does not match '
            'the structure expected by the model.'
            '\n\tExpected keys: {}'
            '\n\tReceived keys: {}'
            '\n\tMissing keys: {}'.format(set(names), set(inputs.keys()),
                                          different_keys))
      inputs = [inputs[name] for name in names]
      for inp, layer in zip(inputs, self.dense_layers):
        output = layer(inp)
        if result is None:
          result = output
        else:
          result += output
    elif isinstance(inputs, (tuple, list)):
      for inp, layer in zip(inputs, self.dense_layers):
        output = layer(inp)
        if result is None:
          result = output
        else:
          result += output
    else:
      result = self.dense_layers[0](inputs)

    if self.use_bias:
      result = tf.nn.bias_add(result, self.bias)
    if self.activation is not None:
      return self.activation(result)  # pylint: disable=not-callable
    return result

  def get_config(self):
    """Return the serializable constructor arguments of this model."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
    }
    # Skip Model.get_config() on purpose and use the plain Layer config as
    # the base.
    base_config = base_layer.Layer.get_config(self)
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # custom_objects is unused: all config values deserialize via the
    # standard activations/initializers/regularizers registries.
    del custom_objects
    return cls(**config)
7,324
35.262376
92
py
keras
keras-master/keras/premade/wide_deep_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Premade WideNDeep models."""

import tensorflow.compat.v2 as tf

import numpy as np

from keras import keras_parameterized
from keras import testing_utils
from keras.engine import input_layer
from keras.engine import sequential
from keras.engine import training
from keras.feature_column import dense_features_v2
from keras.layers import core
from keras.optimizer_v2 import gradient_descent
from keras.premade import linear
from keras.premade import wide_deep


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class WideDeepModelTest(keras_parameterized.TestCase):

  def test_wide_deep_model(self):
    """Fits a WideDeepModel on two-input data; checks the model builds."""
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
    dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
    inputs = [linear_inp, dnn_inp]
    output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
    wide_deep_model.compile(
        optimizer=['sgd', 'adam'],
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    wide_deep_model.fit(inputs, output, epochs=5)
    self.assertTrue(wide_deep_model.built)

  def test_wide_deep_model_backprop(self):
    """Verifies per-optimizer gradients with zero-init weights.

    With 1x1 zero kernels, target 3.0 and MSE loss, one SGD step gives
    kernel = 2 * lr * 3.0: 0.6 for the linear part (lr=.1) and 1.8 for the
    dnn part (lr=.3).
    """
    with self.cached_session():
      linear_model = linear.LinearModel(units=1, kernel_initializer='zeros')
      dnn_model = sequential.Sequential(
          [core.Dense(units=1, kernel_initializer='zeros')])
      wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
      linear_inp = np.array([[1.]])
      dnn_inp = np.array([[1.]])
      inputs = [linear_inp, dnn_inp]
      output = linear_inp + 2 * dnn_inp
      linear_opt = gradient_descent.SGD(learning_rate=.1)
      dnn_opt = gradient_descent.SGD(learning_rate=.3)
      wide_deep_model.compile(
          optimizer=[linear_opt, dnn_opt],
          loss='mse',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      self.evaluate(tf.compat.v1.global_variables_initializer())
      wide_deep_model.fit(inputs, output, epochs=1)
      self.assertAllClose(
          [[0.6]],
          self.evaluate(wide_deep_model.linear_model.dense_layers[0].kernel))
      self.assertAllClose([[1.8]],
                          self.evaluate(
                              wide_deep_model.dnn_model.layers[0].kernel))

  def test_wide_deep_model_with_single_input(self):
    """Fits with one shared input fed to both sub-models."""
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    inputs = np.random.uniform(low=-5., high=5., size=(64, 3))
    output = .3 * inputs[:, 0]
    wide_deep_model.compile(
        optimizer=['sgd', 'adam'],
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    wide_deep_model.fit(inputs, output, epochs=5)

  def test_wide_deep_model_with_multi_outputs(self):
    """Checks output combination (and activation) with fixed weights."""
    inp = input_layer.Input(shape=(1,), name='linear')
    l = linear.LinearModel(units=2, use_bias=False)(inp)
    l1, l2 = tf.split(l, num_or_size_splits=2, axis=1)
    linear_model = training.Model(inp, [l1, l2])
    linear_model.set_weights([np.asarray([[0.5, 0.3]])])
    h = core.Dense(units=2, use_bias=False)(inp)
    h1, h2 = tf.split(h, num_or_size_splits=2, axis=1)
    dnn_model = training.Model(inp, [h1, h2])
    dnn_model.set_weights([np.asarray([[0.1, -0.5]])])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    inp_np = np.asarray([[1.]])
    out1, out2 = wide_deep_model(inp_np)
    # output should be (0.5 + 0.1), and (0.3 - 0.5)
    self.assertAllClose([[0.6]], out1)
    self.assertAllClose([[-0.2]], out2)
    wide_deep_model = wide_deep.WideDeepModel(
        linear_model, dnn_model, activation='relu')
    out1, out2 = wide_deep_model(inp_np)
    # output should be relu((0.5 + 0.1)), and relu((0.3 - 0.5))
    self.assertAllClose([[0.6]], out1)
    self.assertAllClose([[0.]], out2)

  def test_wide_deep_model_with_single_optimizer(self):
    """Fits with a single shared optimizer instead of one per sub-model."""
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
    dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
    inputs = [linear_inp, dnn_inp]
    output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
    wide_deep_model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    wide_deep_model.fit(inputs, output, epochs=5)
    self.assertTrue(wide_deep_model.built)

  def test_wide_deep_model_as_layer(self):
    """Uses a WideDeepModel as a layer inside a larger functional model."""
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1)])
    linear_input = input_layer.Input(shape=(3,), name='linear')
    dnn_input = input_layer.Input(shape=(5,), name='dnn')
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    wide_deep_output = wide_deep_model((linear_input, dnn_input))
    input_b = input_layer.Input(shape=(1,), name='b')
    output_b = core.Dense(units=1)(input_b)
    model = training.Model(
        inputs=[linear_input, dnn_input, input_b],
        outputs=[wide_deep_output + output_b])
    linear_input_np = np.random.uniform(low=-5., high=5., size=(64, 3))
    dnn_input_np = np.random.uniform(low=-5., high=5., size=(64, 5))
    input_b_np = np.random.uniform(low=-5., high=5., size=(64,))
    output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit([linear_input_np, dnn_input_np, input_b_np], output_np,
              epochs=5)

  def test_wide_deep_model_with_sub_model_trained(self):
    """Pre-trains standalone sub-models, then trains a joint model.

    Note the joint model is built from freshly-constructed sub-models; the
    separately-trained `linear_model`/`dnn_model` only exercise the
    compile/fit path beforehand.
    """
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
    wide_deep_model = wide_deep.WideDeepModel(
        linear.LinearModel(units=1),
        sequential.Sequential([core.Dense(units=1, input_dim=3)]))
    linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
    dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
    inputs = [linear_inp, dnn_inp]
    output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
    linear_model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    dnn_model.compile(
        optimizer='adam',
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    linear_model.fit(linear_inp, output, epochs=50)
    dnn_model.fit(dnn_inp, output, epochs=50)
    wide_deep_model.compile(
        optimizer=['sgd', 'adam'],
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly())
    wide_deep_model.fit(inputs, output, epochs=50)

  # This test is an example for cases where linear and dnn model accepts
  # same raw input and same transformed inputs, i.e., the raw input is
  # categorical, and both linear and dnn model accept one hot encoding.
  def test_wide_deep_model_with_single_feature_column(self):
    vocab_list = ['alpha', 'beta', 'gamma']
    vocab_val = [0.4, 0.6, 0.9]
    data = np.random.choice(vocab_list, size=256)
    y = np.zeros_like(data, dtype=np.float32)
    for vocab, val in zip(vocab_list, vocab_val):
      indices = np.where(data == vocab)
      y[indices] = val + np.random.uniform(
          low=-0.01, high=0.01, size=indices[0].shape)
    cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
        key='symbol', vocabulary_list=vocab_list)
    ind_column = tf.feature_column.indicator_column(cat_column)
    dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
    linear_model = linear.LinearModel(
        use_bias=False, kernel_initializer='zeros')
    dnn_model = sequential.Sequential([core.Dense(units=1)])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    combined = sequential.Sequential([dense_feature_layer, wide_deep_model])
    opt = gradient_descent.SGD(learning_rate=0.1)
    combined.compile(
        opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly())
    combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)

  # This test is an example for cases where linear and dnn model accepts
  # same raw input but different transformed inputs, i.e,. the raw input is
  # categorical, and linear model accepts one hot encoding, while dnn model
  # accepts embedding encoding.
  def test_wide_deep_model_with_two_feature_columns(self):
    vocab_list = ['alpha', 'beta', 'gamma']
    vocab_val = [0.4, 0.6, 0.9]
    data = np.random.choice(vocab_list, size=256)
    y = np.zeros_like(data, dtype=np.float32)
    for vocab, val in zip(vocab_list, vocab_val):
      indices = np.where(data == vocab)
      y[indices] = val + np.random.uniform(
          low=-0.01, high=0.01, size=indices[0].shape)
    cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
        key='symbol', vocabulary_list=vocab_list)
    ind_column = tf.feature_column.indicator_column(cat_column)
    emb_column = tf.feature_column.embedding_column(cat_column, dimension=5)
    linear_feature_layer = dense_features_v2.DenseFeatures([ind_column])
    linear_model = linear.LinearModel(
        use_bias=False, kernel_initializer='zeros')
    combined_linear = sequential.Sequential(
        [linear_feature_layer, linear_model])
    dnn_model = sequential.Sequential([core.Dense(units=1)])
    dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column])
    combined_dnn = sequential.Sequential([dnn_feature_layer, dnn_model])
    wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn)
    opt = gradient_descent.SGD(learning_rate=0.1)
    wide_deep_model.compile(
        opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly())
    wide_deep_model.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)

  def test_config(self):
    """Round-trips the model through get_config/from_config."""
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    config = wide_deep_model.get_config()
    cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config)
    self.assertEqual(linear_model.units,
                     cloned_wide_deep_model.linear_model.units)
    self.assertEqual(dnn_model.layers[0].units,
                     cloned_wide_deep_model.dnn_model.layers[0].units)

  def test_config_with_custom_objects(self):
    """Round-trips a model whose activation is a custom callable."""

    def my_activation(x):
      return x

    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
    wide_deep_model = wide_deep.WideDeepModel(
        linear_model, dnn_model, activation=my_activation)
    config = wide_deep_model.get_config()
    cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(
        config, custom_objects={'my_activation': my_activation})
    self.assertEqual(cloned_wide_deep_model.activation, my_activation)


if __name__ == '__main__':
  tf.test.main()
12,101
43.656827
80
py
keras
keras-master/keras/premade/wide_deep.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in WideNDeep model classes.""" import tensorflow.compat.v2 as tf from keras import activations from keras import backend from keras import layers as layer_module from keras.engine import base_layer from keras.engine import data_adapter from keras.engine import training as keras_training from keras.utils import generic_utils from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import from tensorflow.python.util.tf_export import keras_export @keras_export( 'keras.experimental.WideDeepModel', v1=['keras.experimental.WideDeepModel', 'keras.models.WideDeepModel']) @deprecation.deprecated_endpoints('keras.experimental.WideDeepModel') class WideDeepModel(keras_training.Model): r"""Wide & Deep Model for regression and classification problems. This model jointly train a linear and a dnn model. Example: ```python linear_model = LinearModel() dnn_model = keras.Sequential([keras.layers.Dense(units=64), keras.layers.Dense(units=1)]) combined_model = WideDeepModel(linear_model, dnn_model) combined_model.compile(optimizer=['sgd', 'adam'], 'mse', ['mse']) # define dnn_inputs and linear_inputs as separate numpy arrays or # a single numpy array if dnn_inputs is same as linear_inputs. 
combined_model.fit([linear_inputs, dnn_inputs], y, epochs) # or define a single `tf.data.Dataset` that contains a single tensor or # separate tensors for dnn_inputs and linear_inputs. dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y)) combined_model.fit(dataset, epochs) ``` Both linear and dnn model can be pre-compiled and trained separately before jointly training: Example: ```python linear_model = LinearModel() linear_model.compile('adagrad', 'mse') linear_model.fit(linear_inputs, y, epochs) dnn_model = keras.Sequential([keras.layers.Dense(units=1)]) dnn_model.compile('rmsprop', 'mse') dnn_model.fit(dnn_inputs, y, epochs) combined_model = WideDeepModel(linear_model, dnn_model) combined_model.compile(optimizer=['sgd', 'adam'], 'mse', ['mse']) combined_model.fit([linear_inputs, dnn_inputs], y, epochs) ``` """ def __init__(self, linear_model, dnn_model, activation=None, **kwargs): """Create a Wide & Deep Model. Args: linear_model: a premade LinearModel, its output must match the output of the dnn model. dnn_model: a `tf.keras.Model`, its output must match the output of the linear model. activation: Activation function. Set it to None to maintain a linear activation. **kwargs: The keyword arguments that are passed on to BaseLayer.__init__. Allowed keyword arguments include `name`. 
""" super(WideDeepModel, self).__init__(**kwargs) base_layer.keras_premade_model_gauge.get_cell('WideDeep').set(True) self.linear_model = linear_model self.dnn_model = dnn_model self.activation = activations.get(activation) def call(self, inputs, training=None): if not isinstance(inputs, (tuple, list)) or len(inputs) != 2: linear_inputs = dnn_inputs = inputs else: linear_inputs, dnn_inputs = inputs linear_output = self.linear_model(linear_inputs) # pylint: disable=protected-access if self.dnn_model._expects_training_arg: if training is None: training = backend.learning_phase() dnn_output = self.dnn_model(dnn_inputs, training=training) else: dnn_output = self.dnn_model(dnn_inputs) output = tf.nest.map_structure( lambda x, y: (x + y), linear_output, dnn_output) if self.activation: return tf.nest.map_structure(self.activation, output) return output # This does not support gradient scaling and LossScaleOptimizer. def train_step(self, data): x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data) with tf.GradientTape() as tape: y_pred = self(x, training=True) loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) self.compiled_metrics.update_state(y, y_pred, sample_weight) if isinstance(self.optimizer, (list, tuple)): linear_vars = self.linear_model.trainable_variables dnn_vars = self.dnn_model.trainable_variables linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars)) linear_optimizer = self.optimizer[0] dnn_optimizer = self.optimizer[1] linear_optimizer.apply_gradients(zip(linear_grads, linear_vars)) dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars)) else: trainable_variables = self.trainable_variables grads = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(grads, trainable_variables)) return {m.name: m.result() for m in self.metrics} def _make_train_function(self): # Only needed for graph mode and model_to_estimator. 
has_recompiled = self._recompile_weights_loss_and_weighted_metrics() self._check_trainable_weights_consistency() # If we have re-compiled the loss/weighted metric sub-graphs then create # train function even if one exists already. This is because # `_feed_sample_weights` list has been updated on re-compile. if getattr(self, 'train_function', None) is None or has_recompiled: # Restore the compiled trainable state. current_trainable_state = self._get_trainable_state() self._set_trainable_state(self._compiled_trainable_state) inputs = ( self._feed_inputs + self._feed_targets + self._feed_sample_weights) if not isinstance(backend.symbolic_learning_phase(), int): inputs += [backend.symbolic_learning_phase()] if isinstance(self.optimizer, (list, tuple)): linear_optimizer = self.optimizer[0] dnn_optimizer = self.optimizer[1] else: linear_optimizer = self.optimizer dnn_optimizer = self.optimizer with backend.get_graph().as_default(): with backend.name_scope('training'): # Training updates updates = [] linear_updates = linear_optimizer.get_updates( params=self.linear_model.trainable_weights, # pylint: disable=protected-access loss=self.total_loss) updates += linear_updates dnn_updates = dnn_optimizer.get_updates( params=self.dnn_model.trainable_weights, # pylint: disable=protected-access loss=self.total_loss) updates += dnn_updates # Unconditional updates updates += self.get_updates_for(None) # Conditional updates relevant to this model updates += self.get_updates_for(self.inputs) metrics = self._get_training_eval_metrics() metrics_tensors = [ m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access ] with backend.name_scope('training'): # Gets loss and metrics. Updates weights at each call. 
fn = backend.function( inputs, [self.total_loss] + metrics_tensors, updates=updates, name='train_function', **self._function_kwargs) setattr(self, 'train_function', fn) # Restore the current trainable state self._set_trainable_state(current_trainable_state) def get_config(self): linear_config = generic_utils.serialize_keras_object(self.linear_model) dnn_config = generic_utils.serialize_keras_object(self.dnn_model) config = { 'linear_model': linear_config, 'dnn_model': dnn_config, 'activation': activations.serialize(self.activation), } base_config = base_layer.Layer.get_config(self) return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): linear_config = config.pop('linear_model') linear_model = layer_module.deserialize(linear_config, custom_objects) dnn_config = config.pop('dnn_model') dnn_model = layer_module.deserialize(dnn_config, custom_objects) activation = activations.deserialize( config.pop('activation', None), custom_objects=custom_objects) return cls( linear_model=linear_model, dnn_model=dnn_model, activation=activation, **config)
9,008
40.325688
109
py
keras
keras-master/keras/premade/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Premade Model API.""" from keras.premade import linear from keras.premade import wide_deep
784
40.315789
80
py
keras
keras-master/keras/premade/linear_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras Premade Linear models.""" import tensorflow.compat.v2 as tf import numpy as np from keras import backend from keras import keras_parameterized from keras import losses from keras.engine import input_layer from keras.engine import sequential from keras.engine import training from keras.feature_column import dense_features_v2 from keras.layers import core from keras.optimizer_v2 import gradient_descent from keras.premade import linear @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class LinearModelTest(keras_parameterized.TestCase): def test_linear_model_with_single_input(self): model = linear.LinearModel() inp = np.random.uniform(low=-5., high=5., size=(64, 2)) output = .3 * inp[:, 0] + .2 * inp[:, 1] model.compile('sgd', 'mse', []) model.fit(inp, output, epochs=5) self.assertTrue(model.built) def test_linear_model_with_list_input(self): model = linear.LinearModel() input_a = np.random.uniform(low=-5., high=5., size=(64, 1)) input_b = np.random.uniform(low=-5., high=5., size=(64, 1)) output = .3 * input_a + .2 * input_b model.compile('sgd', 'mse', []) model.fit([input_a, input_b], output, epochs=5) def test_linear_model_with_mismatched_dict_inputs(self): model = linear.LinearModel() input_a = np.random.uniform(low=-5., high=5., size=(64, 1)) input_b = 
np.random.uniform(low=-5., high=5., size=(64, 1)) output = .3 * input_a + .2 * input_b model.compile('sgd', 'mse', []) model.build({'a': tf.TensorShape([None, 1]), 'b': tf.TensorShape([None, 1])}) with self.assertRaisesRegex(ValueError, 'Missing keys'): model.fit({'c': input_a, 'b': input_b}, output, epochs=5) def test_linear_model_with_dict_input(self): model = linear.LinearModel() input_a = np.random.uniform(low=-5., high=5., size=(64, 1)) input_b = np.random.uniform(low=-5., high=5., size=(64, 1)) output = .3 * input_a + .2 * input_b model.compile('sgd', 'mse', []) model.fit({'a': input_a, 'b': input_b}, output, epochs=5) def test_linear_model_as_layer(self): input_a = input_layer.Input(shape=(1,), name='a') output_a = linear.LinearModel()(input_a) input_b = input_layer.Input(shape=(1,), name='b') output_b = core.Dense(units=1)(input_b) output = output_a + output_b model = training.Model(inputs=[input_a, input_b], outputs=[output]) input_a_np = np.random.uniform(low=-5., high=5., size=(64, 1)) input_b_np = np.random.uniform(low=-5., high=5., size=(64, 1)) output_np = .3 * input_a_np + .2 * input_b_np model.compile('sgd', 'mse', []) model.fit([input_a_np, input_b_np], output_np, epochs=5) def test_linear_model_with_sparse_input(self): indices = tf.constant([[0, 0], [0, 2], [1, 0], [1, 1]], dtype=tf.int64) values = tf.constant([.4, .6, .8, .5]) shape = tf.constant([2, 3], dtype=tf.int64) model = linear.LinearModel() inp = tf.SparseTensor(indices, values, shape) output = model(inp) self.evaluate(tf.compat.v1.global_variables_initializer()) if tf.executing_eagerly(): weights = model.get_weights() weights[0] = np.ones((3, 1)) model.set_weights(weights) output = model(inp) self.assertAllClose([[1.], [1.3]], self.evaluate(output)) def test_linear_model_with_sparse_input_and_custom_training(self): batch_size = 64 indices = [] values = [] target = np.zeros((batch_size, 1)) for i in range(64): rand_int = np.random.randint(3) if rand_int == 0: indices.append((i, 0)) val = 
np.random.uniform(low=-5., high=5.) values.append(val) target[i] = 0.3 * val elif rand_int == 1: indices.append((i, 1)) val = np.random.uniform(low=-5., high=5.) values.append(val) target[i] = 0.2 * val else: indices.append((i, 0)) indices.append((i, 1)) val_1 = np.random.uniform(low=-5., high=5.) val_2 = np.random.uniform(low=-5., high=5.) values.append(val_1) values.append(val_2) target[i] = 0.3 * val_1 + 0.2 * val_2 indices = np.asarray(indices) values = np.asarray(values) shape = tf.constant([batch_size, 2], dtype=tf.int64) inp = tf.SparseTensor(indices, values, shape) model = linear.LinearModel(use_bias=False) opt = gradient_descent.SGD() for _ in range(20): with tf.GradientTape() as t: output = model(inp) loss = backend.mean(losses.mean_squared_error(target, output)) grads = t.gradient(loss, model.trainable_variables) grads_and_vars = zip(grads, model.trainable_variables) opt.apply_gradients(grads_and_vars) # This test is an example for a regression on categorical inputs, i.e., # the output is 0.4, 0.6, 0.9 when input is 'alpha', 'beta', 'gamma' # separately. 
def test_linear_model_with_feature_column(self): vocab_list = ['alpha', 'beta', 'gamma'] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape) cat_column = tf.feature_column.categorical_column_with_vocabulary_list( key='symbol', vocabulary_list=vocab_list) ind_column = tf.feature_column.indicator_column(cat_column) dense_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer='zeros') combined = sequential.Sequential([dense_feature_layer, linear_model]) opt = gradient_descent.SGD(learning_rate=0.1) combined.compile(opt, 'mse', []) combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10) self.assertAllClose([[0.4], [0.6], [0.9]], combined.layers[1].dense_layers[0].kernel.numpy(), atol=0.01) def test_config(self): linear_model = linear.LinearModel(units=3, use_bias=True) config = linear_model.get_config() cloned_linear_model = linear.LinearModel.from_config(config) self.assertEqual(linear_model.units, cloned_linear_model.units) if __name__ == '__main__': tf.test.main()
7,061
39.586207
80
py
keras
keras-master/keras/integration_test/legacy_rnn_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import numpy as np import tensorflow.compat.v1 as tf tf.disable_eager_execution() class KerasNetworkTFRNNs(tf.keras.Model): def __init__(self, name=None): super(KerasNetworkTFRNNs, self).__init__(name=name) self._cell = tf.nn.rnn_cell.MultiRNNCell( [tf.nn.rnn_cell.LSTMCell(1) for _ in range(2)]) def call(self, inputs): return self._cell(inputs, self._cell.get_initial_state(inputs)) class KerasNetworkKerasRNNs(tf.keras.Model): def __init__(self, name=None): super(KerasNetworkKerasRNNs, self).__init__(name=name) self._cell = tf.keras.layers.StackedRNNCells( [tf.keras.layers.LSTMCell(1) for _ in range(2)]) def call(self, inputs): return self._cell(inputs, self._cell.get_initial_state(inputs)) class LegacyRNNTest(tf.test.TestCase): def setUp(self): super(LegacyRNNTest, self).setUp() self._seed = 23489 np.random.seed(self._seed) def testRNNWithKerasSimpleRNNCell(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = tf.keras.utils.to_categorical(y_train) cell = tf.keras.layers.SimpleRNNCell(output_shape) inputs = tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) predict = tf.placeholder( 
tf.float32, shape=(None, output_shape)) outputs, state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape]) self.assertEqual(state.shape.as_list(), [None, output_shape]) loss = tf.losses.softmax_cross_entropy(predict, state) train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss) sess.run([tf.global_variables_initializer()]) _, outputs, state = sess.run( [train_op, outputs, state], {inputs: x_train, predict: y_train}) self.assertEqual(len(outputs), batch) self.assertEqual(len(state), batch) def testRNNWithKerasGRUCell(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = tf.keras.utils.to_categorical(y_train) cell = tf.keras.layers.GRUCell(output_shape) inputs = tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) predict = tf.placeholder( tf.float32, shape=(None, output_shape)) outputs, state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape]) self.assertEqual(state.shape.as_list(), [None, output_shape]) loss = tf.losses.softmax_cross_entropy(predict, state) train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss) sess.run([tf.global_variables_initializer()]) _, outputs, state = sess.run( [train_op, outputs, state], {inputs: x_train, predict: y_train}) self.assertEqual(len(outputs), batch) self.assertEqual(len(state), batch) def testRNNWithKerasLSTMCell(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = tf.keras.utils.to_categorical(y_train) cell = tf.keras.layers.LSTMCell(output_shape) inputs = 
tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) predict = tf.placeholder( tf.float32, shape=(None, output_shape)) outputs, state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape]) self.assertEqual(len(state), 2) self.assertEqual(state[0].shape.as_list(), [None, output_shape]) self.assertEqual(state[1].shape.as_list(), [None, output_shape]) loss = tf.losses.softmax_cross_entropy(predict, state[0]) train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss) sess.run([tf.global_variables_initializer()]) _, outputs, state = sess.run( [train_op, outputs, state], {inputs: x_train, predict: y_train}) self.assertEqual(len(outputs), batch) self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), batch) self.assertEqual(len(state[1]), batch) def testRNNWithStackKerasCell(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = tf.keras.utils.to_categorical(y_train) cell = tf.keras.layers.StackedRNNCells( [tf.keras.layers.LSTMCell(2 * output_shape), tf.keras.layers.LSTMCell(output_shape)]) inputs = tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) predict = tf.placeholder( tf.float32, shape=(None, output_shape)) outputs, state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape]) self.assertEqual(len(state), 2) state = tf.nest.flatten(state) self.assertEqual(len(state), 4) self.assertEqual(state[0].shape.as_list(), [None, 2 * output_shape]) self.assertEqual(state[1].shape.as_list(), [None, 2 * output_shape]) self.assertEqual(state[2].shape.as_list(), [None, output_shape]) self.assertEqual(state[3].shape.as_list(), [None, output_shape]) loss = tf.losses.softmax_cross_entropy(predict, state[2]) 
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss) sess.run([tf.global_variables_initializer()]) _, outputs, state = sess.run( [train_op, outputs, state], {inputs: x_train, predict: y_train}) self.assertEqual(len(outputs), batch) self.assertEqual(len(state), 4) for s in state: self.assertEqual(len(s), batch) def testStaticRNNWithKerasSimpleRNNCell(self): with self.cached_session() as sess: input_shape = 10 output_shape = 5 timestep = 4 batch = 100 (x_train, y_train), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) x_train = np.transpose(x_train, (1, 0, 2)) y_train = tf.keras.utils.to_categorical(y_train) cell = tf.keras.layers.SimpleRNNCell(output_shape) inputs = [tf.placeholder( tf.float32, shape=(None, input_shape))] * timestep predict = tf.placeholder( tf.float32, shape=(None, output_shape)) outputs, state = tf.nn.static_rnn( cell, inputs, dtype=tf.float32) self.assertEqual(len(outputs), timestep) self.assertEqual(outputs[0].shape.as_list(), [None, output_shape]) self.assertEqual(state.shape.as_list(), [None, output_shape]) loss = tf.losses.softmax_cross_entropy(predict, state) train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss) sess.run([tf.global_variables_initializer()]) feed_dict = {i: d for i, d in zip(inputs, x_train)} feed_dict[predict] = y_train _, outputs, state = sess.run( [train_op, outputs, state], feed_dict) self.assertEqual(len(outputs), timestep) self.assertEqual(len(outputs[0]), batch) self.assertEqual(len(state), batch) def testKerasAndTFRNNLayerOutputComparison(self): input_shape = 10 output_shape = 5 timestep = 4 batch = 20 (x_train, _), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) fix_weights_generator = tf.keras.layers.SimpleRNNCell(output_shape) fix_weights_generator.build((None, input_shape)) weights = fix_weights_generator.get_weights() with 
self.session(graph=tf.Graph()) as sess: inputs = tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) cell = tf.keras.layers.SimpleRNNCell(output_shape) tf_out, tf_state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) cell.set_weights(weights) [tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train}) with self.session(graph=tf.Graph()) as sess: k_input = tf.keras.Input(shape=(timestep, input_shape), dtype=tf.float32) cell = tf.keras.layers.SimpleRNNCell(output_shape) layer = tf.keras.layers.RNN( cell, return_sequences=True, return_state=True) keras_out = layer(k_input) cell.set_weights(weights) k_out, k_state = sess.run(keras_out, {k_input: x_train}) self.assertAllClose(tf_out, k_out) self.assertAllClose(tf_state, k_state) def testSimpleRNNCellAndBasicRNNCellComparison(self): input_shape = 10 output_shape = 5 timestep = 4 batch = 20 (x_train, _), _ = get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) fix_weights_generator = tf.keras.layers.SimpleRNNCell(output_shape) fix_weights_generator.build((None, input_shape)) # The SimpleRNNCell contains 3 weights: kernel, recurrent_kernel, and bias # The BasicRNNCell contains 2 weight: kernel and bias, where kernel is # zipped [kernel, recurrent_kernel] in SimpleRNNCell. 
keras_weights = fix_weights_generator.get_weights() kernel, recurrent_kernel, bias = keras_weights tf_weights = [np.concatenate((kernel, recurrent_kernel)), bias] with self.session(graph=tf.Graph()) as sess: inputs = tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) cell = tf.keras.layers.SimpleRNNCell(output_shape) k_out, k_state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) cell.set_weights(keras_weights) [k_out, k_state] = sess.run([k_out, k_state], {inputs: x_train}) with self.session(graph=tf.Graph()) as sess: inputs = tf.placeholder( tf.float32, shape=(None, timestep, input_shape)) cell = tf.nn.rnn_cell.BasicRNNCell(output_shape) tf_out, tf_state = tf.nn.dynamic_rnn( cell, inputs, dtype=tf.float32) cell.set_weights(tf_weights) [tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train}) self.assertAllClose(tf_out, k_out, atol=1e-5) self.assertAllClose(tf_state, k_state, atol=1e-5) def testRNNCellSerialization(self): for cell in [ tf.nn.rnn_cell.LSTMCell(32, use_peepholes=True, cell_clip=True), tf.nn.rnn_cell.BasicLSTMCell(32, dtype=tf.float32), tf.nn.rnn_cell.BasicRNNCell(32, activation="relu", dtype=tf.float32), tf.nn.rnn_cell.GRUCell(32, dtype=tf.float32) ]: with self.cached_session(): x = tf.keras.Input((None, 5)) layer = tf.keras.layers.RNN(cell) y = layer(x) model = tf.keras.models.Model(x, y) model.compile(optimizer="rmsprop", loss="mse") # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() # The custom_objects is important here since rnn_cell_impl is # not visible as a Keras layer, and also has a name conflict with # keras.LSTMCell and GRUCell. 
layer = tf.keras.layers.RNN.from_config( config, custom_objects={ "BasicRNNCell": tf.nn.rnn_cell.BasicRNNCell, "GRUCell": tf.nn.rnn_cell.GRUCell, "LSTMCell": tf.nn.rnn_cell.LSTMCell, "BasicLSTMCell": tf.nn.rnn_cell.BasicLSTMCell }) y = layer(x) model = tf.keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) def testRNNCellActsLikeKerasRNNCellInProperScope(self): with tf.layers.experimental.keras_style_scope(): kn1 = KerasNetworkTFRNNs(name="kn1") kn2 = KerasNetworkKerasRNNs(name="kn2") z = tf.zeros((2, 3)) kn1(z) # pylint:disable=not-callable kn2(z) # pylint:disable=not-callable # pylint: disable=protected-access self.assertTrue(all("kn1" in v.name for v in kn1._cell.variables)) self.assertTrue(all("kn2" in v.name for v in kn2._cell.variables)) with tf.layers.experimental.keras_style_scope(): kn1_new = KerasNetworkTFRNNs(name="kn1_new") kn2_new = KerasNetworkKerasRNNs(name="kn2_new") kn2_new(z) # pylint:disable=not-callable # Most importantly, this doesn't fail due to variable scope reuse issues. kn1_new(z) # pylint:disable=not-callable self.assertTrue(all("kn1_new" in v.name for v in kn1_new._cell.variables)) self.assertTrue(all("kn2_new" in v.name for v in kn2_new._cell.variables)) def get_test_data(train_samples, test_samples, input_shape, num_classes): num_sample = train_samples + test_samples templates = 2 * num_classes * np.random.random((num_classes,) + input_shape) y = np.random.randint(0, num_classes, size=(num_sample,)) x = np.zeros((num_sample,) + input_shape, dtype=np.float32) for i in range(num_sample): x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape) return ((x[:train_samples], y[:train_samples]), (x[train_samples:], y[train_samples:])) if __name__ == "__main__": tf.test.main()
14,919
37.65285
80
py
keras
keras-master/keras/integration_test/module_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import tensorflow as tf class ModuleTest(tf.test.TestCase): def test_module_discover_layer_variable(self): m = tf.Module() m.a = tf.keras.layers.Dense(1) m.b = tf.keras.layers.Dense(2) # The weights of the layer has not been created yet. self.assertEmpty(m.variables) self.assertLen(m.submodules, 2) inputs = tf.keras.layers.Input((1,)) m.a(inputs) m.b(inputs) variable_list = m.variables self.assertLen(variable_list, 4) self.assertIs(variable_list[0], m.a.kernel) self.assertIs(variable_list[1], m.a.bias) self.assertIs(variable_list[2], m.b.kernel) self.assertIs(variable_list[3], m.b.bias) def test_model_discover_submodule(self): m = tf.keras.models.Sequential( layers=[tf.keras.layers.Dense(1), tf.keras.layers.Dense(2)]) self.assertEqual(m.submodules, (m.layers[0], m.layers[1])) m(tf.keras.layers.Input((1,))) self.assertLen(m.variables, 4) def test_model_wrapped_in_module_discovers_submodules(self): linear = tf.keras.models.Sequential( [tf.keras.layers.Dense(units=1, input_shape=[1])]) linear.compile(optimizer="sgd", loss="mean_squared_error") m = tf.Module() m.l = linear self.assertNotEmpty(m.submodules) self.assertLen(m.variables, 2) if __name__ == "__main__": tf.test.main()
2,018
32.098361
80
py
keras
keras-master/keras/integration_test/preprocessing_applied_in_model_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Demonstrate Keras preprocessing layers applied inside a Model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from keras.integration_test import preprocessing_test_utils as utils ds_combinations = tf.__internal__.distribute.combinations multi_process_runner = tf.__internal__.distribute.multi_process_runner test_combinations = tf.__internal__.test.combinations # Note: Strategy combinations are not (yet) public APIs, so they are subject # to API changes and backward-compatibility is not guaranteed. STRATEGIES = [ ds_combinations.default_strategy, ds_combinations.mirrored_strategy_with_cpu_1_and_2, ds_combinations.mirrored_strategy_with_two_gpus, # TODO(b/183044870) TPU strategies with soft placement do not yet work. 
# ds_combinations.tpu_strategy, # ds_combinations.cloud_tpu_strategy, ds_combinations.parameter_server_strategy_3worker_2ps_cpu, ds_combinations.parameter_server_strategy_3worker_2ps_1gpu, ds_combinations.multi_worker_mirrored_2x1_cpu, ds_combinations.multi_worker_mirrored_2x2_gpu, ds_combinations.central_storage_strategy_with_two_gpus, ] @ds_combinations.generate( test_combinations.combine(strategy=STRATEGIES, mode="eager")) class PreprocessingAppliedInModelTest(tf.test.TestCase): """Demonstrate Keras preprocessing layers applied inside a Model.""" def testDistributedModelFit(self, strategy): with strategy.scope(): preprocessing_model = utils.make_preprocessing_model(self.get_temp_dir()) training_model = utils.make_training_model() # Merge the two separate models into a single model for training. inputs = preprocessing_model.inputs outputs = training_model(preprocessing_model(inputs)) merged_model = tf.keras.Model(inputs, outputs) merged_model.compile(optimizer="sgd", loss="binary_crossentropy") def dataset_fn(input_context): dataset = utils.make_dataset() dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) batch_size = input_context.get_per_replica_batch_size( global_batch_size=utils.BATCH_SIZE) return dataset.batch(batch_size).repeat().prefetch(2) dataset_creator = tf.keras.utils.experimental.DatasetCreator(dataset_fn) merged_model.fit(dataset_creator, epochs=2, steps_per_epoch=utils.STEPS) if __name__ == "__main__": multi_process_runner.test_main()
3,208
42.958904
80
py
keras
keras-master/keras/integration_test/distributed_training_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test to demonstrate basic Keras training with a variety of strategies.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf ds_combinations = tf.__internal__.distribute.combinations # Note: Strategy combinations are not (yet) public APIs, so they are subject # to API changes and backward-compatibility is not guaranteed. # TODO(b/188763034): Proceed to export the strategy combinations as public APIs. 
STRATEGIES = [ ds_combinations.default_strategy, ds_combinations.mirrored_strategy_with_cpu_1_and_2, ds_combinations.mirrored_strategy_with_two_gpus, ds_combinations.tpu_strategy, ds_combinations.cloud_tpu_strategy, ds_combinations.parameter_server_strategy_3worker_2ps_cpu, ds_combinations.parameter_server_strategy_3worker_2ps_1gpu, ds_combinations.multi_worker_mirrored_2x1_cpu, ds_combinations.multi_worker_mirrored_2x2_gpu, ds_combinations.central_storage_strategy_with_two_gpus, ] @ds_combinations.generate( tf.__internal__.test.combinations.combine( strategy=STRATEGIES, mode="eager")) class DistributedTrainingTest(tf.test.TestCase): """Test to demonstrate basic Keras training with a variety of strategies.""" def testKerasTrainingAPI(self, strategy): # A `dataset_fn` is required for `Model.fit` to work across all strategies. def dataset_fn(input_context): batch_size = input_context.get_per_replica_batch_size( global_batch_size=64) x = tf.random.uniform((10, 10)) y = tf.random.uniform((10,)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10).repeat() dataset = dataset.shard( input_context.num_input_pipelines, input_context.input_pipeline_id) return dataset.batch(batch_size).prefetch(2) with strategy.scope(): model = tf.keras.Sequential([tf.keras.layers.Dense(10)]) optimizer = tf.keras.optimizers.SGD() model.compile(optimizer, loss="mse", steps_per_execution=10) x = tf.keras.utils.experimental.DatasetCreator(dataset_fn) model.fit(x, epochs=2, steps_per_epoch=10) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
2,927
40.239437
80
py
keras
keras-master/keras/integration_test/preprocessing_applied_in_dataset_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.integration_test import preprocessing_test_utils as utils

ds_combinations = tf.__internal__.distribute.combinations
multi_process_runner = tf.__internal__.distribute.multi_process_runner
test_combinations = tf.__internal__.test.combinations

# Note: Strategy combinations are not (yet) public APIs, so they are subject
# to API changes and backward-compatibility is not guaranteed. Note that we
# skip parameter server strategy here, as parameter server strategy requires
# a DatasetCreator when training on a tf.data.Dataset.
STRATEGIES = [
    ds_combinations.default_strategy,
    ds_combinations.mirrored_strategy_with_cpu_1_and_2,
    ds_combinations.mirrored_strategy_with_two_gpus,
    ds_combinations.tpu_strategy,
    ds_combinations.cloud_tpu_strategy,
    ds_combinations.multi_worker_mirrored_2x1_cpu,
    ds_combinations.multi_worker_mirrored_2x2_gpu,
    ds_combinations.central_storage_strategy_with_two_gpus,
]


@ds_combinations.generate(
    test_combinations.combine(strategy=STRATEGIES, mode="eager"))
class PreprocessingAppliedInDatasetTest(tf.test.TestCase):
  """Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""

  def testDistributedModelFit(self, strategy):
    # Both models must be constructed under the strategy scope so their
    # variables are distributed consistently.
    with strategy.scope():
      preprocessing_model = utils.make_preprocessing_model(self.get_temp_dir())
      training_model = utils.make_training_model()
      training_model.compile(optimizer="sgd", loss="binary_crossentropy")

    # Preprocessing happens inside the tf.data pipeline rather than inside
    # the trained model, so it runs on the input-pipeline workers.
    ds = utils.make_dataset()
    ds = ds.batch(utils.BATCH_SIZE)
    ds = ds.map(lambda x, y: (preprocessing_model(x), y))
    training_model.fit(ds, epochs=2)


if __name__ == "__main__":
  multi_process_runner.test_main()
2,608
41.080645
80
py
keras
keras-master/keras/integration_test/forwardprop_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools

from absl.testing import parameterized
import numpy as np
import tensorflow as tf


def _jvp(f, primals, tangents):
  """Compute the jacobian of `f` at `primals` multiplied by `tangents`."""
  with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
    primals_out = f(*primals)
  return primals_out, acc.jvp(
      primals_out, unconnected_gradients=tf.UnconnectedGradients.ZERO)


def _jacfwd(f, primals):
  """Compute the jacobian of `f` at `primals` using forward-mode autodiff."""
  jac_flat = []
  flat_primals = tf.nest.flatten(primals)
  tangent_mask = [tf.zeros_like(primal) for primal in flat_primals]
  for primal_index, primal in enumerate(flat_primals):
    primal_vector = tf.reshape(primal, [-1])
    primal_vector_length = tf.size(primal_vector)
    jac_columns = []
    # One JVP per scalar element of this primal: each pass pushes a single
    # one-hot tangent through `f`, producing one column of the jacobian.
    for element_index in tf.range(primal_vector_length):
      mask = tf.one_hot(element_index, primal_vector_length)
      tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal))
      jac_columns.append(
          tf.nest.map_structure(
              functools.partial(tf.reshape, shape=[-1]),
              _jvp(f, primals,
                   tf.nest.pack_sequence_as(primals, tangent_mask))[1]))
    jac_flat.append(tf.stack(jac_columns, axis=1))
    # Reset this primal's tangent to zero before moving to the next one.
    tangent_mask[primal_index] = tf.zeros_like(primal)
  return tf.nest.pack_sequence_as(primals, jac_flat)


def _grad(f, argnums=0):
  """Return a function which computes the gradient of `f`."""

  def _f(*params):
    with tf.GradientTape() as tape:
      tape.watch(params)
      primals_out = f(*params)
    return tape.gradient(
        primals_out,
        params[argnums],
        unconnected_gradients=tf.UnconnectedGradients.ZERO)

  return _f


def _hvp(f, primals, tangents):
  """Compute a forward-over-back Hessian-vector product."""
  with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
    with tf.GradientTape() as tape:
      tape.watch(primals)
      f_out = f(*primals)
      # HVPs are only defined for scalar-valued functions.
      f_out.shape.assert_is_compatible_with([])
    return acc.jvp(tape.gradient(f_out, primals))


def _vectorize_parameters(f, params, use_pfor, dtype):
  """Loop over `params`, providing a one-hot mask to `f` for each."""
  parameter_sizes = [tf.size(param) for param in params]
  total_size = tf.math.add_n(parameter_sizes)

  def _wrapper(index):
    full_onehot = tf.one_hot(index, total_size)
    split_onehot = tf.split(full_onehot, parameter_sizes)
    tangents = [
        tf.reshape(v, tf.shape(param))
        for param, v in zip(params, split_onehot)
    ]
    return f(tangents)

  if use_pfor:
    return tf.vectorized_map(_wrapper, tf.range(total_size))
  else:
    return tf.map_fn(_wrapper, tf.range(total_size), dtype)


def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
  """Computes the full Hessian matrix for the scalar-valued f(*params).

  Args:
    f: A function taking `params` and returning a scalar.
    params: A possibly nested structure of tensors.
    use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.
    dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes
      (e.g. `tf.float32`) matching the structure of `f`'s returns.

  Returns:
    A possibly nested structure of matrix slices corresponding to `params`.
    Each slice has shape [P, p_s] where `p_s` is the number of parameters
    (`tf.size`) in the corresponding element of `params` and `P` is the total
    number of parameters (`sum_s(p_s)`). The full matrix can be obtained by
    concatenating along the second axis.
  """
  return _vectorize_parameters(
      functools.partial(_hvp, f, params),
      params,
      use_pfor=use_pfor,
      dtype=dtype)


def _test_gradients(testcase,
                    f,
                    primals,
                    order,
                    delta=1e-3,
                    rtol=1e-2,
                    atol=1e-6):
  """Tests forward/backward jacobians of `f`'s [0, `order`)-order gradients."""
  if order < 1:
    raise ValueError(
        "`order` should be a positive integer, got '{}'.".format(order))
  if order > 1:
    # Recurse: check the jacobians of `grad(f)` up to order - 1 first.
    _test_gradients(
        testcase=testcase,
        f=_grad(f),
        primals=primals,
        order=order - 1,
        delta=delta,
        rtol=rtol,
        atol=atol)
  sym_jac_back, num_jac = tf.test.compute_gradient(f, primals, delta=delta)
  testcase.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol)
  sym_jac_fwd = _jacfwd(f, primals)
  testcase.assertAllClose(num_jac, sym_jac_fwd, rtol=rtol, atol=atol)
  # And the symbolic computations should be much closer.
  testcase.assertAllClose(sym_jac_back, sym_jac_fwd)


class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters([
      ("Dense", [[0.1]], functools.partial(tf.keras.layers.Dense, 5)),
      ("Conv2D",
       np.reshape(
           np.arange(start=-1., stop=1., step=2. / (1 * 2 * 4 * 4)),
           [1, 2, 4, 4]), functools.partial(tf.keras.layers.Conv2D, 2, 2),
       1e-3)
  ])
  def testKerasLayers(self, value, op_fn, atol=1e-6):
    layer = op_fn()
    input_value = tf.constant(value, dtype=tf.float32)
    layer.build(input_value.shape)
    # Make sure the test is deterministic by avoiding random variable
    # initialization.
    for v in layer.trainable_variables:
      v.assign(
          tf.reshape(
              tf.range(
                  -1.,
                  1.,
                  2. / tf.size(v, out_type=tf.float32),
                  dtype=tf.float32), v.shape))
    _test_gradients(
        self,
        layer, [input_value],
        atol=atol,
        # These are linear, so second-order is pretty boring.
        order=2)

  @parameterized.named_parameters([
      ("NonFused", [[0.1], [0.2], [-0.3]],
       functools.partial(tf.keras.layers.BatchNormalization, fused=False)),
      ("Fused", [[[[0.1, 2.]]], [[[0.2, -3.]]], [[[-0.3, 4.]]]],
       functools.partial(tf.keras.layers.BatchNormalization, fused=True))
  ])
  def testBatchNorm(self, value, op_fn):
    for training in [True, False]:
      layer = op_fn()
      input_value = tf.constant(value, dtype=tf.float32)
      layer.build(input_value.shape)
      _test_gradients(
          self,
          functools.partial(layer, training=training), [input_value],
          order=2,
          atol=1e-3)

  @parameterized.named_parameters([
      ("NonFused", [[0.1], [0.2], [-0.3]],
       functools.partial(tf.keras.layers.BatchNormalization, fused=False)),
      ("Fused", [[[[0.1, 2.]]], [[[0.2, -3.]]], [[[-0.3, 4.]]]],
       functools.partial(tf.keras.layers.BatchNormalization, fused=True))
  ])
  def testBatchNormLayerParamGrads(self, value, op_fn):
    for training in [True, False]:
      layer = op_fn()
      with tf.GradientTape() as tape:
        input_value = tf.constant(value, dtype=tf.float32)
        tape.watch(input_value)
        output = layer(input_value, training=training)
      jac_back = tape.jacobian(output,
                               [input_value] + layer.trainable_variables)
      jac_forward = _jacfwd(
          lambda *args: layer(args[0], training=training),  # pylint:disable=cell-var-from-loop
          [input_value] + layer.trainable_variables)
      for backward, forward in zip(jac_back, jac_forward):
        forward = tf.reshape(forward, tf.shape(backward))
        self.assertAllClose(backward, forward)

  @parameterized.named_parameters([("Function", tf.function),
                                   ("NoFunction", lambda f: f)])
  def testVariablesHVP(self, decorator):

    class _Model(tf.Module):

      def __init__(self):
        self._first_dense = tf.keras.layers.Dense(18)
        self._conv = tf.keras.layers.Conv2D(2, 2)
        self._norm = tf.keras.layers.BatchNormalization()
        self._second_dense = tf.keras.layers.Dense(1)

      def __call__(self, x):
        x = self._first_dense(x)
        x = tf.nn.relu(x)
        x = self._norm(x)
        x = tf.nn.relu(self._conv(tf.reshape(x, [-1, 2, 3, 3])))
        return self._second_dense(x)

    model = _Model()

    def _loss():
      input_value = tf.constant([[-0.5, 1.], [0.5, -1.]])
      target = tf.constant([[-1.], [2.]])
      return tf.math.reduce_sum((model(input_value) - target)**2.)

    @decorator
    def _compute_hvps():
      # Forward-over-back HVP.
      with tf.GradientTape() as tape:
        loss = _loss()
      vector = tape.gradient(loss, model.trainable_variables)
      variable_input_fn = lambda unused_variables: _loss()
      forward_over_back_hvp, = _hvp(variable_input_fn,
                                    [model.trainable_variables], [vector])
      # Back-over-back HVP for comparison.
      with tf.GradientTape(persistent=True) as tape:
        tape.watch(model.trainable_variables)
        loss = _loss()
        first_grads = tape.gradient(loss, model.trainable_variables)
      back_over_back_hvp = tape.gradient(
          first_grads, model.trainable_variables, output_gradients=vector)
      return forward_over_back_hvp, back_over_back_hvp

    self.assertAllClose(*_compute_hvps(), rtol=1e-5, atol=1e-5)

  def testEmbeddingLayerInFunction(self):

    class M(tf.keras.Model):

      def __init__(self):
        super(M, self).__init__()
        self.embed = tf.keras.layers.Embedding(5, 1)
        self.proj = tf.keras.layers.Dense(1)

      @tf.function
      def call(self, x):
        return self.proj(self.embed(x))

    model = M()
    model(tf.zeros([3, 3], dtype=tf.int32))  # pylint: disable=not-callable
    parameters = model.embed.variables
    tangents = [tf.ones_like(v) for v in parameters]
    with tf.autodiff.ForwardAccumulator(parameters, tangents):
      # Note that forwardprop runs alongside the original computation. This
      # test is just checking that it doesn't crash; correctness is tested in
      # core TF.
      model(tf.zeros([3, 3], dtype=tf.int32))  # pylint: disable=not-callable


class HessianTests(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters([("PFor", True), ("MapFn", False)])
  def testHessianOfVariables(self, use_pfor):
    model = tf.keras.layers.Dense(1)
    model.build([None, 2])

    def _loss(*unused_args):
      input_value = tf.constant([[-0.5, 1.], [0.5, -1.]])
      target = tf.constant([[-1.], [2.]])
      return tf.math.reduce_sum((model(input_value) - target)**2.)

    kernel_hess, bias_hess = _forward_over_back_hessian(
        _loss, [model.kernel, model.bias],
        use_pfor=use_pfor,
        dtype=[tf.float32, tf.float32])
    # 3 total parameters, the whole hessian is the 3x3 concatenation
    self.assertEqual([3, 2, 1], kernel_hess.shape)
    self.assertEqual([3, 1], bias_hess.shape)
    full_hessian = tf.concat([tf.reshape(kernel_hess, [3, 2]), bias_hess],
                             axis=1)
    # The full Hessian should be symmetric.
    self.assertAllClose(full_hessian, tf.transpose(full_hessian))


if __name__ == "__main__":
  tf.test.main()
11,600
35.828571
95
py
keras
keras-master/keras/integration_test/function_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys

import tensorflow as tf


class MiniModel(tf.keras.Model):
  """Minimal model for mnist.

  Useful for testing and debugging on slow TPU simulators.
  """

  def __init__(self):
    super(MiniModel, self).__init__(name='')
    self.fc = tf.keras.layers.Dense(
        1, name='fc', kernel_initializer='ones', bias_initializer='ones')

  def call(self, inputs, training=True):
    return self.fc(inputs)


class DefunnedMiniModel(MiniModel):
  """MiniModel whose `call` is wrapped in a `tf.function`."""

  @tf.function
  def call(self, inputs, training=True):
    return super(DefunnedMiniModel, self).call(inputs, training=training)


class ModelWithOptimizer(tf.keras.Model):
  """Model whose `call` performs a full training step."""

  def __init__(self):
    super(ModelWithOptimizer, self).__init__()
    self.dense = tf.keras.layers.Dense(1)
    self.optimizer = tf.keras.optimizers.Adam(0.01)

  @tf.function(
      input_signature=(tf.TensorSpec([None, 2], tf.float32),
                       tf.TensorSpec([None], tf.float32)))
  def call(self, x, y):
    with tf.GradientTape() as tape:
      loss = tf.math.reduce_mean((self.dense(x) - y) ** 2.)
    trainable_variables = self.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, trainable_variables))
    return {'loss': loss}


class FunctionTest(tf.test.TestCase):

  def testFunctionRelaxationLosesInnerDimWithKerasLayer(self):
    layer = tf.keras.layers.Dense(1)
    fn = tf.function(experimental_relax_shapes=True)(layer)

    with self.captureWritesToStream(sys.stderr) as printed:
      fn(tf.ones((3, 2)))
      self.assertNotIn('ValueError', printed.contents())
    with self.captureWritesToStream(sys.stderr) as printed:
      # Use batch size 2 to trigger a second cache miss on the shape.
      fn(tf.ones((2, 2)))
      self.assertNotIn('ValueError', printed.contents())

    # Shape relaxation passes TensorShape([None, None]), which causes layer
    # matmul to fail, due to incompatible dims.  What would have been a graph
    # build time error (layer would complain about the inner dim being 4).
    with self.captureWritesToStream(sys.stderr) as printed:
      with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                  r'Matrix size-incompatible'):
        fn(tf.ones((3, 4)))

  def testDefunKerasModelCall(self):
    model = MiniModel()
    model.call = tf.function(model.call)

    x = tf.ones([1, 2])
    y = model(x)  # pylint:disable=not-callable

    self.assertAllEqual([[3.0]], self.evaluate(y))

    # Break the reference cycle between the MiniModel and the defun:
    # `MiniModel` --(through its `call` method)--> `Function`
    # `Function` --(instancemethod on `MiniModel`)--> `MiniModel`
    del model.call

  def testDecoratedMethod(self):
    m = DefunnedMiniModel()
    instance_call_one = m.call(tf.ones([1, 2]), training=True)
    instance_call_two = m.call(inputs=tf.ones([1, 2]), training=True)
    class_call = DefunnedMiniModel.call(m, tf.ones([1, 2]), training=True)
    self.assertAllEqual(instance_call_one, instance_call_two)
    self.assertAllEqual(instance_call_one, class_call)

  def testDecoratedMethodUniqueFunctionPerInstance(self):
    m = DefunnedMiniModel()
    n = DefunnedMiniModel()

    class_method_one = DefunnedMiniModel.call
    class_method_two = DefunnedMiniModel.call

    m_method_one = m.call
    m_method_two = m.call

    n_method_one = n.call
    n_method_two = n.call

    self.assertEqual(class_method_one, class_method_two)
    self.assertEqual(m_method_one, m_method_two)
    self.assertEqual(n_method_one, n_method_two)
    self.assertNotEqual(m.call, n.call)

  def testDecoratedMethodGetConcreteFunction(self):
    m = DefunnedMiniModel()
    instance_call_one = m.call.get_concrete_function(
        tf.ones([1, 2]), training=False)
    instance_call_two = m.call.get_concrete_function(
        inputs=tf.ones([1, 2]), training=False)
    self.assertAllEqual(instance_call_one(tf.ones([1, 2])),
                        instance_call_two(tf.ones([1, 2])))

    # Also make sure get_concrete_function works on the class method
    DefunnedMiniModel.call.get_concrete_function(
        m, tf.ones([1, 2]), training=False)
    DefunnedMiniModel.call.get_concrete_function(
        m, inputs=tf.ones([1, 2]), training=True)

  def testDecoratedMethodVariableCleanup(self):
    m = DefunnedMiniModel()
    m(tf.ones([1, 2]))  # pylint:disable=not-callable
    variable_refs = list({v.ref() for v in m.variables})
    self.assertLen(variable_refs, 2)
    del m

    # Verifying if the variables are only referenced from variable_refs.
    # We expect the reference counter to be 1, but `sys.getrefcount` reports
    # one higher reference counter because a temporary is created when we call
    # sys.getrefcount().  Hence check if the number returned is 2.
    # https://docs.python.org/3/library/sys.html#sys.getrefcount
    self.assertEqual(sys.getrefcount(variable_refs[0].deref()), 2)
    self.assertEqual(sys.getrefcount(variable_refs[1].deref()), 2)

  def testStandardTrainingLoopInFunction(self):
    layer = tf.keras.layers.Dense(2)
    dataset = (
        tf.data.Dataset.from_tensors((tf.ones([784]), tf.ones([], tf.int32)))
        .map(lambda x, y: (x, y))
        .repeat(10)
        .batch(32))
    optimizer = tf.keras.optimizers.Adam()

    @tf.function
    def train():
      for x, y in dataset:
        with tf.GradientTape() as tape:
          out = layer(x)
          loss = tf.reduce_mean(
              tf.nn.sparse_softmax_cross_entropy_with_logits(
                  logits=out, labels=y))
        layer_variables = layer.trainable_variables
        gradients = tape.gradient(loss, layer_variables)
        optimizer.apply_gradients(zip(gradients, layer_variables))

    train()

  def testEarlyStoppingTrainingLoopInFunction(self):
    layer = tf.keras.layers.Dense(2)
    dataset = (
        tf.data.Dataset.from_tensors((tf.ones([784]), tf.ones([], tf.int32)))
        .map(lambda x, y: (x, y))
        .repeat(10)
        .batch(32))
    optimizer = tf.keras.optimizers.Adam()

    @tf.function
    def train():
      for x, y in dataset:
        with tf.GradientTape() as tape:
          out = layer(x)
          loss = tf.math.reduce_mean(
              tf.nn.sparse_softmax_cross_entropy_with_logits(
                  logits=out, labels=y))
        layer_variables = layer.trainable_variables
        gradients = tape.gradient(loss, layer_variables)
        optimizer.apply_gradients(zip(gradients, layer_variables))
        if optimizer.iterations > 3:
          break

    train()

  def test_optimizer(self):
    x = tf.constant([[3., 4.]])
    y = tf.constant([2.])
    model = ModelWithOptimizer()
    model(x, y)  # pylint:disable=not-callable


class AutomaticControlDependenciesTest(tf.test.TestCase):

  def testVariableInitializersCanBeLifted(self):
    # The initializer is a stateful op, but using it inside a function should
    # *not* create additional dependencies.  That's what we're testing.
    layer = tf.keras.layers.Dense(1, kernel_initializer='glorot_uniform')

    @tf.function
    def fn(x):
      # Stateful operation
      tf.debugging.Assert(x, ['Error'])
      # Variable initialization should be lifted.  Prior to the change that
      # added this test, the lifting would crash because of an auto control dep
      # added on `x`.  Note, the error did not happen if we
      # manually created a tf.Variable outside of function and used it
      # here.  Alternatively, creating a tf.Variable inside fn() causes
      # a different sort of error that is out of scope for this test.
      return layer(tf.convert_to_tensor([[1.0, 1.0]]))

    true = tf.convert_to_tensor(True)

    concrete = fn.get_concrete_function(
        tf.TensorSpec(shape=(), dtype=tf.bool))
    self.evaluate(concrete(true))
    self.evaluate(fn(True))


if __name__ == '__main__':
  tf.test.main()
8,605
35.008368
80
py
keras
keras-master/keras/integration_test/parameter_server_custom_training_loop_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test to demonstrate custom training loop with ParameterServerStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import multiprocessing from absl import logging import portpicker import tensorflow as tf NUM_EPOCHS = 10 NUM_STEPS = 100 STEPS_PER_EXECUTION = 10 class ParameterServerCustomTrainingLoopTest(tf.test.TestCase): """Test to demonstrate custom training loop with ParameterServerStrategy.""" def create_in_process_cluster(self, num_workers, num_ps): """Creates and starts local servers and returns the cluster_resolver.""" worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)] ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)] cluster_dict = {} cluster_dict["worker"] = ["localhost:%s" % port for port in worker_ports] if num_ps > 0: cluster_dict["ps"] = ["localhost:%s" % port for port in ps_ports] cluster_spec = tf.train.ClusterSpec(cluster_dict) # Workers need some inter_ops threads to work properly. 
worker_config = tf.compat.v1.ConfigProto() if multiprocessing.cpu_count() < num_workers + 1: worker_config.inter_op_parallelism_threads = num_workers + 1 for i in range(num_workers): tf.distribute.Server( cluster_spec, job_name="worker", task_index=i, config=worker_config, protocol="grpc") for i in range(num_ps): tf.distribute.Server( cluster_spec, job_name="ps", task_index=i, protocol="grpc") return cluster_spec def setUp(self): super(ParameterServerCustomTrainingLoopTest, self).setUp() cluster_spec = self.create_in_process_cluster(num_workers=3, num_ps=2) cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver( cluster_spec, rpc_layer="grpc") self.strategy = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver) self.coordinator = ( tf.distribute.experimental.coordinator.ClusterCoordinator( self.strategy)) def testCustomTrainingLoop(self): coordinator, strategy = self.coordinator, self.strategy def per_worker_dataset_fn(): def dataset_fn(_): return tf.data.Dataset.from_tensor_slices((tf.random.uniform( (6, 10)), tf.random.uniform((6, 10)))).batch(2).repeat() return strategy.distribute_datasets_from_function(dataset_fn) per_worker_dataset = coordinator.create_per_worker_dataset( per_worker_dataset_fn) with strategy.scope(): model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001) train_accuracy = tf.keras.metrics.CategoricalAccuracy( name="train_accuracy") @tf.function def worker_train_fn(iterator): def replica_fn(inputs): """Training loop function.""" batch_data, labels = inputs with tf.GradientTape() as tape: predictions = model(batch_data, training=True) loss = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE)(labels, predictions) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train_accuracy.update_state(labels, predictions) for _ in 
tf.range(STEPS_PER_EXECUTION): strategy.run(replica_fn, args=(next(iterator),)) for epoch in range(NUM_EPOCHS): distributed_iterator = iter(per_worker_dataset) for step in range(0, NUM_STEPS, STEPS_PER_EXECUTION): coordinator.schedule(worker_train_fn, args=(distributed_iterator,)) logging.info("Epoch %d, step %d scheduled.", epoch, step) logging.info("Now joining at epoch %d.", epoch) coordinator.join() logging.info( "Finished joining at epoch %d. Training accuracy: %f. " "Total iterations: %d", epoch, train_accuracy.result(), optimizer.iterations.value()) if epoch < NUM_EPOCHS - 1: train_accuracy.reset_states() if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
4,920
35.723881
80
py
keras
keras-master/keras/integration_test/preprocessing_applied_in_dataset_creator_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from keras.integration_test import preprocessing_test_utils as utils

ds_combinations = tf.__internal__.distribute.combinations
multi_process_runner = tf.__internal__.distribute.multi_process_runner
test_combinations = tf.__internal__.test.combinations

# Note: Strategy combinations are not (yet) public APIs, so they are subject
# to API changes and backward-compatibility is not guaranteed.
STRATEGIES = [
    ds_combinations.default_strategy,
    ds_combinations.mirrored_strategy_with_cpu_1_and_2,
    ds_combinations.mirrored_strategy_with_two_gpus,
    ds_combinations.tpu_strategy,
    ds_combinations.cloud_tpu_strategy,
    ds_combinations.parameter_server_strategy_3worker_2ps_cpu,
    ds_combinations.parameter_server_strategy_3worker_2ps_1gpu,
    ds_combinations.multi_worker_mirrored_2x1_cpu,
    ds_combinations.multi_worker_mirrored_2x2_gpu,
    ds_combinations.central_storage_strategy_with_two_gpus,
]


@ds_combinations.generate(
    test_combinations.combine(strategy=STRATEGIES, mode="eager"))
class PreprocessingAppliedInDatasetCreatorTest(tf.test.TestCase):
  """Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""

  def testDistributedModelFit(self, strategy):
    # Build both models under the strategy scope so their variables are
    # distributed consistently.
    with strategy.scope():
      preprocessing_model = utils.make_preprocessing_model(self.get_temp_dir())
      training_model = utils.make_training_model()
      training_model.compile(optimizer="sgd", loss="binary_crossentropy")

    def dataset_fn(input_context):
      # Each input pipeline consumes only its own shard of the data.
      ds = utils.make_dataset()
      ds = ds.shard(input_context.num_input_pipelines,
                    input_context.input_pipeline_id)
      per_replica_batch = input_context.get_per_replica_batch_size(
          global_batch_size=utils.BATCH_SIZE)
      ds = ds.batch(per_replica_batch).repeat().prefetch(2)
      # Preprocessing is applied inside the tf.data pipeline.
      return ds.map(lambda x, y: (preprocessing_model(x), y))

    dataset_creator = tf.keras.utils.experimental.DatasetCreator(dataset_fn)
    training_model.fit(dataset_creator, epochs=2, steps_per_epoch=utils.STEPS)


if __name__ == "__main__":
  multi_process_runner.test_main()
3,000
42.492754
80
py
keras
keras-master/keras/integration_test/gradient_checkpoint_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import gc

import tensorflow as tf
from tensorflow.python.platform import test as test_lib

layers = tf.keras.layers
optimizers = tf.keras.optimizers


def _get_big_cnn_model(img_dim, n_channels, num_partitions,
                       blocks_per_partition):
  """Creates a test model whose activations are significantly larger than model size."""
  model = tf.keras.Sequential()
  model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
  model.add(layers.Flatten())
  model.add(layers.Dense(32, activation=tf.nn.relu))
  model.add(layers.Dense(10))
  return model


def _get_split_cnn_model(img_dim, n_channels, num_partitions,
                         blocks_per_partition):
  """Creates a test model that is split into `num_partitions` smaller models."""
  models = [tf.keras.Sequential() for _ in range(num_partitions)]
  models[0].add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for i in range(num_partitions):
    model = models[i]
    if i > 0:
      # Chain the partitions: each one accepts the previous one's output.
      last_shape = models[i - 1].layers[-1].output_shape
      model.add(layers.Input(shape=last_shape[1:]))
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
  models[-1].add(layers.Flatten())
  models[-1].add(layers.Dense(32, activation=tf.nn.relu))
  models[-1].add(layers.Dense(10))
  return models


def _compute_loss(logits, labels):
  return tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=labels))


def _limit_gpu_memory():
  """Helper function to limit GPU memory for testing."""
  gpus = tf.config.experimental.list_physical_devices('GPU')
  if gpus:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
    return True
  return False


def _get_dummy_data(img_dim, n_channels, batch_size):
  inputs = tf.ones([batch_size, img_dim, img_dim, n_channels])
  labels = tf.ones([batch_size], dtype=tf.int64)
  return inputs, labels


def _train_no_recompute(n_steps):
  """Trains a single large model without gradient checkpointing."""
  img_dim, n_channels, batch_size = 256, 1, 4
  x, y = _get_dummy_data(img_dim, n_channels, batch_size)
  model = _get_big_cnn_model(
      img_dim, n_channels, num_partitions=3, blocks_per_partition=2)
  optimizer = optimizers.SGD()
  losses = []
  tr_vars = model.trainable_variables
  for _ in range(n_steps):
    with tf.GradientTape() as tape:
      logits = model(x)
      loss = _compute_loss(logits, y)
      losses.append(loss)
    grads = tape.gradient(loss, tr_vars)  # tr_vars
    optimizer.apply_gradients(zip(grads, tr_vars))
    del grads
  return losses


def _train_with_recompute(n_steps):
  """Trains a single large model with gradient checkpointing using tf.recompute_grad."""
  img_dim, n_channels, batch_size = 256, 1, 4
  x, y = _get_dummy_data(img_dim, n_channels, batch_size)
  # This model is the same model as _get_big_cnn_model but split into 3 parts.
  models = _get_split_cnn_model(
      img_dim, n_channels, num_partitions=3, blocks_per_partition=2)
  model1, model2, model3 = models
  # Apply gradient checkpointing to the submodels using tf.recompute_grad.
  model1_re = tf.recompute_grad(model1)
  model2_re = tf.recompute_grad(model2)
  model3_re = tf.recompute_grad(model3)
  optimizer = optimizers.SGD()
  tr_vars = (
      model1.trainable_variables + model2.trainable_variables +
      model3.trainable_variables)
  losses = []
  for _ in range(n_steps):
    with tf.GradientTape() as tape:
      logits1 = model1_re(x)
      logits2 = model2_re(logits1)
      logits3 = model3_re(logits2)
      loss = _compute_loss(logits3, y)
      losses.append(loss)
    grads = tape.gradient(loss, tr_vars)  # tr_vars
    optimizer.apply_gradients(zip(grads, tr_vars))
    del grads
  return losses


class GradientCheckpointTest(tf.test.TestCase):

  def test_raises_oom_exception(self):
    if not _limit_gpu_memory():
      self.skipTest('No virtual GPUs found')
    with self.assertRaises(Exception) as context:
      _train_no_recompute(1)
    self.assertIsInstance(context.exception, tf.errors.ResourceExhaustedError)

  def test_does_not_raise_oom_exception(self):
    if not _limit_gpu_memory():
      self.skipTest('No virtual GPUs found')
    if test_lib.is_built_with_rocm():
      self.skipTest(
          'ROCm MIOpen does not support searching for memory-limited'
          'solvers yet so skip the subtest which would result in OOM.')
    n_step = 2
    losses = _train_with_recompute(n_step)
    self.assertLen(losses, n_step)

  def tearDown(self):
    super(GradientCheckpointTest, self).tearDown()
    # Make sure all the models created in Keras have been deleted and cleared
    # from the global Keras graph; also force a GC to recycle the GPU memory.
    tf.keras.backend.clear_session()
    gc.collect()


if __name__ == '__main__':
  tf.test.main()
6,466
36.818713
88
py
keras
keras-master/keras/integration_test/multi_worker_tutorial_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for multi-worker training tutorial.""" import contextlib import os import re import unittest import uuid import zipfile from absl import logging from absl.testing import parameterized import numpy as np import tensorflow as tf PER_WORKER_BATCH_SIZE = 64 NUM_WORKERS = 2 NUM_EPOCHS = 2 NUM_STEPS_PER_EPOCH = 50 def _is_chief(task_type, task_id): # Note: there are two possible `TF_CONFIG` configuration. # 1) In addition to `worker` tasks, a `chief` task type is use; # in this case, this function should be modified to # `return task_type == 'chief'`. # 2) Only `worker` task type is used; in this case, worker 0 is # regarded as the chief. The implementation demonstrated here # is for this case. return task_type == 'worker' and task_id == 0 def _get_temp_dir(dirpath, task_id): base_dirpath = 'workertemp_' + str(task_id) temp_dir = os.path.join(dirpath, base_dirpath) tf.io.gfile.makedirs(temp_dir) return temp_dir def write_filepath(filepath, task_type, task_id): dirpath = os.path.dirname(filepath) base = os.path.basename(filepath) if not _is_chief(task_type, task_id): dirpath = _get_temp_dir(dirpath, task_id) return os.path.join(dirpath, base) class MultiWorkerTutorialTest(parameterized.TestCase, tf.test.TestCase): """Test of multi-worker training flow in tutorials on tensorflow.org. 
Please see below test method docs for what actual tutorial is being covered. """ # TODO(rchao): Add a test to demonstrate gather with MWMS. @contextlib.contextmanager def skip_fetch_failure_exception(self): try: yield except zipfile.BadZipfile as e: # There can be a race when multiple processes are downloading the data. # Skip the test if that results in loading errors. self.skipTest('Data loading error: Bad magic number for file header.') except Exception as e: # pylint: disable=broad-except if 'URL fetch failure' in str(e): self.skipTest('URL fetch error not considered failure of the test.') else: raise def mnist_dataset(self): path_to_use = 'mnist_{}.npz'.format(str(uuid.uuid4())) with self.skip_fetch_failure_exception(): (x_train, y_train), _ = tf.keras.datasets.mnist.load_data(path=path_to_use) # The `x` arrays are in uint8 and have values in the range [0, 255]. # We need to convert them to float32 with values in the range [0, 1] x_train = x_train / np.float32(255) y_train = y_train.astype(np.int64) train_dataset = tf.data.Dataset.from_tensor_slices( (x_train, y_train)).shuffle(60000) return train_dataset def dataset_fn(self, global_batch_size, input_context): batch_size = input_context.get_per_replica_batch_size(global_batch_size) dataset = self.mnist_dataset() dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) dataset = dataset.batch(batch_size) return dataset def build_cnn_model(self): return tf.keras.Sequential([ tf.keras.layers.Input(shape=(28, 28)), tf.keras.layers.Reshape(target_shape=(28, 28, 1)), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10) ]) def build_and_compile_cnn_model(self): model = self.build_cnn_model() model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.SGD(learning_rate=0.001), metrics=['accuracy']) return model 
@tf.__internal__.test.combinations.generate( tf.__internal__.test.combinations.combine( mode=['eager'], tf_api_version=2)) def testSingleWorkerModelFit(self): single_worker_dataset = self.mnist_dataset().batch( PER_WORKER_BATCH_SIZE) single_worker_model = self.build_and_compile_cnn_model() single_worker_model.fit(single_worker_dataset, epochs=NUM_EPOCHS) @tf.__internal__.test.combinations.generate( tf.__internal__.test.combinations.combine( mode=['eager'], tf_api_version=2)) def testMwmsWithModelFit(self, mode): """Test multi-worker training flow demo'ed in go/multi-worker-with-keras. This test should be kept in sync with the code samples in go/multi-worker-with-keras. Args: mode: Runtime mode. """ def fn(model_path, checkpoint_dir): global_batch_size = PER_WORKER_BATCH_SIZE * NUM_WORKERS strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() with strategy.scope(): multi_worker_model = self.build_and_compile_cnn_model() callbacks = [ tf.keras.callbacks.ModelCheckpoint( filepath=os.path.join(self.get_temp_dir(), 'checkpoint')) ] multi_worker_dataset = strategy.distribute_datasets_from_function( lambda input_context: self.dataset_fn(global_batch_size, input_context )) multi_worker_model.fit( multi_worker_dataset, epochs=NUM_EPOCHS, steps_per_epoch=50, callbacks=callbacks) task_type, task_id = (strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) write_model_path = write_filepath(model_path, task_type, task_id) multi_worker_model.save(write_model_path) if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(os.path.dirname(write_model_path)) # Make sure chief finishes saving before non-chief's assertions. 
tf.__internal__.distribute.multi_process_runner.get_barrier().wait() if not tf.io.gfile.exists(model_path): raise RuntimeError() if tf.io.gfile.exists(write_model_path) != _is_chief(task_type, task_id): raise RuntimeError() with strategy.scope(): loaded_model = tf.keras.models.load_model(model_path) loaded_model.fit(multi_worker_dataset, epochs=1, steps_per_epoch=1) checkpoint = tf.train.Checkpoint(model=multi_worker_model) write_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=write_checkpoint_dir, max_to_keep=1) checkpoint_manager.save() if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(write_checkpoint_dir) # Make sure chief finishes saving before non-chief's assertions. tf.__internal__.distribute.multi_process_runner.get_barrier().wait() if not tf.io.gfile.exists(checkpoint_dir): raise RuntimeError() if tf.io.gfile.exists(write_checkpoint_dir) != _is_chief( task_type, task_id): raise RuntimeError() latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) checkpoint.restore(latest_checkpoint) multi_worker_model.fit(multi_worker_dataset, epochs=1, steps_per_epoch=1) logging.info('testMwmsWithModelFit successfully ends') model_path = os.path.join(self.get_temp_dir(), 'model.tf') checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt') try: mpr_result = tf.__internal__.distribute.multi_process_runner.run( fn, tf.__internal__.distribute.multi_process_runner.create_cluster_spec( num_workers=NUM_WORKERS), args=(model_path, checkpoint_dir), return_output=True) except tf.errors.UnavailableError: self.skipTest('Skipping rare disconnection among the workers.') self.assertTrue( any([ 'testMwmsWithModelFit successfully ends' in msg for msg in mpr_result.stdout ])) def extract_accuracy(worker_id, input_string): match = re.match( r'\[worker\-{}\].*accuracy: (\d+\.\d+).*'.format(worker_id), input_string) return None if match is None else float(match.group(1)) for worker_id 
in range(NUM_WORKERS): accu_result = tf.nest.map_structure( lambda x: extract_accuracy(worker_id, x), # pylint: disable=cell-var-from-loop mpr_result.stdout) self.assertTrue( any(accu_result), 'Every worker is supposed to have accuracy result.') @tf.__internal__.test.combinations.generate( tf.__internal__.test.combinations.combine( mode=['eager'], tf_api_version=2)) def testMwmsWithCtl(self, mode): """Test multi-worker CTL training flow demo'ed in a to-be-added tutorial.""" def proc_func(checkpoint_dir): global_batch_size = PER_WORKER_BATCH_SIZE * NUM_WORKERS strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() try: with strategy.scope(): multi_worker_model = self.build_cnn_model() multi_worker_dataset = strategy.distribute_datasets_from_function( lambda input_context: self.dataset_fn(global_batch_size, # pylint: disable=g-long-lambda input_context)) optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001) train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') @tf.function def train_step(iterator): """Training step function.""" def step_fn(inputs): """Per-Replica step function.""" x, y = inputs with tf.GradientTape() as tape: predictions = multi_worker_model(x, training=True) per_batch_loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(y, predictions) loss = tf.nn.compute_average_loss( per_batch_loss, global_batch_size=global_batch_size) grads = tape.gradient(loss, multi_worker_model.trainable_variables) optimizer.apply_gradients( zip(grads, multi_worker_model.trainable_variables)) train_accuracy.update_state(y, predictions) return loss per_replica_losses = strategy.run(step_fn, args=(next(iterator),)) return strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None) epoch = tf.Variable( initial_value=tf.constant(0, dtype=tf.dtypes.int64), name='epoch') step_in_epoch = tf.Variable( initial_value=tf.constant(0, dtype=tf.dtypes.int64), 
name='step_in_epoch') task_type, task_id = (strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) checkpoint = tf.train.Checkpoint( model=multi_worker_model, epoch=epoch, step_in_epoch=step_in_epoch) write_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=write_checkpoint_dir, max_to_keep=1) latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) if latest_checkpoint: checkpoint.restore(latest_checkpoint) while epoch.numpy() < NUM_EPOCHS: iterator = iter(multi_worker_dataset) total_loss = 0.0 num_batches = 0 while step_in_epoch.numpy() < NUM_STEPS_PER_EPOCH: total_loss += train_step(iterator) num_batches += 1 step_in_epoch.assign_add(1) train_loss = total_loss / num_batches logging.info('Epoch: %d, accuracy: %f, train_loss: %f.', epoch.numpy(), train_accuracy.result(), train_loss) train_accuracy.reset_state() checkpoint_manager.save() if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(write_checkpoint_dir) epoch.assign_add(1) step_in_epoch.assign(0) except tf.errors.UnavailableError as e: logging.info('UnavailableError occurred: %r', e) raise unittest.SkipTest('Skipping test due to UnavailableError') logging.info('testMwmsWithCtl successfully ends') checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt') mpr_result = tf.__internal__.distribute.multi_process_runner.run( proc_func, tf.__internal__.distribute.multi_process_runner.create_cluster_spec( num_workers=NUM_WORKERS), return_output=True, args=(checkpoint_dir,)) self.assertTrue( any([ 'testMwmsWithCtl successfully ends' in msg for msg in mpr_result.stdout ])) if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main()
13,443
37.301994
101
py
keras
keras-master/keras/integration_test/gradients_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import numpy as np import tensorflow as tf class TestKerasModelClass(tf.keras.Model): """A simple tensorflow keras Model class definition.""" def __init__(self, width): super(TestKerasModelClass, self).__init__() self.width = width def build(self, input_shape): self.weight = self.add_weight( name="test_keras_var", shape=(self.width,), dtype=tf.float32, trainable=True, ) def call(self, inputs): return self.weight * inputs class GradientsTest(tf.test.TestCase): def _TestVariablesGradient(self, inputs, test_model, vars_to_grad): """Returns gradients of `test_model` with respect to `vars_to_grad`.""" test_model_re = tf.recompute_grad(test_model) with tf.GradientTape(persistent=True) as tape: tape.watch(vars_to_grad) out_re = test_model_re(inputs) out = test_model(inputs) grads_re = tape.gradient(out_re, vars_to_grad) grads = tape.gradient(out, vars_to_grad) return grads_re, grads def testKerasRecompute(self): """Checks that recompute_grad works for a simple Keras Model.""" test_model = TestKerasModelClass(10) test_input = tf.constant(tf.zeros((10, 10), dtype=np.float32)) # Ensures keras model is initialized. 
test_model(test_input) # pylint: disable=not-callable grads_re, grads = self._TestVariablesGradient(test_input, test_model, test_input) grads_re = self.evaluate(grads_re) grads = self.evaluate(grads) for g, g_re in zip(grads, grads_re): self.assertAllClose(g, g_re) grads_re, grads = self._TestVariablesGradient(test_input, test_model, test_model.variables) grads_re = self.evaluate(grads_re) grads = self.evaluate(grads) for g, g_re in zip(grads, grads_re): self.assertAllClose(g, g_re) def testLSTMBatchJacobian(self): class HasLSTM(tf.keras.Model): def __init__(self): super(HasLSTM, self).__init__() self.lstm = tf.keras.layers.LSTM(units=5) self.dense = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid) def call(self, x): return self.dense(self.lstm(x)) m = HasLSTM() def jacobian(x): with tf.GradientTape() as tape: tape.watch(x) y = m(x) # pylint: disable=not-callable return tape.batch_jacobian(y, x) inp = tf.nn.l2_normalize(tf.ones([1, 2, 3]), axis=[1, 2]) eager_result = jacobian(inp) function_result = tf.function(jacobian)(inp) self.assertAllClose(eager_result, function_result) backprop_result, numeric_result = tf.test.compute_gradient( m, [inp], delta=1e-3) self.assertAllClose(numeric_result, backprop_result, rtol=1e-2) self.assertAllClose(tf.reshape(numeric_result, [-1]), tf.reshape(eager_result, [-1]), rtol=1e-2) def testEmbeddingLookupGradientsHaveKnownShape(self): class MyLayer(tf.keras.layers.Layer): def __init__(self, **kwargs): super().__init__(**kwargs) self.embedding = None def build(self, input_shape): self.embedding = tf.Variable(tf.random.uniform([50, 16])) def call(self, x): return tf.nn.embedding_lookup(self.embedding, x) layer = MyLayer() @tf.function def _run(x): with tf.GradientTape() as tape: y = layer(x) loss = tf.math.reduce_sum(y) gradients = tape.gradient(loss, layer.weights) self.assertListEqual(gradients[0].shape.as_list(), [50, 16]) _run(tf.random.uniform([4, 16], minval=0, maxval=50, dtype=tf.int64)) if __name__ == "__main__": 
tf.test.main()
4,373
30.927007
80
py
keras
keras-master/keras/integration_test/vectorized_map_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import tensorflow as tf class VectorizedMapTest(tf.test.TestCase): def test_vectorized_map(self): batch_size = 10 num_features = 32 layer = tf.keras.layers.Dense(1) def model_fn(arg): with tf.GradientTape() as g: inp, label = arg inp = tf.expand_dims(inp, 0) label = tf.expand_dims(label, 0) prediction = layer(inp) loss = tf.nn.l2_loss(label - prediction) return g.gradient(loss, (layer.kernel, layer.bias)) inputs = tf.random.uniform([batch_size, num_features]) labels = tf.random.uniform([batch_size, 1]) per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels)) self.assertEqual(per_example_gradients[0].shape, (batch_size, num_features, 1)) self.assertEqual(per_example_gradients[1].shape, (batch_size, 1)) if __name__ == "__main__": tf.test.main()
1,576
34.044444
80
py
keras
keras-master/keras/integration_test/tpu_strategy_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TPUStrategy.""" import random import tempfile from absl import flags import tensorflow as tf from tensorflow.python.framework import test_util FLAGS = flags.FLAGS flags.DEFINE_string("tpu", "", "Name of TPU to connect to.") flags.DEFINE_string("project", None, "Name of GCP project with TPU.") flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.") # These vocabularies usually come from TFT or a Beam pipeline. 
FEATURE_VOCAB = [ "avenger", "ironman", "batman", "hulk", "spiderman", "kingkong", "wonder_woman" ] LABEL_VOCAB = ["yes", "no"] def get_tpu_cluster_resolver(): resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project, ) return resolver def get_tpu_strategy(): resolver = get_tpu_cluster_resolver() tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) return tf.distribute.experimental.TPUStrategy(resolver) class TpuStrategyTest(tf.test.TestCase): def define_kpls_for_training(self, use_adapt): if use_adapt: feature_lookup_layer = ( tf.keras.layers.StringLookup( num_oov_indices=1)) feature_lookup_layer.adapt(FEATURE_VOCAB) label_lookup_layer = ( tf.keras.layers.StringLookup( num_oov_indices=0, mask_token=None)) label_lookup_layer.adapt(LABEL_VOCAB) else: feature_lookup_layer = ( tf.keras.layers.StringLookup( vocabulary=FEATURE_VOCAB, num_oov_indices=1)) label_lookup_layer = ( tf.keras.layers.StringLookup( vocabulary=LABEL_VOCAB, num_oov_indices=0, mask_token=None)) raw_feature_input = tf.keras.layers.Input( shape=(3,), dtype=tf.dtypes.string, name="feature", ragged=True) feature_id_input = feature_lookup_layer(raw_feature_input) feature_mapper = tf.keras.Model({"features": raw_feature_input}, feature_id_input) raw_label_input = tf.keras.layers.Input( shape=(1,), dtype=tf.dtypes.string, name="label") label_id_input = label_lookup_layer(raw_label_input) label_mapper = tf.keras.Model({"label": raw_label_input}, label_id_input) return feature_mapper, label_mapper def define_inverse_lookup_layer(self): # Only needed for serving. 
label_inverse_lookup_layer = ( tf.keras.layers.StringLookup( num_oov_indices=0, mask_token=None, vocabulary=LABEL_VOCAB, invert=True)) return label_inverse_lookup_layer def test_keras_metric_outside_strategy_scope_per_replica(self): strategy = get_tpu_strategy() metric = tf.keras.metrics.Mean("test_metric", dtype=tf.float32) dataset = tf.data.Dataset.range(strategy.num_replicas_in_sync * 2).batch(2) dataset = strategy.experimental_distribute_dataset(dataset) @tf.function def step_fn(i): metric.update_state(i) with self.assertRaisesRegex( ValueError, "Trying to run metric.update_state " "in replica context"): with strategy.scope(): for i in dataset: strategy.run(step_fn, args=(i,)) @test_util.disable_mlir_bridge("TODO(b/168036682): Support dynamic padder") def test_train_and_serve(self): strategy = get_tpu_strategy() use_adapt = False with strategy.scope(): feature_mapper, label_mapper = self.define_kpls_for_training(use_adapt) def dataset_fn(_): def feature_and_label_gen(): # Generator of dataset. while True: features = random.sample(FEATURE_VOCAB, 3) label = ["yes"] if "avenger" in features else ["no"] yield {"features": features, "label": label} raw_dataset = tf.data.Dataset.from_generator( feature_and_label_gen, output_signature={ "features": tf.TensorSpec([3], tf.dtypes.string), "label": tf.TensorSpec([1], tf.dtypes.string) }).shuffle(100).batch(32) train_dataset = raw_dataset.map(lambda x: ( # pylint: disable=g-long-lambda { "features": feature_mapper(x["features"]) }, label_mapper(x["label"]))) return train_dataset # Create the model. The input needs to be compatible with KPLs. model_input = tf.keras.layers.Input( shape=(3,), dtype=tf.dtypes.int64, name="model_input") # input_dim includes a mask token and an oov token. 
emb_output = tf.keras.layers.Embedding( input_dim=len(FEATURE_VOCAB) + 2, output_dim=20)( model_input) emb_output = tf.math.reduce_mean(emb_output, axis=1) dense_output = tf.keras.layers.Dense( units=1, activation="sigmoid")( emb_output) model = tf.keras.Model({"features": model_input}, dense_output) optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1) accuracy = tf.keras.metrics.Accuracy() @tf.function def train_step(iterator): """The step function for one training step.""" def step_fn(inputs): """The computation to run on each TPU device.""" features, labels = inputs with tf.GradientTape() as tape: pred = model(features, training=True) loss = tf.keras.losses.binary_crossentropy(labels, pred) loss = tf.nn.compute_average_loss(loss) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(list(zip(grads, model.trainable_variables))) actual_pred = tf.cast(tf.math.greater(pred, 0.5), tf.dtypes.int64) accuracy.update_state(labels, actual_pred) strategy.run(step_fn, args=(next(iterator),)) distributed_dataset = strategy.distribute_datasets_from_function( dataset_fn) distributed_iterator = iter(distributed_dataset) num_epochs = 4 num_steps = 7 for _ in range(num_epochs): accuracy.reset_state() for _ in range(num_steps): train_step(distributed_iterator) self.assertGreater(accuracy.result().numpy(), 0.5) self.assertEqual(optimizer.iterations.numpy(), num_epochs * num_steps) # Create a saved model. 
model.feature_mapper = feature_mapper model.label_mapper = label_mapper model.label_inverse_lookup_layer = self.define_inverse_lookup_layer() def create_serving_signature(model): @tf.function def serve_fn(raw_features): raw_features = tf.expand_dims(raw_features, axis=0) transformed_features = model.feature_mapper(raw_features) outputs = model(transformed_features) outputs = tf.squeeze(outputs, axis=0) outputs = tf.cast(tf.math.greater(outputs, 0.5), tf.dtypes.int64) decoded_outputs = model.label_inverse_lookup_layer(outputs) return tf.squeeze(decoded_outputs, axis=0) # Serving does NOT have batch dimension return serve_fn.get_concrete_function( tf.TensorSpec(shape=(3), dtype=tf.dtypes.string, name="example")) serving_fn = create_serving_signature(model) saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) model.save(saved_model_dir, save_format="tf", signatures={"serving_default": serving_fn}) # Test the saved_model. loaded_serving_fn = tf.keras.models.load_model( saved_model_dir).signatures["serving_default"] # Check model calling with serving signature. prediction1 = loaded_serving_fn( tf.constant(["avenger", "ironman", "avenger"]))["output_0"] self.assertIn(prediction1, ("yes", "no")) prediction2 = loaded_serving_fn( tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"] self.assertIn(prediction2, ("yes", "no")) if __name__ == "__main__": tf.test.main()
8,613
35.345992
84
py
keras
keras-master/keras/integration_test/central_storage_strategy_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for KPL + CentralStorageStrategy.""" from absl.testing import parameterized import tensorflow as tf from tensorflow.python.distribute import combinations as ds_combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.framework import test_combinations as combinations from tensorflow.python.keras.utils import kpl_test_utils # TODO(b/182278926): Combine this test with other strategies. 
@ds_combinations.generate( combinations.combine( distribution=[ strategy_combinations.central_storage_strategy_with_gpu_and_cpu, ], mode=["eager"])) class CentralStorageStrategyTest(tf.test.TestCase, parameterized.TestCase): def testTrainAndServeWithKPL(self, distribution): use_adapt = False test_utils_obj = kpl_test_utils.DistributeKplTestUtils() with distribution.scope(): feature_mapper, label_mapper = test_utils_obj.define_kpls_for_training( use_adapt) model = test_utils_obj.define_model() optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1) accuracy = tf.keras.metrics.Accuracy() def dataset_fn(_): return test_utils_obj.dataset_fn(feature_mapper, label_mapper) @tf.function def train_step(iterator): """The step function for one training step.""" def step_fn(inputs): """The computation to run on each replica.""" features, labels = inputs with tf.GradientTape() as tape: pred = model(features, training=True) loss = tf.keras.losses.binary_crossentropy(labels, pred) loss = tf.nn.compute_average_loss(loss) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(list(zip(grads, model.trainable_variables))) actual_pred = tf.cast(tf.math.greater(pred, 0.5), tf.dtypes.int64) accuracy.update_state(labels, actual_pred) distribution.run(step_fn, args=(next(iterator),)) distributed_dataset = distribution.distribute_datasets_from_function( dataset_fn) distributed_iterator = iter(distributed_dataset) num_epochs = 4 num_steps = 7 for _ in range(num_epochs): accuracy.reset_state() for _ in range(num_steps): train_step(distributed_iterator) self.assertGreater(accuracy.result().numpy(), 0.5) self.assertEqual(optimizer.iterations.numpy(), num_epochs * num_steps) # Test save/load/serving the trained model. test_utils_obj.test_save_load_serving_model( model, feature_mapper, test_utils_obj.define_reverse_lookup_layer()) if __name__ == "__main__": tf.test.main()
3,385
37.91954
80
py
keras
keras-master/keras/integration_test/tf_trt_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import tempfile from absl import flags import tensorflow as tf import tensorflow_text as tf_text class ConvertResource(tf.test.TestCase): def testConvertResource(self): """Test general resource inputs don't crash the converter.""" if not tf.test.is_built_with_cuda(): self.skipTest('test is only applicable with CUDA') class TokenizeLayer(tf.keras.layers.Layer): def __init__(self, vocab_file): super().__init__() serialized_proto = tf.compat.v1.gfile.GFile(vocab_file, "rb").read() self.tokenizer = tf_text.SentencepieceTokenizer( model=serialized_proto, add_bos=True, add_eos=True) def call(self, inputs): word_ids = self.tokenizer.tokenize(inputs) word_ids = word_ids.to_tensor(default_value=1, shape=(None, 192)) return word_ids vocab_file = os.path.join( flags.FLAGS['test_srcdir'].value, 'org_keras/keras', 'integration_test/data/sentencepiece.pb') # vocab_file = tf.compat.v1.test.test_src_dir_path( # "python/keras/integration_test/data/sentencepiece.pb") output_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) # Create and save a Tokenizer tokenizer = TokenizeLayer(vocab_file) inputs = tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string) tokens = tokenizer(inputs) model = tf.keras.models.Model(inputs=inputs, outputs=tokens) model.save(output_dir) converter = 
tf.experimental.tensorrt.Converter( input_saved_model_dir=output_dir, conversion_params=tf.experimental.tensorrt.ConversionParams()) converter.convert() if __name__ == "__main__": tf.test.main()
2,369
33.852941
80
py
keras
keras-master/keras/integration_test/preprocessing_test_utils.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common utilities for our Keras preprocessing integration tests.""" import os import tensorflow as tf preprocessing = tf.keras.layers BATCH_SIZE = 64 DS_SIZE = BATCH_SIZE * 16 STEPS = DS_SIZE / BATCH_SIZE VOCAB_SIZE = 100 def make_dataset(): """Make a simple structured dataset. The dataset contains three feature columns. - float_col: an unnormalized numeric column. - int_col: an column of integer IDs. - string_col: a column of fixed vocabulary terms. Returns: The dataset. """ tf.random.set_seed(197011) floats = tf.random.uniform((DS_SIZE, 1), maxval=10, dtype="float64") # Generate a 100 unique integer values, but over a wide range to showcase a # common use case for IntegerLookup. ints = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype="int64") ints = ints * 1000 # Use a fixed vocabulary of strings from 0 to 99, to showcase loading a # vocabulary from a file. strings = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype="int64") strings = tf.strings.as_string(strings) features = {"float_col": floats, "int_col": ints, "string_col": strings} # Random binary label. 
labels = tf.random.uniform((DS_SIZE, 1), maxval=2, dtype="int64") ds = tf.data.Dataset.from_tensor_slices((features, labels)) return ds def make_preprocessing_model(file_dir): """Make a standalone preprocessing model.""" # The name of our keras.Input should match the column name in the dataset. float_in = tf.keras.Input(shape=(1,), dtype="float64", name="float_col") int_in = tf.keras.Input(shape=(1,), dtype="int64", name="int_col") string_in = tf.keras.Input(shape=(1,), dtype="string", name="string_col") # We need to batch a dataset before adapting. ds = make_dataset().batch(BATCH_SIZE) # Normalize floats by adapting the mean and variance of the input. normalization = preprocessing.Normalization() normalization.adapt(ds.map(lambda features, labels: features["float_col"])) float_out = normalization(float_in) # Lookup ints by adapting a vocab of interger IDs. int_lookup = preprocessing.IntegerLookup() int_lookup.adapt(ds.map(lambda features, labels: features["int_col"])) int_out = int_lookup(int_in) # Lookup strings from a fixed file based vocabulary. string_vocab = list(str(i) for i in range(VOCAB_SIZE)) vocab_file = os.path.join(file_dir, "vocab_file.txt") with open(vocab_file, "w") as f: f.write("\n".join(string_vocab)) string_lookup = preprocessing.StringLookup(vocabulary=vocab_file) string_out = string_lookup(string_in) return tf.keras.Model( inputs=(float_in, int_in, string_in), outputs=(float_out, int_out, string_out)) def make_training_model(): """Make a trainable model for the preprocessed inputs.""" float_in = tf.keras.Input(shape=(1,), dtype="float64", name="float_col") # After preprocessing, both the string and int column are integer ready for # embedding. int_in = tf.keras.Input(shape=(1,), dtype="int64", name="int_col") string_in = tf.keras.Input(shape=(1,), dtype="int64", name="string_col") # Feed the lookup layers into an embedding. 
int_embedding = tf.keras.layers.Embedding(VOCAB_SIZE + 1, 8, input_length=1) int_out = int_embedding(int_in) int_out = tf.keras.layers.Flatten()(int_out) string_embedding = tf.keras.layers.Embedding( VOCAB_SIZE + 1, 8, input_length=1) string_out = string_embedding(string_in) string_out = tf.keras.layers.Flatten()(string_out) # Concatenate outputs. concatate = tf.keras.layers.Concatenate() # Feed our preprocessed inputs into a simple MLP. x = concatate((float_in, int_out, string_out)) x = tf.keras.layers.Dense(32, activation="relu")(x) x = tf.keras.layers.Dense(32, activation="relu")(x) outputs = tf.keras.layers.Dense(1, activation="softmax")(x) return tf.keras.Model(inputs=(float_in, int_in, string_in), outputs=outputs)
4,569
40.171171
80
py
keras
keras-master/keras/integration_test/saved_model_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import tempfile from absl.testing import parameterized import tensorflow as tf def cycle(obj, cycles, signatures=None): to_save = obj # TODO(vbardiovsky): It would be nice if exported protos reached a fixed # point w.r.t. saving/restoring, ideally after 2nd saving. for _ in range(cycles): path = tempfile.mkdtemp(prefix=tf.compat.v1.test.get_temp_dir()) # If available, we'll run the save and restore preferring the GPU. This # just makes sure we aren't throwing errors and have enough # device("CPU") blocks to satisfy the placer. device = "/device:GPU:0" if tf.test.is_gpu_available() else "/device:CPU:0" with tf.device(device): tf.saved_model.save(to_save, path, signatures) loaded = tf.saved_model.load(path) to_save = loaded return loaded class _ModelWithOptimizer(tf.train.Checkpoint): def __init__(self): self.dense = tf.keras.layers.Dense(1) self.optimizer = tf.keras.optimizers.Adam(0.01) @tf.function( input_signature=(tf.TensorSpec([None, 2], tf.float32), tf.TensorSpec([None], tf.float32))) def call(self, x, y): with tf.GradientTape() as tape: loss = tf.math.reduce_mean((self.dense(x) - y) ** 2.) 
trainable_variables = self.dense.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) return {"loss": loss} def _import_and_infer(save_dir, inputs, signature_key="serving_default"): """Import a SavedModel into a TF 1.x-style graph and run `signature_key`.""" graph = tf.Graph() with graph.as_default(), tf.compat.v1.Session() as session: model = tf.compat.v1.saved_model.load(session, ["serve"], save_dir) return _run_signature(session, model, inputs, signature_key) def _run_signature(session, meta_graph_def, inputs, signature_key): signature = meta_graph_def.signature_def[signature_key] assert set(inputs.keys()) == set(signature.inputs.keys()) feed_dict = {} for arg_name in inputs.keys(): input_tensor = session.graph.get_tensor_by_name( signature.inputs[arg_name].name) feed_dict[input_tensor] = inputs[arg_name] output_dict = {} for output_name, output_tensor_info in signature.outputs.items(): output_dict[output_name] = session.graph.get_tensor_by_name( output_tensor_info.name) return session.run(output_dict, feed_dict=feed_dict) class SaveTest(tf.test.TestCase): def test_unbuilt_model_does_not_prevent_saving(self): root = tf.train.Checkpoint( model=tf.keras.Sequential([tf.keras.layers.Dense(2)])) tf.saved_model.save(root, os.path.join(self.get_temp_dir(), "saved_model")) def test_optimizer(self): x = tf.constant([[3., 4.]]) y = tf.constant([2.]) model = _ModelWithOptimizer() first_loss = model.call(x, y) save_dir = os.path.join(self.get_temp_dir(), "saved_model") tf.saved_model.save(model, save_dir, model.call) second_loss = model.call(x, y) self.assertNotEqual(first_loss, second_loss) self.assertAllClose( second_loss, _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]})) def test_single_method_default_signature(self): model = _ModelWithOptimizer() x = tf.constant([[3., 4.]]) y = tf.constant([2.]) model.call(x, y) save_dir = os.path.join(self.get_temp_dir(), "saved_model") 
tf.saved_model.save(model, save_dir) self.assertIn("loss", _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]})) @parameterized.named_parameters( dict(testcase_name="ReloadOnce", cycles=1), dict(testcase_name="ReloadTwice", cycles=2), dict(testcase_name="ReloadThrice", cycles=3)) class LoadTest(tf.test.TestCase, parameterized.TestCase): def test_optimizer(self, cycles): class _HasOptimizer(tf.Module): def __init__(self): super(_HasOptimizer, self).__init__() self.layer = tf.keras.layers.Dense(1) self.optimizer = tf.keras.optimizers.Adam(0.01) @tf.function def __call__(self, x): return self.layer(x) @tf.function def train(self, x, y): with tf.GradientTape() as tape: predicted = self(x) loss = tf.math.reduce_sum(tf.math.abs(y - predicted)) train_vars = self.layer.trainable_variables grads = tape.gradient(loss, train_vars) self.optimizer.apply_gradients(zip(grads, train_vars)) root = _HasOptimizer() train_input = dict(x=tf.constant([[1.]]), y=tf.constant([[2.]])) root.train(**train_input) imported = cycle(root, cycles) self.assertAllClose(root.optimizer.learning_rate.numpy(), imported.optimizer.learning_rate.numpy()) self.assertAllClose(root(tf.constant([[-0.5]])), imported(tf.constant([[-0.5]]))) root.train(**train_input) imported.train(**train_input) self.assertAllClose(root(tf.constant([[-0.5]])), imported(tf.constant([[-0.5]]))) def test_model_with_custom_function_attached(self, cycles): root = tf.train.Checkpoint( model=tf.keras.Sequential([tf.keras.layers.Dense(2)])) @tf.function def _use_sequential(x): return root.model.call(x) root.model.traced_call = _use_sequential original = root.model.traced_call(tf.zeros([1, 1])).numpy() root = cycle(root, cycles) self.assertAllEqual( original, root.model.traced_call(tf.zeros([1, 1])).numpy()) @parameterized.named_parameters( dict(testcase_name="ReloadOnce", cycles=1), dict(testcase_name="ReloadTwice", cycles=2), dict(testcase_name="ReloadThrice", cycles=3)) class KerasLoadTest(tf.test.TestCase, 
parameterized.TestCase): def test_dense_features_layer(self, cycles): columns = [ tf.feature_column.numeric_column("x"), tf.feature_column.numeric_column("y") ] layer = tf.keras.layers.DenseFeatures(columns) model = tf.keras.Sequential([layer]) model_input = {"x": tf.constant([[1.]]), "y": tf.constant([[2.]])} self.assertAllClose([[1., 2.]], model.predict(model_input, steps=1)) loaded = cycle(model, cycles) output, = loaded._default_save_signature(model_input).values() self.assertAllClose([[1., 2.]], output) signature_output, = loaded.signatures["serving_default"]( **model_input).values() self.assertAllClose([[1., 2.]], signature_output) def test_dense_features_layer_fit(self, cycles): columns = [tf.feature_column.numeric_column("x")] model = tf.keras.Sequential( [tf.keras.layers.DenseFeatures(columns), tf.keras.layers.Dense(1)]) model_input = {"x": tf.constant([[1.]])} model.compile(optimizer="adam", loss="mse", run_eagerly=True) model.fit(model_input, tf.constant([[3.]])) loaded = cycle(model, cycles) loaded._default_save_signature(model_input) loaded.signatures["serving_default"](**model_input) def test_multi_output_layer(self, cycles): inp = tf.keras.Input(name="inp", shape=(None,), dtype=tf.float32) class _MultiOutput(tf.keras.layers.Layer): def call(self, x): return x + 1., x + 2. 
out = _MultiOutput(name="out")(inp) # pylint: disable=not-callable model = tf.keras.Model(inp, out) loaded = cycle(model, cycles) self.assertAllClose( dict(out=2., out_1=3.), loaded.signatures["serving_default"](tf.constant(1.))) def test_functional_model_with_conv(self, cycles): x = tf.keras.Input(name="x", shape=(None, None, 3), dtype=tf.float32) conved = tf.keras.layers.Conv2D( filters=3, kernel_size=3, dilation_rate=2)(x) model = tf.keras.Model([x], conved) model_input = tf.ones((1, 10, 10, 3)) initial_output = model.predict([model_input]) model = cycle(model, cycles) self.assertAllClose( [initial_output], list(model.signatures["serving_default"](model_input).values())) if __name__ == "__main__": tf.test.main()
8,780
35.740586
80
py
keras
keras-master/keras/integration_test/parameter_server_keras_preprocessing_test.py
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ClusterCoordinator and Keras models.""" import multiprocessing import os import random import tempfile from absl.testing import parameterized import numpy as np import portpicker import tensorflow as tf # These vocabularies usually come from TFT or a Beam pipeline. FEATURE_VOCAB = [ "avenger", "ironman", "batman", "hulk", "spiderman", "kingkong", "wonder_woman" ] LABEL_VOCAB = ["yes", "no"] def create_in_process_cluster(num_workers, num_ps): """Creates and starts local servers and returns the cluster_resolver.""" worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)] ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)] cluster_dict = {} cluster_dict["worker"] = ["localhost:%s" % port for port in worker_ports] if num_ps > 0: cluster_dict["ps"] = ["localhost:%s" % port for port in ps_ports] cluster_spec = tf.train.ClusterSpec(cluster_dict) # Workers need some inter_ops threads to work properly. 
worker_config = tf.compat.v1.ConfigProto() if multiprocessing.cpu_count() < num_workers + 1: worker_config.inter_op_parallelism_threads = num_workers + 1 for i in range(num_workers): tf.distribute.Server( cluster_spec, job_name="worker", task_index=i, config=worker_config, protocol="grpc") for i in range(num_ps): tf.distribute.Server( cluster_spec, job_name="ps", task_index=i, protocol="grpc") return cluster_spec class KPLTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(KPLTest, self).setUp() cluster_spec = create_in_process_cluster(num_workers=3, num_ps=2) cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver( cluster_spec, rpc_layer="grpc") self.strategy = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver) self.coordinator = ( tf.distribute.experimental.coordinator.ClusterCoordinator( self.strategy)) def define_kpls_for_training(self, use_adapt): # Define KPLs under strategy's scope. Right now, if they have look up # tables, they will be created on the client. Their variables will be # created on PS. Ideally they should be cached on each worker since they # will not be changed in a training step. if use_adapt: feature_lookup_layer = ( tf.keras.layers.StringLookup( num_oov_indices=1)) feature_lookup_layer.adapt(FEATURE_VOCAB) label_lookup_layer = ( tf.keras.layers.StringLookup( num_oov_indices=0, mask_token=None)) label_lookup_layer.adapt(LABEL_VOCAB) else: # Do vocab shuffling. shuffled_vocab = FEATURE_VOCAB.copy() random.shuffle(shuffled_vocab) feature_lookup_layer = ( tf.keras.layers.StringLookup( vocabulary=shuffled_vocab, num_oov_indices=1)) label_lookup_layer = ( tf.keras.layers.StringLookup( vocabulary=LABEL_VOCAB, num_oov_indices=0, mask_token=None)) raw_feature_input = tf.keras.Input( shape=(3,), dtype=tf.string, name="feature", ragged=True) feature_id_input = feature_lookup_layer(raw_feature_input) # Model creates variables as well. 
feature_ps = tf.keras.Model({"features": raw_feature_input}, feature_id_input) raw_label_input = tf.keras.Input(shape=(1,), dtype=tf.string, name="label") label_id_input = label_lookup_layer(raw_label_input) label_ps = tf.keras.Model({"label": raw_label_input}, label_id_input) return feature_ps, label_ps def define_reverse_lookup_layer(self): # Only needed for serving. label_inverse_lookup_layer = ( tf.keras.layers.StringLookup( num_oov_indices=0, mask_token=None, vocabulary=LABEL_VOCAB, invert=True)) return label_inverse_lookup_layer @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], use_adapt=[True, False])) def testTrainAndServe(self, use_adapt): with self.coordinator.strategy.scope(): feature_ps, label_ps = self.define_kpls_for_training(use_adapt) def dataset_fn(): def feature_and_label_gen(): while True: features = random.sample(FEATURE_VOCAB, 3) label = ["yes"] if "avenger" in features else ["no"] yield {"features": features, "label": label} # The dataset will be created on the coordinator. raw_dataset = tf.data.Dataset.from_generator( feature_and_label_gen, output_signature={ "features": tf.TensorSpec([3], tf.string), "label": tf.TensorSpec([1], tf.string) }).shuffle(100).batch(32) train_dataset = raw_dataset.map(lambda x: ( # pylint: disable=g-long-lambda { "features": feature_ps(x["features"]) }, label_ps(x["label"]))) return train_dataset # Create the model. The input needs to be compatible with KPLs. model_input = tf.keras.Input( shape=(3,), dtype=tf.int64, name="model_input") # input_dim includes a mask token and an oov token. 
emb_output = tf.keras.layers.Embedding( input_dim=len(FEATURE_VOCAB) + 2, output_dim=20)( model_input) emb_output = tf.reduce_mean(emb_output, axis=1) dense_output = tf.keras.layers.Dense( units=1, activation="sigmoid")( emb_output) model = tf.keras.Model({"features": model_input}, dense_output) optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1) accuracy = tf.keras.metrics.Accuracy() @tf.function def worker_fn(iterator): def replica_fn(iterator): batch_data, labels = next(iterator) with tf.GradientTape() as tape: pred = model(batch_data, training=True) loss = tf.nn.compute_average_loss( tf.keras.losses.BinaryCrossentropy( reduction=tf.keras.losses.Reduction.NONE)(labels, pred)) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64) accuracy.update_state(labels, actual_pred) self.coordinator.strategy.run(replica_fn, args=(iterator,)) distributed_dataset = self.coordinator.create_per_worker_dataset(dataset_fn) distributed_iterator = iter(distributed_dataset) for _ in range(4): accuracy.reset_state() for _ in range(7): self.coordinator.schedule(worker_fn, args=(distributed_iterator,)) self.coordinator.join() self.assertGreater(accuracy.result().numpy(), 0.5) # Create a saved model. 
model.feature_ps = feature_ps model.label_ps = label_ps model.label_inverse_lookup_layer = self.define_reverse_lookup_layer() def create_serving_signature(model): @tf.function def serve_fn(raw_features): raw_features = tf.expand_dims(raw_features, axis=0) transformed_features = model.feature_ps(raw_features) outputs = model(transformed_features) outputs = tf.squeeze(outputs, axis=0) outputs = tf.cast(tf.greater(outputs, 0.5), tf.int64) decoded_outputs = model.label_inverse_lookup_layer(outputs) return tf.squeeze(decoded_outputs, axis=0) # serving does NOT have batch dimension return serve_fn.get_concrete_function( tf.TensorSpec(shape=(3), dtype=tf.string, name="example")) serving_fn = create_serving_signature(model) saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) model.save(saved_model_dir, signatures={"serving_default": serving_fn}) # Test the saved_model. loaded_serving_fn = tf.keras.models.load_model( saved_model_dir).signatures["serving_default"] # check the result w/ and w/o avenger. 
prediction0 = loaded_serving_fn( tf.constant(["avenger", "ironman", "avenger"]))["output_0"] self.assertIn(prediction0, ("yes", "no")) prediction1 = loaded_serving_fn( tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"] self.assertIn(prediction1, ("yes", "no")) class KPLCreatedInDatasetsFromFunctionTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(KPLCreatedInDatasetsFromFunctionTest, self).setUp() cluster_spec = create_in_process_cluster(num_workers=3, num_ps=2) cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver( cluster_spec, rpc_layer="grpc") self.strategy = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver) self.coordinator = ( tf.distribute.experimental.coordinator.ClusterCoordinator( self.strategy)) def testKPLCreatedInDatasetsFromFunction(self): filepath = os.path.join(self.get_temp_dir(), "vocab") with open(filepath, "w") as f: f.write("\n".join(["earth", "wind", "and", "fire"])) def per_worker_dataset_fn(): def dataset_fn(input_context): del input_context lookup_layer = tf.keras.layers.StringLookup( num_oov_indices=1, vocabulary=filepath) x = np.array([["earth", "wind", "and", "fire"], ["fire", "and", "earth", "michigan"]]) y = np.array([0, 1]) map_fn = lambda x, y: (lookup_layer(x), y) return tf.data.Dataset.from_tensor_slices( (x, y)).shuffle(10).repeat().batch(2).map(map_fn) return self.coordinator.strategy.distribute_datasets_from_function( dataset_fn) per_worker_distribute_dataset = self.coordinator.create_per_worker_dataset( per_worker_dataset_fn) per_worker_iter = iter(per_worker_distribute_dataset) @tf.function def worker_fn(iterator): def replica_fn(data): return data return self.coordinator.strategy.run(replica_fn, args=(next(iterator),)) result = [] for _ in range(10): result.append( self.coordinator.schedule(worker_fn, args=(per_worker_iter,))) self.coordinator.join() if __name__ == "__main__": tf.compat.v1.enable_v2_behavior() tf.test.main()
11,062
34.918831
84
py
keras
keras-master/keras/wrappers/__init__.py
0
0
0
py
keras
keras-master/keras/wrappers/scikit_learn.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper for using the Scikit-Learn API with Keras models.""" # pylint: disable=g-classes-have-attributes import copy import types import numpy as np from keras import losses from keras.models import Sequential from keras.utils.generic_utils import has_arg from keras.utils.np_utils import to_categorical from tensorflow.python.util.tf_export import keras_export class BaseWrapper: """Base class for the Keras scikit-learn wrapper. Warning: This class should not be used directly. Use descendant classes instead. Args: build_fn: callable function or class instance **sk_params: model parameters & fitting parameters The `build_fn` should construct, compile and return a Keras model, which will then be used to fit/predict. One of the following three values could be passed to `build_fn`: 1. A function 2. An instance of a class that implements the `__call__` method 3. None. This means you implement a class that inherits from either `KerasClassifier` or `KerasRegressor`. The `__call__` method of the present class will then be treated as the default `build_fn`. `sk_params` takes both model parameters and fitting parameters. Legal model parameters are the arguments of `build_fn`. 
Note that like all other estimators in scikit-learn, `build_fn` should provide default values for its arguments, so that you could create the estimator without passing any values to `sk_params`. `sk_params` could also accept parameters for calling `fit`, `predict`, `predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`). fitting (predicting) parameters are selected in the following order: 1. Values passed to the dictionary arguments of `fit`, `predict`, `predict_proba`, and `score` methods 2. Values passed to `sk_params` 3. The default values of the `keras.models.Sequential` `fit`, `predict` methods. When using scikit-learn's `grid_search` API, legal tunable parameters are those you could pass to `sk_params`, including fitting parameters. In other words, you could use `grid_search` to search for the best `batch_size` or `epochs` as well as the model parameters. """ def __init__(self, build_fn=None, **sk_params): self.build_fn = build_fn self.sk_params = sk_params self.check_params(sk_params) def check_params(self, params): """Checks for user typos in `params`. Args: params: dictionary; the parameters to be checked Raises: ValueError: if any member of `params` is not a valid argument. """ legal_params_fns = [ Sequential.fit, Sequential.predict, Sequential.evaluate ] if self.build_fn is None: legal_params_fns.append(self.__call__) elif (not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType)): legal_params_fns.append(self.build_fn.__call__) else: legal_params_fns.append(self.build_fn) for params_name in params: for fn in legal_params_fns: if has_arg(fn, params_name): break else: if params_name != 'nb_epoch': raise ValueError('{} is not a legal parameter'.format(params_name)) def get_params(self, **params): # pylint: disable=unused-argument """Gets parameters for this estimator. Args: **params: ignored (exists for API compatibility). Returns: Dictionary of parameter names mapped to their values. 
""" res = self.sk_params.copy() res.update({'build_fn': self.build_fn}) return res def set_params(self, **params): """Sets the parameters of this estimator. Args: **params: Dictionary of parameter names mapped to their values. Returns: self """ self.check_params(params) self.sk_params.update(params) return self def fit(self, x, y, **kwargs): """Constructs a new model with `build_fn` & fit the model to `(x, y)`. Args: x : array-like, shape `(n_samples, n_features)` Training samples where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.fit` Returns: history : object details about the training history at each epoch. """ if self.build_fn is None: self.model = self.__call__(**self.filter_sk_params(self.__call__)) elif (not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType)): self.model = self.build_fn( **self.filter_sk_params(self.build_fn.__call__)) else: self.model = self.build_fn(**self.filter_sk_params(self.build_fn)) if (losses.is_categorical_crossentropy(self.model.loss) and len(y.shape) != 2): y = to_categorical(y) fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit)) fit_args.update(kwargs) history = self.model.fit(x, y, **fit_args) return history def filter_sk_params(self, fn, override=None): """Filters `sk_params` and returns those in `fn`'s arguments. Args: fn : arbitrary function override: dictionary, values to override `sk_params` Returns: res : dictionary containing variables in both `sk_params` and `fn`'s arguments. 
""" override = override or {} res = {} for name, value in self.sk_params.items(): if has_arg(fn, name): res.update({name: value}) res.update(override) return res @keras_export('keras.wrappers.scikit_learn.KerasClassifier') class KerasClassifier(BaseWrapper): """Implementation of the scikit-learn classifier API for Keras. """ def fit(self, x, y, **kwargs): """Constructs a new model with `build_fn` & fit the model to `(x, y)`. Args: x : array-like, shape `(n_samples, n_features)` Training samples where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.fit` Returns: history : object details about the training history at each epoch. Raises: ValueError: In case of invalid shape for `y` argument. """ y = np.array(y) if len(y.shape) == 2 and y.shape[1] > 1: self.classes_ = np.arange(y.shape[1]) elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1: self.classes_ = np.unique(y) y = np.searchsorted(self.classes_, y) else: raise ValueError('Invalid shape for y: ' + str(y.shape)) self.n_classes_ = len(self.classes_) return super(KerasClassifier, self).fit(x, y, **kwargs) def predict(self, x, **kwargs): """Returns the class predictions for the given test data. Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict`. Returns: preds: array-like, shape `(n_samples,)` Class predictions. """ proba = self.model.predict(x, **kwargs) if proba.shape[-1] > 1: classes = proba.argmax(axis=-1) else: classes = (proba > 0.5).astype('int32') return self.classes_[classes] def predict_proba(self, x, **kwargs): """Returns class probability estimates for the given test data. 
Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict`. Returns: proba: array-like, shape `(n_samples, n_outputs)` Class probability estimates. In the case of binary classification, to match the scikit-learn API, will return an array of shape `(n_samples, 2)` (instead of `(n_sample, 1)` as in Keras). """ probs = self.model.predict(x, **kwargs) # check if binary classification if probs.shape[1] == 1: # first column is probability of class 0 and second is of class 1 probs = np.hstack([1 - probs, probs]) return probs def score(self, x, y, **kwargs): """Returns the mean accuracy on the given test data and labels. Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.evaluate`. Returns: score: float Mean accuracy of predictions on `x` wrt. `y`. Raises: ValueError: If the underlying model isn't configured to compute accuracy. You should pass `metrics=["accuracy"]` to the `.compile()` method of the model. """ y = np.searchsorted(self.classes_, y) kwargs = self.filter_sk_params(Sequential.evaluate, kwargs) loss_name = self.model.loss if hasattr(loss_name, '__name__'): loss_name = loss_name.__name__ if loss_name == 'categorical_crossentropy' and len(y.shape) != 2: y = to_categorical(y) outputs = self.model.evaluate(x, y, **kwargs) if not isinstance(outputs, list): outputs = [outputs] for name, output in zip(self.model.metrics_names, outputs): if name in ['accuracy', 'acc']: return output raise ValueError('The model is not configured to compute accuracy. 
' 'You should pass `metrics=["accuracy"]` to ' 'the `model.compile()` method.') @keras_export('keras.wrappers.scikit_learn.KerasRegressor') class KerasRegressor(BaseWrapper): """Implementation of the scikit-learn regressor API for Keras. """ def predict(self, x, **kwargs): """Returns predictions for the given test data. Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict`. Returns: preds: array-like, shape `(n_samples,)` Predictions. """ kwargs = self.filter_sk_params(Sequential.predict, kwargs) return np.squeeze(self.model.predict(x, **kwargs)) def score(self, x, y, **kwargs): """Returns the mean loss on the given test data and labels. Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. y: array-like, shape `(n_samples,)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.evaluate`. Returns: score: float Mean accuracy of predictions on `x` wrt. `y`. """ kwargs = self.filter_sk_params(Sequential.evaluate, kwargs) loss = self.model.evaluate(x, y, **kwargs) if isinstance(loss, list): return -loss[0] return -loss
12,451
34.175141
80
py
keras
keras-master/keras/wrappers/scikit_learn_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Scikit-learn API wrapper.""" import tensorflow.compat.v2 as tf import numpy as np import keras from keras import testing_utils from keras.wrappers import scikit_learn INPUT_DIM = 5 HIDDEN_DIM = 5 TRAIN_SAMPLES = 10 TEST_SAMPLES = 5 NUM_CLASSES = 2 BATCH_SIZE = 5 EPOCHS = 1 def build_fn_clf(hidden_dim): model = keras.models.Sequential() model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,))) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(hidden_dim)) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(NUM_CLASSES)) model.add(keras.layers.Activation('softmax')) model.compile( optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy']) return model def assert_classification_works(clf): np.random.seed(42) (x_train, y_train), (x_test, _) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) clf.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS) score = clf.score(x_train, y_train, batch_size=BATCH_SIZE) assert np.isscalar(score) and np.isfinite(score) preds = clf.predict(x_test, batch_size=BATCH_SIZE) assert preds.shape == (TEST_SAMPLES,) for prediction in np.unique(preds): assert prediction in range(NUM_CLASSES) proba = 
clf.predict_proba(x_test, batch_size=BATCH_SIZE) assert proba.shape == (TEST_SAMPLES, NUM_CLASSES) assert np.allclose(np.sum(proba, axis=1), np.ones(TEST_SAMPLES)) def build_fn_reg(hidden_dim): model = keras.models.Sequential() model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,))) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(hidden_dim)) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(1)) model.add(keras.layers.Activation('linear')) model.compile( optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy']) return model def assert_regression_works(reg): np.random.seed(42) (x_train, y_train), (x_test, _) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) reg.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS) score = reg.score(x_train, y_train, batch_size=BATCH_SIZE) assert np.isscalar(score) and np.isfinite(score) preds = reg.predict(x_test, batch_size=BATCH_SIZE) assert preds.shape == (TEST_SAMPLES,) class ScikitLearnAPIWrapperTest(tf.test.TestCase): def test_classify_build_fn(self): with self.cached_session(): clf = scikit_learn.KerasClassifier( build_fn=build_fn_clf, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_classification_works(clf) def test_classify_class_build_fn(self): class ClassBuildFnClf: def __call__(self, hidden_dim): return build_fn_clf(hidden_dim) with self.cached_session(): clf = scikit_learn.KerasClassifier( build_fn=ClassBuildFnClf(), hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_classification_works(clf) def test_classify_inherit_class_build_fn(self): class InheritClassBuildFnClf(scikit_learn.KerasClassifier): def __call__(self, hidden_dim): return build_fn_clf(hidden_dim) with self.cached_session(): clf = InheritClassBuildFnClf( build_fn=None, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_classification_works(clf) 
def test_regression_build_fn(self): with self.cached_session(): reg = scikit_learn.KerasRegressor( build_fn=build_fn_reg, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_regression_works(reg) def test_regression_class_build_fn(self): class ClassBuildFnReg: def __call__(self, hidden_dim): return build_fn_reg(hidden_dim) with self.cached_session(): reg = scikit_learn.KerasRegressor( build_fn=ClassBuildFnReg(), hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_regression_works(reg) def test_regression_inherit_class_build_fn(self): class InheritClassBuildFnReg(scikit_learn.KerasRegressor): def __call__(self, hidden_dim): return build_fn_reg(hidden_dim) with self.cached_session(): reg = InheritClassBuildFnReg( build_fn=None, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_regression_works(reg) if __name__ == '__main__': tf.test.main()
5,499
28.100529
80
py
keras
keras-master/keras/engine/data_adapter.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adapter module that convert different input data objects into tf.dataset.""" import tensorflow.compat.v2 as tf import abc import contextlib import functools import itertools import math import random import numpy as np from tensorflow.python.eager import context from keras import backend from keras.engine import training_utils from keras.utils import data_utils from keras.utils import dataset_creator from keras.utils import tf_utils from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: pd = None keras_data_adapter_gauge = tf.__internal__.monitoring.BoolGauge( "/tensorflow/api/keras/data_adapters", "keras data adapter usage", "method") class DataAdapter(object, metaclass=abc.ABCMeta): """Base class for input data adapter. In TF 2.0, tf.data is the preferred API for user to feed in data. In order to simplify the training code path, all the input data object will be converted to `tf.data.Dataset` if possible. Note that since this class is mainly targeted for TF 2.0, it might have a lot of assumptions under the hood, eg eager context by default, distribution strategy, etc. 
In the meantime, some legacy feature support might be dropped, eg, Iterator from dataset API in v1, etc. The sample usage of this class is like: ``` x = tf.data.Dataset.range(100) adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter] applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)] if len(applicable_adapters) != 1: raise ValueError("Expect only one adapter class to handle the input") dataset = applicable_adapters[0](x).get_dataset() for data in dataset: # training ``` """ @staticmethod def can_handle(x, y=None): """Whether the current DataAdapter could handle the input x and y. Structure wise, x and y can be single object, or list of objects if there multiple input/output, or dictionary of objects when the intput/output are named. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. Returns: boolean """ raise NotImplementedError @abc.abstractmethod def __init__(self, x, y=None, **kwargs): """Create a DataAdapter based on data inputs. The caller must make sure to call `can_handle()` first before invoking this method. Provide unsupported data type will result into unexpected behavior. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. **kwargs: Other keyword arguments for DataAdapter during the construction of the tf.dataset.Dataset. For example: - Numpy data might have `sample_weights` which will be used for weighting the loss function during training. - Numpy data might need to have `batch_size` parameter when constructing the dataset and iterator. - Certain input might need to be distribution strategy aware. When `distribution_strategy` is passed, the created dataset need to respect the strategy. DataAdapter might choose to ignore any keyword argument if it doesn't use it, or raise exception if any required argument is not provide. 
""" if not self.can_handle(x, y): raise ValueError("{} Cannot handle input {}, {}".format( self.__class__, x, y)) @abc.abstractmethod def get_dataset(self): """Get a dataset instance for the current DataAdapter. Note that the dataset returned does not repeat for epoch, so caller might need to create new iterator for the same dataset at the beginning of the epoch. This behavior might change in future. Returns: An tf.dataset.Dataset. Caller might use the dataset in different context, eg iter(dataset) in eager to get the value directly, or in graph mode, provide the iterator tensor to Keras model function. """ raise NotImplementedError @abc.abstractmethod def get_size(self): """Return the size (number of batches) for the dataset created. For certain type of the data input, the number of batches is known, eg for Numpy data, the size is same as (number_of_element / batch_size). Whereas for dataset or python generator, the size is unknown since it may or may not have a end state. Returns: int, the number of batches for the dataset, or None if it is unknown. The caller could use this to control the loop of training, show progress bar, or handle unexpected StopIteration error. """ raise NotImplementedError @abc.abstractmethod def batch_size(self): """Return the batch size of the dataset created. For certain type of the data input, the batch size is known, and even required, like numpy array. Where as for dataset, the batch is unknown unless we take a peek. Returns: int, the batch size of the dataset, or None if it is unknown. """ raise NotImplementedError def representative_batch_size(self): """Return a representative size for batches in the dataset. This is not guaranteed to be the batch size for all batches in the dataset. It just needs to be a rough approximation for batch sizes in the dataset. Returns: int, a representative size for batches found in the dataset, or None if it is unknown. 
""" return self.batch_size() @abc.abstractmethod def has_partial_batch(self): """Whether the dataset has partial batch at the end.""" raise NotImplementedError @abc.abstractmethod def partial_batch_size(self): """The size of the final partial batch for dataset. Will return None if has_partial_batch is False or batch_size is None. """ raise NotImplementedError @abc.abstractmethod def should_recreate_iterator(self): """Returns whether a new iterator should be created every epoch.""" raise NotImplementedError def get_samples(self): """Returns number of samples in the data, or `None`.""" if not self.get_size() or not self.batch_size(): return None total_sample = self.get_size() * self.batch_size() if self.has_partial_batch(): total_sample -= (self.batch_size() - self.partial_batch_size()) return total_sample def on_epoch_end(self): """A hook called after each epoch.""" pass class TensorLikeDataAdapter(DataAdapter): """Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy.""" @staticmethod def can_handle(x, y=None): # TODO(kaftan): Check performance implications of using a flatten # here for other types of inputs. flat_inputs = tf.nest.flatten(x) if y is not None: flat_inputs += tf.nest.flatten(y) tensor_types = _get_tensor_types() def _is_tensor(v): if isinstance(v, tensor_types): return True return False return all(_is_tensor(v) for v in flat_inputs) def __init__(self, x, y=None, sample_weights=None, sample_weight_modes=None, batch_size=None, epochs=1, steps=None, shuffle=False, **kwargs): super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs) x, y, sample_weights = _process_tensorlike((x, y, sample_weights)) sample_weight_modes = broadcast_sample_weight_modes( sample_weights, sample_weight_modes) # If sample_weights are not specified for an output use 1.0 as weights. 
(sample_weights, _, _) = training_utils.handle_partial_sample_weights( y, sample_weights, sample_weight_modes, check_all_flat=True) inputs = pack_x_y_sample_weight(x, y, sample_weights) num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(inputs)).pop() _check_data_cardinality(inputs) # If batch_size is not passed but steps is, calculate from the input data. # Default to 32 for backwards compat. if not batch_size: batch_size = int(math.ceil(num_samples / steps)) if steps else 32 self._size = int(math.ceil(num_samples / batch_size)) self._batch_size = batch_size num_full_batches = int(num_samples // batch_size) self._partial_batch_size = num_samples % batch_size if isinstance(shuffle, str): shuffle = shuffle.lower() self._shuffle = shuffle # Vectorized version of shuffle. # This is a performance improvement over using `from_tensor_slices`. # The indices of the data are shuffled and batched, and these indices # are then zipped with the data and used to extract a batch of the data # at each step. The performance improvements here come from: # 1. vectorized batch using gather # 2. parallelized map # 3. pipelined permutation generation # 4. optimized permutation batching # 5. disabled static optimizations indices_dataset = tf.data.Dataset.range(1) if shuffle != "batch": indices_dataset = indices_dataset.repeat(epochs) def permutation(_): # It turns out to be more performant to make a new set of indices rather # than reusing the same range Tensor. (presumably because of buffer # forwarding.) indices = tf.range(num_samples, dtype=tf.int64) if shuffle and shuffle != "batch": indices = tf.random.shuffle(indices) return indices # We prefetch a single element. Computing large permutations can take quite # a while so we don't want to wait for prefetching over an epoch boundary to # trigger the next permutation. On the other hand, too many simultaneous # shuffles can contend on a hardware level and degrade all performance. 
indices_dataset = indices_dataset.map(permutation).prefetch(1) def slice_batch_indices(indices): """Convert a Tensor of indices into a dataset of batched indices. This step can be accomplished in several ways. The most natural is to slice the Tensor in a Dataset map. (With a condition on the upper index to handle the partial batch.) However it turns out that coercing the Tensor into a shape which is divisible by the batch size (and handling the last partial batch separately) allows for a much more favorable memory access pattern and improved performance. Args: indices: Tensor which determines the data order for an entire epoch. Returns: A Dataset of batched indices. """ num_in_full_batch = num_full_batches * batch_size first_k_indices = tf.slice(indices, [0], [num_in_full_batch]) first_k_indices = tf.reshape( first_k_indices, [num_full_batches, batch_size]) flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices) if self._partial_batch_size: index_remainder = tf.data.Dataset.from_tensors(tf.slice( indices, [num_in_full_batch], [self._partial_batch_size])) flat_dataset = flat_dataset.concatenate(index_remainder) if shuffle == "batch": # 1024 is a magic constant that has not been properly evaluated flat_dataset = flat_dataset.shuffle(1024).repeat(epochs) return flat_dataset indices_dataset = indices_dataset.flat_map(slice_batch_indices) dataset = self.slice_inputs(indices_dataset, inputs) if shuffle == "batch": def shuffle_batch(*batch): return tf.nest.map_structure(tf.random.shuffle, batch) dataset = dataset.map(shuffle_batch) self._dataset = dataset def slice_inputs(self, indices_dataset, inputs): """Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. 
Returns: A Dataset of input batches matching the batch indices. """ dataset = tf.data.Dataset.zip(( indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat() )) def grab_batch(i, data): return tf.nest.map_structure(lambda d: tf.gather(d, i, axis=0), data) dataset = dataset.map( grab_batch, num_parallel_calls=tf.data.AUTOTUNE) # Default optimizations are disabled to avoid the overhead of (unnecessary) # input pipeline graph serialization and deserialization options = tf.data.Options() options.experimental_optimization.apply_default_optimizations = False if self._shuffle: # See b/141490660 for more details. options.experimental_external_state_policy = ( tf.data.experimental.ExternalStatePolicy.IGNORE) dataset = dataset.with_options(options) return dataset def get_dataset(self): return self._dataset def get_size(self): return self._size def batch_size(self): return self._batch_size def has_partial_batch(self): return self._partial_batch_size > 0 def partial_batch_size(self): return self._partial_batch_size or None def should_recreate_iterator(self): # An infinite dataset is always created here. return False class GenericArrayLikeDataAdapter(TensorLikeDataAdapter): """Adapter that handles array-like data without forcing it into memory. This adapter handles array-like datasets that may be too big to fully fit into memory. Specifically, this adapter handles any Python class which implements: `__get_item__`, `__len__`, `shape`, and `dtype` with the same meanings as Numpy, but it ignores any case where all the inputs are Tensors or Numpy arrays (because that case is handled by the base TensorLikeDataAdapter). It ignores scipy sparse matrices and Composite Tensors because those are handled by the CompositeTensorDataAdapter. It also does not handle lists/tuples of scalars, because those are handled by the ListsOfScalarsDataAdapter. 
""" @staticmethod def can_handle(x, y=None): flat_inputs = tf.nest.flatten(x) if y is not None: flat_inputs += tf.nest.flatten(y) def _is_array_like(v): """Return True if v is a Tensor, array, or is array-like.""" return ( hasattr(v, "__getitem__") and hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "__len__") ) if (not TensorLikeDataAdapter.can_handle(x, y) and not CompositeTensorDataAdapter.can_handle(x, y)): return all(_is_array_like(v) for v in flat_inputs) else: return False def __init__(self, *args, **kwargs): logging.warning( "Keras is training/fitting/evaluating on array-like data. Keras may " "not be optimized for this format, so if your input data format is " "supported by TensorFlow I/O (https://github.com/tensorflow/io) we " "recommend using that to load a Dataset instead.") super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs) def slice_inputs(self, indices_dataset, inputs): """Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. Returns: A Dataset of input batches matching the batch indices. """ flat_inputs = tf.nest.flatten(inputs) def dynamic_shape_like(t): shape = list(t.shape) shape[0] = None return tuple(shape) flat_dtypes = [inp.dtype for inp in flat_inputs] contiguous = True if self._shuffle and self._shuffle != "batch": contiguous = False def grab_batch(indices): """Grab a batch of data from the inputs.""" # This uses a py_function to avoid converting the array-like # into a Tensor before slicing it, because converting the array-like # to a Tensor may force it into memory.. 
def py_method(ind): def slice_array(data): return training_utils.slice_arrays(data, ind.numpy(), contiguous=contiguous) return [slice_array(inp) for inp in flat_inputs] flat_out = tf.py_function(py_method, [indices], flat_dtypes) for v, original_inp in zip(flat_out, flat_inputs): v.set_shape(dynamic_shape_like(original_inp)) return tf.nest.pack_sequence_as(inputs, flat_out) dataset = indices_dataset.map( grab_batch, num_parallel_calls=tf.data.AUTOTUNE) return dataset class DatasetCreatorAdapter(DataAdapter): """Adapter that handles dataset functions.""" def __init__(self, x, y, steps=None, distribution_strategy=None, **kwargs): super(DatasetCreatorAdapter, self).__init__(x, **kwargs) if not isinstance(x, dataset_creator.DatasetCreator): raise TypeError("The input of a `DatasetCreatorAdapter` should be a " "`DatasetCreator` but it received type {}.".format( type(x))) if steps is None: raise ValueError("When using a " "`tf.keras.utils.experimental.DatasetCreator`, " "`steps_per_epoch`, `validation_steps` or `steps` " "argument must be provided in `Model.fit`, " "`Model.evaluate`, or `Model.predict`.") self.dataset_creator = x self.steps = steps self.strategy = distribution_strategy @staticmethod def can_handle(x, y=None): if isinstance(x, dataset_creator.DatasetCreator): assert y is None return True def should_recreate_iterator(self): # We expect users to shuffle the dataset in their `dataset_fn` supplied to # `DatasetCreator`. Since that is a buffered shuffle, we intend to not reset # the dataset so the batches that are not shuffled can still be pulled. return False def get_size(self): return None # To be inferred by `DataHandler`. 
def get_dataset(self): return self.strategy.distribute_datasets_from_function( self.dataset_creator, options=self.dataset_creator.input_options) def batch_size(self): raise NotImplementedError() def has_partial_batch(self): raise NotImplementedError() def partial_batch_size(self): raise NotImplementedError() class CompositeTensorDataAdapter(DataAdapter): """Adapter that handles composite tensor.""" @staticmethod def can_handle(x, y=None): flat_inputs = tf.nest.flatten(x) if y is not None: flat_inputs += tf.nest.flatten(y) def _is_composite(v): # Dataset/iterator/DistributedDataset inherits from CompositeTensor but # should be handled by DatasetAdapter and GeneratorAdapter. if (tf_utils.is_extension_type(v) and not isinstance(v, (tf.data.Dataset, tf.data.Iterator)) and not _is_distributed_dataset(v)): return True # Support Scipy sparse tensors if scipy is installed return _is_scipy_sparse(v) def _is_tensor_or_composite(v): if isinstance(v, (tf.Tensor, np.ndarray)): return True return _is_composite(v) return (any(_is_composite(v) for v in flat_inputs) and all(_is_tensor_or_composite(v) for v in flat_inputs)) def __init__(self, x, y=None, sample_weights=None, sample_weight_modes=None, batch_size=None, steps=None, shuffle=False, **kwargs): super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs) x, y, sample_weights = _process_tensorlike((x, y, sample_weights)) sample_weight_modes = broadcast_sample_weight_modes( sample_weights, sample_weight_modes) # If sample_weights are not specified for an output use 1.0 as weights. (sample_weights, _, _) = training_utils.handle_partial_sample_weights( y, sample_weights, sample_weight_modes, check_all_flat=True) inputs = pack_x_y_sample_weight(x, y, sample_weights) dataset = tf.data.Dataset.from_tensor_slices(inputs) num_samples = int(tf.nest.flatten(x)[0].shape[0]) if shuffle: dataset = dataset.shuffle(num_samples) # If batch_size is not passed but steps is, calculate from the input data. 
# Default to 32 for backwards compat. if not batch_size: batch_size = int(math.ceil(num_samples / steps)) if steps else 32 dataset = dataset.batch(batch_size) self._size = int(math.ceil(num_samples / batch_size)) self._batch_size = batch_size self._has_partial_batch = (self._size != (num_samples // batch_size)) self._partial_batch_size = None if self._has_partial_batch: self._partial_batch_size = ( num_samples - (self._size - 1) * self._batch_size) self._dataset = dataset def get_dataset(self): return self._dataset def get_size(self): return self._size def batch_size(self): return self._batch_size def has_partial_batch(self): return self._has_partial_batch def partial_batch_size(self): return self._partial_batch_size def should_recreate_iterator(self): return True class ListsOfScalarsDataAdapter(DataAdapter): """Adapter that handles lists of scalars and lists of lists of scalars.""" @staticmethod def can_handle(x, y=None): handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x) handles_y = True if y is not None: handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y) return handles_x and handles_y @staticmethod def _is_list_of_scalars(inp): if isinstance(inp, (float, int, str, bytes, bytearray)): return True if isinstance(inp, (list, tuple)) and inp: return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0]) return False def __init__(self, x, y=None, sample_weights=None, sample_weight_modes=None, batch_size=None, shuffle=False, **kwargs): super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs) x = np.asarray(x) if y is not None: y = np.asarray(y) if sample_weights is not None: sample_weights = np.asarray(sample_weights) sample_weight_modes = broadcast_sample_weight_modes( sample_weights, sample_weight_modes) self._internal_adapter = TensorLikeDataAdapter( x, y=y, sample_weights=sample_weights, sample_weight_modes=sample_weight_modes, batch_size=batch_size, shuffle=shuffle, **kwargs) def get_dataset(self): return 
self._internal_adapter.get_dataset() def get_size(self): return self._internal_adapter.get_size() def batch_size(self): return self._internal_adapter.batch_size() def has_partial_batch(self): return self._internal_adapter.has_partial_batch() def partial_batch_size(self): return self._internal_adapter.partial_batch_size() def should_recreate_iterator(self): return True class DatasetAdapter(DataAdapter): """Adapter that handles `tf.data.Dataset`.""" @staticmethod def can_handle(x, y=None): return (isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset)) or _is_distributed_dataset(x)) def __init__(self, x, y=None, sample_weights=None, steps=None, **kwargs): super(DatasetAdapter, self).__init__(x, y, **kwargs) # Note that the dataset instance is immutable, its fine to reuse the user # provided dataset. self._dataset = x # The user-provided steps. self._user_steps = steps self._validate_args(y, sample_weights, steps) def get_dataset(self): return self._dataset def get_size(self): return # Inferred in `DataHandler`. def batch_size(self): return None def has_partial_batch(self): return False def partial_batch_size(self): return None def should_recreate_iterator(self): # Since DistributedDatasets have no cardinality, the user must provide # all steps that need to be run, calling `.repeat()` as needed. if _is_distributed_dataset(self._dataset): return False # If user doesn't supply `steps`, or if they supply `steps` that # exactly equals the size of the `Dataset`, create a new iterator # each epoch. return (self._user_steps is None or tf.data.experimental.cardinality(self._dataset).numpy() == self._user_steps) def _validate_args(self, y, sample_weights, steps): """Validates `__init__` arguments.""" # Arguments that shouldn't be passed. 
if not is_none_or_empty(y): raise ValueError("`y` argument is not supported when using " "dataset as input.") if not is_none_or_empty(sample_weights): raise ValueError("`sample_weight` argument is not supported when using " "dataset as input.") if steps is None: if _is_distributed_dataset(self._dataset): raise ValueError("When providing a distributed dataset, you must " "specify the number of steps to run.") size = tf.data.experimental.cardinality(self._dataset).numpy() if size == tf.data.experimental.INFINITE_CARDINALITY and steps is None: raise ValueError( "When providing an infinite dataset, you must specify " "the number of steps to run (if you did not intend to " "create an infinite dataset, make sure to not call " "`repeat()` on the dataset).") class GeneratorDataAdapter(DataAdapter): """Adapter that handles python generators and iterators.""" @staticmethod def can_handle(x, y=None): return ((hasattr(x, "__next__") or hasattr(x, "next")) and hasattr(x, "__iter__") and not isinstance(x, data_utils.Sequence)) def __init__(self, x, y=None, sample_weights=None, workers=1, use_multiprocessing=False, max_queue_size=10, model=None, **kwargs): # Generators should never shuffle as exhausting the generator in order to # shuffle the batches is inefficient. kwargs.pop("shuffle", None) if not is_none_or_empty(y): raise ValueError("`y` argument is not supported when using " "python generator as input.") if not is_none_or_empty(sample_weights): raise ValueError("`sample_weight` argument is not supported when using " "python generator as input.") super(GeneratorDataAdapter, self).__init__(x, y, **kwargs) # Since we have to know the dtype of the python generator when we build the # dataset, we have to look at a batch to infer the structure. peek, x = self._peek_and_restore(x) peek = self._standardize_batch(peek) peek = _process_tensorlike(peek) # Need to build the Model on concrete input shapes. 
if model is not None and not model.built: concrete_x, _, _ = unpack_x_y_sample_weight(peek) try: model.distribute_strategy.run( lambda x: model(x, training=False), args=(concrete_x,)) except NotImplementedError: # The above call may fail if the model is a container-like class that # does not implement its own forward pass (e.g. a GAN or VAE where the # forward pass is handled by subcomponents). # Such a model does not need to be built. pass self._first_batch_size = int(tf.nest.flatten(peek)[0].shape[0]) def _get_dynamic_shape(t): shape = t.shape # Unknown number of dimensions, `as_list` cannot be called. if shape.rank is None: return shape return tf.TensorShape([None for _ in shape.as_list()]) output_shapes = tf.nest.map_structure(_get_dynamic_shape, peek) output_types = tf.nest.map_structure(lambda t: t.dtype, peek) # Note that dataset API takes a callable that creates a generator object, # rather than generator itself, which is why we define a function here. generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing, max_queue_size) def wrapped_generator(): for data in generator_fn(): yield self._standardize_batch(data) dataset = tf.data.Dataset.from_generator( wrapped_generator, output_types, output_shapes=output_shapes) if workers == 1 and not use_multiprocessing: dataset = dataset.prefetch(1) self._dataset = dataset def _standardize_batch(self, data): """Standardizes a batch output by a generator.""" # Removes `None`s. 
x, y, sample_weight = unpack_x_y_sample_weight(data) data = pack_x_y_sample_weight(x, y, sample_weight) data = tf.__internal__.nest.list_to_tuple(data) def _convert_dtype(t): if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)): return np.array(t, dtype=backend.floatx()) return t data = tf.nest.map_structure(_convert_dtype, data) return data @staticmethod def _peek_and_restore(x): peek = next(x) return peek, itertools.chain([peek], x) def _handle_multiprocessing(self, x, workers, use_multiprocessing, max_queue_size): """Create a callable, possibly including an Enqueuer.""" if workers > 1 or (workers > 0 and use_multiprocessing): def generator_fn(): enqueuer = data_utils.GeneratorEnqueuer( x, use_multiprocessing=use_multiprocessing) enqueuer.start(workers=workers, max_queue_size=max_queue_size) return enqueuer.get() else: generator_fn = lambda: x return generator_fn def get_dataset(self): return self._dataset def get_size(self): return None def batch_size(self): return None def representative_batch_size(self): return self._first_batch_size def has_partial_batch(self): return False def partial_batch_size(self): return def should_recreate_iterator(self): return False class KerasSequenceAdapter(GeneratorDataAdapter): """Adapter that handles `keras.utils.Sequence`.""" @staticmethod def can_handle(x, y=None): return isinstance(x, data_utils.Sequence) def __init__(self, x, y=None, sample_weights=None, shuffle=False, workers=1, use_multiprocessing=False, max_queue_size=10, model=None, **kwargs): if not is_none_or_empty(y): raise ValueError("`y` argument is not supported when using " "`keras.utils.Sequence` as input.") if not is_none_or_empty(sample_weights): raise ValueError("`sample_weight` argument is not supported when using " "`keras.utils.Sequence` as input.") self._size = len(x) self._shuffle_sequence = shuffle self._keras_sequence = x self._enqueuer = None super(KerasSequenceAdapter, self).__init__( x, shuffle=False, # Shuffle is handed in the 
_make_callable override. workers=workers, use_multiprocessing=use_multiprocessing, max_queue_size=max_queue_size, model=model, **kwargs) @staticmethod def _peek_and_restore(x): return x[0], x def _handle_multiprocessing(self, x, workers, use_multiprocessing, max_queue_size): if workers > 1 or (workers > 0 and use_multiprocessing): def generator_fn(): self._enqueuer = data_utils.OrderedEnqueuer( x, use_multiprocessing=use_multiprocessing, shuffle=self._shuffle_sequence) self._enqueuer.start(workers=workers, max_queue_size=max_queue_size) return self._enqueuer.get() else: def generator_fn(): order = range(len(x)) if self._shuffle_sequence: # Match the shuffle convention in OrderedEnqueuer. order = list(order) random.shuffle(order) for i in order: yield x[i] return generator_fn def get_size(self): return self._size def should_recreate_iterator(self): return True def on_epoch_end(self): if self._enqueuer: self._enqueuer.stop() self._keras_sequence.on_epoch_end() ALL_ADAPTER_CLS = [ ListsOfScalarsDataAdapter, TensorLikeDataAdapter, GenericArrayLikeDataAdapter, DatasetAdapter, GeneratorDataAdapter, KerasSequenceAdapter, CompositeTensorDataAdapter, DatasetCreatorAdapter ] def select_data_adapter(x, y): """Selects a data adapter than can handle a given x and y.""" adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)] if not adapter_cls: # TODO(scottzhu): This should be a less implementation-specific error. raise ValueError( "Failed to find data adapter that can handle " "input: {}, {}".format( _type_name(x), _type_name(y))) elif len(adapter_cls) > 1: raise RuntimeError( "Data adapters should be mutually exclusive for " "handling inputs. 
Found multiple adapters {} to handle " "input: {}, {}".format( adapter_cls, _type_name(x), _type_name(y))) # Instrument the data adapter usage before returning it keras_data_adapter_gauge.get_cell(adapter_cls[0].__name__).set(True) return adapter_cls[0] def _type_name(x): """Generates a description of the type of an object.""" if isinstance(x, dict): key_types = set(_type_name(key) for key in x.keys()) val_types = set(_type_name(key) for key in x.values()) return "({} containing {} keys and {} values)".format( type(x), key_types, val_types) if isinstance(x, (list, tuple)): types = set(_type_name(val) for val in x) return "({} containing values of types {})".format( type(x), types) return str(type(x)) def _process_tensorlike(inputs): """Process tensor-like inputs. This function: (1) Converts `Numpy` arrays to `Tensor`s. (2) Converts `Scipy` sparse matrices to `SparseTensor`s. (3) Converts `pandas.Series` to `Tensor`s (4) Converts `list`s to `tuple`s (for `tf.data` support). Args: inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like. Returns: Structure of `Tensor`s or tensor-like. """ def _convert_single_tensor(x): if _is_pandas_series(x): x = np.expand_dims(x.to_numpy(), axis=-1) if isinstance(x, np.ndarray): dtype = None if issubclass(x.dtype.type, np.floating): dtype = backend.floatx() return tf.convert_to_tensor(x, dtype=dtype) elif _is_scipy_sparse(x): return _scipy_sparse_to_sparse_tensor(x) return x inputs = tf.nest.map_structure(_convert_single_tensor, inputs) return tf.__internal__.nest.list_to_tuple(inputs) def is_none_or_empty(inputs): # util method to check if the input is a None or a empty list. # the python "not" check will raise an error like below if the input is a # numpy array # "The truth value of an array with more than one element is ambiguous. 
# Use a.any() or a.all()" return inputs is None or not tf.nest.flatten(inputs) def broadcast_sample_weight_modes(target_structure, sample_weight_modes): """Match sample_weight_modes structure with output structure.""" if target_structure is None or not tf.nest.flatten(target_structure): return sample_weight_modes if isinstance(sample_weight_modes, str): if isinstance(target_structure, dict): return {key: sample_weight_modes for key in target_structure.keys()} return [sample_weight_modes for _ in target_structure] if sample_weight_modes: try: tf.nest.assert_same_structure( training_utils.list_to_tuple(target_structure), training_utils.list_to_tuple(sample_weight_modes)) except (ValueError, TypeError): target_str = str(tf.nest.map_structure(lambda _: "...", target_structure)) mode_str = str(tf.nest.map_structure(lambda _: "...", sample_weight_modes)) # Attempt to coerce sample_weight_modes to the target structure. This # implicitly depends on the fact that Model flattens outputs for its # internal representation. try: sample_weight_modes = tf.nest.pack_sequence_as( target_structure, tf.nest.flatten(sample_weight_modes)) logging.warning( "sample_weight modes were coerced from\n {}\n to \n {}" .format(target_str, mode_str)) except (ValueError, TypeError): raise ValueError( "Unable to match target structure and sample_weight_modes " "structure:\n {}\n to \n {}".format(target_str, mode_str)) return sample_weight_modes class DataHandler: """Handles iterating over epoch-level `tf.data.Iterator` objects.""" def __init__(self, x, y=None, sample_weight=None, batch_size=None, steps_per_epoch=None, initial_epoch=0, epochs=1, shuffle=False, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, model=None, steps_per_execution=None, distribute=True): """Initializes a `DataHandler`. Arguments: x: See `Model.fit`. y: See `Model.fit`. sample_weight: See `Model.fit`. batch_size: See `Model.fit`. steps_per_epoch: See `Model.fit`. initial_epoch: See `Model.fit`. 
epochs: See `Model.fit`. shuffle: See `Model.fit`. class_weight: See `Model.fit`. max_queue_size: See `Model.fit`. workers: See `Model.fit`. use_multiprocessing: See `Model.fit`. model: The `Model` instance. Needed in order to correctly `build` the `Model` using generator-like inputs (see `GeneratorDataAdapter`). steps_per_execution: See `Model.compile`. distribute: Whether to distribute the `tf.dataset`. `PreprocessingLayer.adapt` does not support distributed datasets, `Model` should always set this to `True`. """ self._initial_epoch = initial_epoch self._epochs = epochs self._insufficient_data = False self._model = model # `steps_per_execution_value` is the cached initial value. # `steps_per_execution` is mutable and may be changed by the DataAdapter # to handle partial executions. if steps_per_execution is None: self._steps_per_execution = 1 self._steps_per_execution_value = 1 else: self._steps_per_execution = steps_per_execution self._steps_per_execution_value = steps_per_execution.numpy().item() adapter_cls = select_data_adapter(x, y) self._adapter = adapter_cls( x, y, batch_size=batch_size, steps=steps_per_epoch, epochs=epochs - initial_epoch, sample_weights=sample_weight, shuffle=shuffle, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, distribution_strategy=tf.distribute.get_strategy(), model=model) strategy = tf.distribute.get_strategy() self._current_step = 0 self._step_increment = self._steps_per_execution_value - 1 self._insufficient_data = False self._configure_dataset_and_inferred_steps(strategy, x, steps_per_epoch, class_weight, distribute) def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch, class_weight, distribute): """Configure the `_dataset` and `_inferred_steps` attributes.""" del x dataset = self._adapter.get_dataset() if class_weight: dataset = dataset.map(_make_class_weight_map_fn(class_weight)) self._inferred_steps = self._infer_steps(steps_per_epoch, dataset) # 
`PreprocessingLayer.adapt` does not currently support distributed # datasets, so we pass `distribute=False` there. if distribute and not _is_distributed_dataset(dataset): dataset = strategy.experimental_distribute_dataset(dataset) self._dataset = dataset self._validate_data_handler() def enumerate_epochs(self): """Yields `(epoch, tf.data.Iterator)`.""" with self._truncate_execution_to_epoch(): data_iterator = iter(self._dataset) for epoch in range(self._initial_epoch, self._epochs): if self._insufficient_data: # Set by `catch_stop_iteration`. break if self._adapter.should_recreate_iterator(): data_iterator = iter(self._dataset) yield epoch, data_iterator self._adapter.on_epoch_end() @contextlib.contextmanager def _truncate_execution_to_epoch(self): """Truncates steps per execution to at most one epoch.""" should_truncate = ( self._inferred_steps is not None and self._steps_per_execution_value > self._inferred_steps) original_value = self._steps_per_execution_value try: if should_truncate: self._steps_per_execution.assign(self._inferred_steps) self._steps_per_execution_value = self._inferred_steps yield finally: if should_truncate: self._steps_per_execution.assign(original_value) self._steps_per_execution_value = original_value def sync(self): context.async_wait() @contextlib.contextmanager def catch_stop_iteration(self): """Catches errors when an iterator runs out of data.""" try: yield self.sync() except (StopIteration, tf.errors.OutOfRangeError): if self._inferred_steps is None: self._inferred_steps = self._current_step else: self._insufficient_data = True total_epochs = self._epochs - self._initial_epoch logging.warning( "Your input ran out of data; interrupting training. " "Make sure that your dataset or generator can generate at " "least `steps_per_epoch * epochs` batches (in this case, " "{} batches). 
You may need to use the repeat() function " "when building your dataset.".format(total_epochs * self._inferred_steps)) def steps(self): """Yields steps for the current epoch.""" self._current_step = 0 # `self._inferred_steps` can be changed by `catch_stop_iteration`. while (self._inferred_steps is None or self._current_step < self._inferred_steps): if self._insufficient_data: # Set by `catch_stop_iteration`. break can_run_full_execution = ( self._steps_per_execution_value == 1 or self._inferred_steps is None or self._inferred_steps - self._current_step >= self._steps_per_execution_value) if can_run_full_execution: self._step_increment = self._steps_per_execution_value - 1 yield self._current_step self._current_step += self._steps_per_execution_value else: # Last partial execution. steps_remaining = self._inferred_steps - self._current_step self._steps_per_execution.assign(steps_remaining) self._step_increment = steps_remaining - 1 yield self._current_step self._current_step += steps_remaining self._steps_per_execution.assign(self._steps_per_execution_value) @property def step_increment(self): """The number to increment the step for `on_batch_end` methods.""" return self._step_increment @property def inferred_steps(self): """The inferred steps per epoch of the created `Dataset`. This will be `None` in the case where: (1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and (2) `steps_per_epoch` was not provided, and (3) The first epoch of iteration has not yet completed. Returns: The inferred steps per epoch of the created `Dataset`. """ return self._inferred_steps @property def should_sync(self): # Catch OutOfRangeError for Datasets of unknown size. # This blocks until the batch has finished executing. # TODO(b/150292341): Allow multiple async steps here. return self._inferred_steps is None def _log_indefinite_training_warning(self): logging.warning("The training loop will run indefinitely since you have " "set `steps_per_epoch=-1`. 
Please use batch-level " "callbacks to save checkpoints or log training progress, " "etc") def _infer_steps(self, steps, dataset): """Infers steps_per_epoch needed to loop through a dataset.""" if steps == -1: self._log_indefinite_training_warning() return None if steps is not None: return steps adapter_steps = self._adapter.get_size() if adapter_steps is not None: return adapter_steps size = tf.data.experimental.cardinality(dataset) if size == tf.data.experimental.INFINITE_CARDINALITY and steps is None: raise ValueError( "When passing an infinitely repeating dataset, please specify a " "`steps_per_epoch` value so that epoch level " "callbacks continue to work. The value can be arbitrary, or a number " "that you think correctly defines the size of an epoch. " "Epoch-level callbacks will then be called at this interval.") if size >= 0: return size.numpy().item() return None @property def _samples(self): return self._adapter.get_samples() def _validate_data_handler(self): # TODO(b/152094471): Support this with DistIter.get_next_as_optional. if self._steps_per_execution_value > 1 and self._inferred_steps is None: raise ValueError( "Could not infer the size of the data. With " "`steps_per_execution > 1`, you must specify the number of steps " "to run.") class _ClusterCoordinatorDataHandler(DataHandler): """A `DataHandler` that is compatible with `ClusterCoordinator`.""" def __init__(self, x, y=None, **kwargs): if (not _is_distributed_dataset(x) and not isinstance(x, (dataset_creator.DatasetCreator, tf.data.Dataset))): x = self._convert_to_dataset_creator(x, y, **kwargs) super().__init__(x=x, **kwargs) def _convert_to_dataset_creator(self, x, y, **kwargs): """Converts non-tf.data.Dataset to `DatasetCreator` instances.""" def _dataset_fn(input_context): del input_context data_adapter_cls = select_data_adapter(x, y) return data_adapter_cls(x=x, y=y, **kwargs).get_dataset() # This check is needed because types like `tf.data.Dataset` don't work with # PSS yet. 
So only apply this logic to the types we can support. if (isinstance(x, _get_tensor_types()) and isinstance(y, _get_tensor_types())): return dataset_creator.DatasetCreator(_dataset_fn) else: raise NotImplementedError( "Only `tf.keras.utils.experimental.DatasetCreator`, `tf.Tensor`, " "numpy arrays and pandas dataframes are supported types at this " "time.") def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch, class_weight, distribute): if isinstance(x, dataset_creator.DatasetCreator): def per_worker_dataset_fn(): return strategy.distribute_datasets_from_function( x, options=x.input_options) self._dataset = self._model._cluster_coordinator.create_per_worker_dataset( # pylint: disable=protected-access per_worker_dataset_fn) else: assert distribute if not _is_distributed_dataset(x): x = strategy.experimental_distribute_dataset(x) self._dataset = self._model._cluster_coordinator.create_per_worker_dataset( # pylint: disable=protected-access x) if steps_per_epoch == -1: self._inferred_steps = None self._log_indefinite_training_warning() else: self._inferred_steps = steps_per_epoch def sync(self): self._model._cluster_coordinator.join() # pylint: disable=protected-access def get_data_handler(*args, **kwargs): if getattr(kwargs["model"], "_cluster_coordinator", None): return _ClusterCoordinatorDataHandler(*args, **kwargs) return DataHandler(*args, **kwargs) def _make_class_weight_map_fn(class_weight): """Applies class weighting to a `Dataset`. The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where `y` must be a single `Tensor`. Args: class_weight: A map where the keys are integer class ids and values are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}` Returns: A function that can be used with `tf.data.Dataset.map` to apply class weighting. 
""" class_ids = list(sorted(class_weight.keys())) expected_class_ids = list(range(len(class_ids))) if class_ids != expected_class_ids: error_msg = ( "Expected `class_weight` to be a dict with keys from 0 to one less " "than the number of classes, found {}").format(class_weight) raise ValueError(error_msg) class_weight_tensor = tf.convert_to_tensor( [class_weight[int(c)] for c in class_ids]) def _class_weights_map_fn(*data): """Convert `class_weight` to `sample_weight`.""" x, y, sw = unpack_x_y_sample_weight(data) if tf.nest.is_nested(y): raise ValueError( "`class_weight` is only supported for Models with a single output.") if y.shape.rank > 2: raise ValueError("`class_weight` not supported for " "3+ dimensional targets.") y_classes = tf.__internal__.smart_cond.smart_cond( y.shape.rank == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: tf.cast(backend.reshape(y, (-1,)), tf.int64)) cw = tf.gather(class_weight_tensor, y_classes) if sw is not None: cw = tf.cast(cw, sw.dtype) # `class_weight` and `sample_weight` are multiplicative. sw = sw * cw else: sw = cw return x, y, sw return _class_weights_map_fn def train_validation_split(arrays, validation_split): """Split arrays into train and validation subsets in deterministic order. The last part of data will become validation data. Args: arrays: Tensors to split. Allowed inputs are arbitrarily nested structures of Tensors and NumPy arrays. validation_split: Float between 0 and 1. The proportion of the dataset to include in the validation split. The rest of the dataset will be included in the training split. 
Returns: `(train_arrays, validation_arrays)` """ def _can_split(t): tensor_types = _get_tensor_types() return isinstance(t, tensor_types) or t is None flat_arrays = tf.nest.flatten(arrays) unsplitable = [type(t) for t in flat_arrays if not _can_split(t)] if unsplitable: raise ValueError( "`validation_split` is only supported for Tensors or NumPy " "arrays, found following types in the input: {}".format(unsplitable)) if all(t is None for t in flat_arrays): return arrays, arrays first_non_none = None for t in flat_arrays: if t is not None: first_non_none = t break # Assumes all arrays have the same batch shape or are `None`. batch_dim = int(first_non_none.shape[0]) split_at = int(math.floor(batch_dim * (1. - validation_split))) if split_at == 0 or split_at == batch_dim: raise ValueError( "Training data contains {batch_dim} samples, which is not sufficient " "to split it into a validation and training set as specified by " "`validation_split={validation_split}`. Either provide more data, or a " "different value for the `validation_split` argument." .format( batch_dim=batch_dim, validation_split=validation_split)) def _split(t, start, end): if t is None: return t return t[start:end] train_arrays = tf.nest.map_structure( functools.partial(_split, start=0, end=split_at), arrays) val_arrays = tf.nest.map_structure( functools.partial(_split, start=split_at, end=batch_dim), arrays) return train_arrays, val_arrays @keras_export("keras.utils.unpack_x_y_sample_weight", v1=[]) def unpack_x_y_sample_weight(data): """Unpacks user-provided data tuple. This is a convenience utility to be used when overriding `Model.train_step`, `Model.test_step`, or `Model.predict_step`. This utility makes it easy to support data of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Standalone usage: >>> features_batch = tf.ones((10, 5)) >>> labels_batch = tf.zeros((10, 5)) >>> data = (features_batch, labels_batch) >>> # `y` and `sample_weight` will default to `None` if not provided. 
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) >>> sample_weight is None True Example in overridden `Model.train_step`: ```python class MyModel(tf.keras.Model): def train_step(self, data): # If `sample_weight` is not provided, all samples will be weighted # equally. x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) with tf.GradientTape() as tape: y_pred = self(x, training=True) loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) trainable_variables = self.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) self.compiled_metrics.update_state(y, y_pred, sample_weight) return {m.name: m.result() for m in self.metrics} ``` Args: data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Returns: The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not provided. """ if not isinstance(data, tuple): return (data, None, None) elif len(data) == 1: return (data[0], None, None) elif len(data) == 2: return (data[0], data[1], None) elif len(data) == 3: return (data[0], data[1], data[2]) else: error_msg = ("Data is expected to be in format `x`, `(x,)`, `(x, y)`, " "or `(x, y, sample_weight)`, found: {}").format(data) raise ValueError(error_msg) @keras_export("keras.utils.pack_x_y_sample_weight", v1=[]) def pack_x_y_sample_weight(x, y=None, sample_weight=None): """Packs user-provided data into a tuple. This is a convenience utility for packing data into the tuple formats that `Model.fit` uses. Standalone usage: >>> x = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x) >>> isinstance(data, tf.Tensor) True >>> y = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y) >>> isinstance(data, tuple) True >>> x, y = data Args: x: Features to pass to `Model`. y: Ground-truth targets to pass to `Model`. sample_weight: Sample weight for each element. 
Returns: Tuple in the format used in `Model.fit`. """ if y is None: # For single x-input, we do no tuple wrapping since in this case # there is no ambiguity. This also makes NumPy and Dataset # consistent in that the user does not have to wrap their Dataset # data in an unnecessary tuple if not tf.nest.is_nested(x): return x else: return (x,) elif sample_weight is None: return (x, y) else: return (x, y, sample_weight) def single_batch_iterator(strategy, x, y=None, sample_weight=None, class_weight=None): """Creates a single-batch dataset.""" x, y, sample_weight = _process_tensorlike((x, y, sample_weight)) if y is None: data = (x,) elif sample_weight is None: data = (x, y) else: data = (x, y, sample_weight) _check_data_cardinality(data) dataset = tf.data.Dataset.from_tensors(data) if class_weight: dataset = dataset.map(_make_class_weight_map_fn(class_weight)) dataset = strategy.experimental_distribute_dataset(dataset) return iter(dataset) def _check_data_cardinality(data): num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(data)) if len(num_samples) > 1: msg = "Data cardinality is ambiguous:\n" for label, single_data in zip(["x", "y", "sample_weight"], data): msg += " {} sizes: {}\n".format( label, ", ".join(str(i.shape[0]) for i in tf.nest.flatten(single_data))) msg += "Make sure all arrays contain the same number of samples." 
raise ValueError(msg) def _get_tensor_types(): if pd is None: return (tf.Tensor, np.ndarray) else: return (tf.Tensor, np.ndarray, pd.Series, pd.DataFrame) def _is_scipy_sparse(x): try: from scipy.sparse import issparse # pylint: disable=g-import-not-at-top return issparse(x) except ImportError: return False def _is_pandas_series(x): if pd is None: return False else: return isinstance(x, pd.Series) def _scipy_sparse_to_sparse_tensor(t): """Converts a SciPy sparse matrix to a SparseTensor.""" sparse_coo = t.tocoo() row, col = sparse_coo.row, sparse_coo.col data, shape = sparse_coo.data, sparse_coo.shape if issubclass(data.dtype.type, np.floating): data = data.astype(backend.floatx()) indices = np.concatenate( (np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1) return tf.SparseTensor(indices, data, shape) def _is_distributed_dataset(ds): return isinstance(ds, tf.distribute.DistributedDataset)
58,696
33.60908
117
py
keras
keras-master/keras/engine/training_dataset_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import tensorflow.compat.v2 as tf import io import sys import numpy as np import keras from keras import callbacks from keras import keras_parameterized from keras import metrics as metrics_module from keras import testing_utils from tensorflow.python.platform import tf_logging as logging class BatchCounterCallback(callbacks.Callback): def __init__(self): self.batch_begin_count = 0 self.batch_end_count = 0 def on_batch_begin(self, *args, **kwargs): self.batch_begin_count += 1 def on_batch_end(self, *args, **kwargs): self.batch_end_count += 1 class TestTrainingWithDataset(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_calling_model_on_same_dataset(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = 'rmsprop' loss = 'mse' metrics = ['mae'] model.compile( optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3), np.float32) targets = np.zeros((10, 4), np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) # Call fit with validation data model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, 
validation_steps=2) model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, validation_steps=2) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_training_and_eval_methods_on_dataset(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = 'rmsprop' loss = 'mse' metrics = ['mae', metrics_module.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3), np.float32) targets = np.zeros((10, 4), np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat() # Infinite dataset. dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1) model.predict(dataset, steps=2) # Test with validation data model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, validation_steps=2) # Test with validation split with self.assertRaises(ValueError): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_split=0.5, validation_steps=2) # Test with sample weight. sample_weight = np.random.random((10,)) with self.assertRaisesRegex( ValueError, r'`sample_weight` argument is not supported .+dataset'): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, sample_weight=sample_weight) with self.assertRaisesRegex( ValueError, '(you should not specify a target)|' '(`y` argument is not supported when using dataset as input.)'): model.fit(dataset, dataset, epochs=1, steps_per_epoch=2, verbose=0) # With an infinite dataset, `steps_per_epoch`/`steps` argument is required. 
with self.assertRaises(ValueError): model.fit(dataset, epochs=1, verbose=0) with self.assertRaises(ValueError): model.evaluate(dataset, verbose=0) with self.assertRaises(ValueError): model.predict(dataset, verbose=0) @keras_parameterized.run_with_all_model_types(exclude_models='sequential') @keras_parameterized.run_all_keras_modes def test_training_and_eval_methods_on_multi_input_output_dataset(self): input_a = keras.layers.Input(shape=(3,), name='input_1') input_b = keras.layers.Input(shape=(3,), name='input_2') dense = keras.layers.Dense(4, name='dense') dropout = keras.layers.Dropout(0.5, name='dropout') branch_a = [input_a, dense] branch_b = [input_b, dense, dropout] model = testing_utils.get_multi_io_model(branch_a, branch_b) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) input_a_np = np.random.random((10, 3)).astype(dtype=np.float32) input_b_np = np.random.random((10, 3)).astype(dtype=np.float32) output_d_np = np.random.random((10, 4)).astype(dtype=np.float32) output_e_np = np.random.random((10, 4)).astype(dtype=np.float32) # Test with tuples dataset_tuple = tf.data.Dataset.from_tensor_slices( ((input_a_np, input_b_np), (output_d_np, output_e_np))) dataset_tuple = dataset_tuple.repeat(100) dataset_tuple = dataset_tuple.batch(10) model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset_tuple, steps=2, verbose=1) # Test with dict input_dict = {'input_1': input_a_np, 'input_2': input_b_np} if testing_utils.get_model_type() == 'subclass': output_dict = {'output_1': output_d_np, 'output_2': output_e_np} else: output_dict = {'dense': output_d_np, 'dropout': output_e_np} dataset_dict = tf.data.Dataset.from_tensor_slices( (input_dict, output_dict)) dataset_dict = dataset_dict.repeat(100) dataset_dict = dataset_dict.batch(10) model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset_dict, steps=2, verbose=1) predict_dataset_dict = 
tf.data.Dataset.from_tensor_slices(input_dict) predict_dataset_dict = predict_dataset_dict.repeat(100) predict_dataset_dict = predict_dataset_dict.batch(10) model.predict(predict_dataset_dict, steps=1) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_dataset_with_sample_weights(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = 'rmsprop' loss = 'mse' metrics = ['mae', metrics_module.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3), np.float32) targets = np.zeros((10, 4), np.float32) sample_weights = np.ones((10), np.float32) dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets, sample_weights)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1) model.predict(dataset, steps=2) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_dataset_with_sample_weights_correctness(self): x = keras.layers.Input(shape=(1,), name='input') y = keras.layers.Dense( 1, kernel_initializer='ones', bias_initializer='zeros', name='dense')( x) model = keras.Model(x, y) optimizer = 'rmsprop' loss = 'mse' model.compile(optimizer, loss) inputs = np.array([[0], [1], [2], [3]], np.float32) targets = np.array([[2], [4], [6], [8]], np.float32) sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32) ds = tf.data.Dataset.from_tensor_slices( (inputs, targets, sample_weights)).batch(2) result = model.evaluate(ds, verbose=1) # The per sample loss is multiplied by the corresponding sample weight. The # average of these weighted losses is the return value of the `evaluate` # call. 
For example, in the test above the average weighted loss is # calculated in the following manner: # ((2-0)^2) * 0.25 + ((4-1)^2) * 0.5 + ((6-2)^2 * 0.75) + ((8-3)^2 * 1) # equals 42.5 / 4 = 10.625 self.assertEqual(result, 10.625) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_dataset_with_sparse_labels(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = 'rmsprop' model.compile( optimizer, loss='sparse_categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3), dtype=np.float32) targets = np.random.randint(0, 4, size=10, dtype=np.int32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) @keras_parameterized.run_all_keras_modes def test_dataset_fit_correctness(self): class SumLayer(keras.layers.Layer): def build(self, _): self.w = self.add_weight('w', ()) def call(self, inputs): return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0 model = keras.Sequential([SumLayer(input_shape=(2,))]) model.compile( 'rmsprop', loss='mae', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((40, 2), dtype=np.float32) inputs[10:20, :] = 2 inputs[20:30, :] = 1 inputs[30:, :] = 4 targets = np.zeros((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. train_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, steps_per_epoch=2, verbose=1, validation_data=val_dataset, validation_steps=2) self.assertAllClose(history.history['loss'], [inputs[:20].sum() / 20, inputs[20:].sum() / 20]) # The validation dataset will be reset at the end of each validation run. 
self.assertAllClose(history.history['val_loss'], [inputs[:20].sum() / 20, inputs[:20].sum() / 20]) # Test correctness with dataset reset. train_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) self.assertAllClose( history.history['loss'], [inputs.sum() / 40, inputs.sum() / 40]) self.assertAllClose( history.history['val_loss'], [inputs.sum() / 40, inputs.sum() / 40]) def test_dataset_input_shape_validation(self): with tf.compat.v1.get_default_graph().as_default(), self.cached_session(): model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3) model.compile(optimizer='rmsprop', loss='mse') # User forgets to batch the dataset inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) with self.assertRaisesRegex( ValueError, r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)' ): model.train_on_batch(dataset) # Wrong input shape inputs = np.zeros((10, 5)) targets = np.zeros((10, 4)) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) with self.assertRaisesRegex(ValueError, r'expected (.*?) 
to have shape \(3,\)'): model.train_on_batch(dataset) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_finite_dataset_known_cardinality_no_steps_arg(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((100, 3), dtype=np.float32) targets = np.random.randint(0, 4, size=100, dtype=np.int32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.batch(10) batch_counter = BatchCounterCallback() history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter]) self.assertLen(history.history['loss'], 2) self.assertEqual(batch_counter.batch_end_count, 20) model.evaluate(dataset) out = model.predict(dataset) self.assertEqual(out.shape[0], 100) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_finite_dataset_unknown_cardinality_no_steps_arg(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((100, 3), dtype=np.float32) targets = np.random.randint(0, 4, size=100, dtype=np.int32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.filter(lambda x, y: True).batch(10) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) batch_counter = BatchCounterCallback() history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter]) self.assertLen(history.history['loss'], 2) self.assertEqual(batch_counter.batch_end_count, 20) model.evaluate(dataset) out = model.predict(dataset) self.assertEqual(out.shape[0], 100) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self): class CaptureStdout: def 
__enter__(self): self._stdout = sys.stdout string_io = io.StringIO() sys.stdout = string_io self._stringio = string_io return self def __exit__(self, *args): self.output = self._stringio.getvalue() sys.stdout = self._stdout model = testing_utils.get_small_mlp(1, 4, input_dim=3) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((100, 3), dtype=np.float32) targets = np.random.randint(0, 4, size=100, dtype=np.int32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.filter(lambda x, y: True).batch(10) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) batch_counter = BatchCounterCallback() with CaptureStdout() as capture: history = model.fit( dataset, epochs=2, callbacks=[batch_counter], validation_data=dataset.take(3)) lines = capture.output.splitlines() self.assertIn('10/10', lines[-1]) self.assertLen(history.history['loss'], 2) self.assertEqual(batch_counter.batch_begin_count, 21) self.assertEqual(batch_counter.batch_end_count, 20) model.evaluate(dataset) out = model.predict(dataset) self.assertEqual(out.shape[0], 100) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_finite_dataset_unknown_cardinality_out_of_data(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((100, 3), dtype=np.float32) targets = np.random.randint(0, 4, size=100, dtype=np.int32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.filter(lambda x, y: True).batch(10) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) batch_counter = BatchCounterCallback() with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: # steps_per_epoch (200) is greater than the dataset size 
(100). As this is # unexpected, training will stop and not make it to the second epoch. history = model.fit( dataset, epochs=2, verbose=1, callbacks=[batch_counter], steps_per_epoch=200) self.assertIn('ran out of data; interrupting training.', str(mock_log.call_args)) self.assertIn( 'can generate at least ' '`steps_per_epoch * epochs` batches (in this case, 400 batches). ' 'You may need to use the repeat() function when ' 'building your dataset.', str(mock_log.call_args)) self.assertLen(history.history['loss'], 1) self.assertEqual(batch_counter.batch_end_count, 10) model.evaluate(dataset) out = model.predict(dataset) self.assertEqual(out.shape[0], 100) @keras_parameterized.run_all_keras_modes def test_with_external_loss(self): inp = keras.Input(shape=(4,), name='inp1') out = keras.layers.Dense(2)(inp) model = keras.Model(inp, out) model.add_loss(tf.reduce_mean(out)) model.compile('rmsprop') x = np.ones((10, 4)) # dataset contains only features, no labels. dataset = tf.data.Dataset.from_tensor_slices(x).repeat(10).batch(10) model.fit(dataset) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_train_eval_with_steps(self): # See b/142880049 for more details. inp = keras.Input(shape=(4,), name='inp1') out = keras.layers.Dense(2)(inp) model = keras.Model(inp, out) model.compile( 'rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((100, 4), dtype=np.float32) targets = np.random.randint(0, 2, size=100, dtype=np.int32) training_ds = tf.data.Dataset.from_tensor_slices( (inputs, targets)).repeat().batch(10) # Create eval dataset with generator, so that dataset won't contain the # overall size metadata. Without eval_steps, we expect to run through all # the data in this dataset every epoch. 
def gen(): for _ in range(100): yield (np.zeros(4, dtype=np.float32), np.random.randint(0, 2, size=1, dtype=np.int32)) eval_ds = tf.data.Dataset.from_generator( generator=gen, output_types=('float64', 'int32'), output_shapes=([4], [1])).batch(100) batch_counter = BatchCounterCallback() model.fit( training_ds, steps_per_epoch=10, epochs=10, validation_data=eval_ds, callbacks=[batch_counter]) # Expect 10 batch from training per epoch. self.assertEqual(batch_counter.batch_end_count, 100) class TestMetricsWithDatasets(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_metrics_correctness_with_dataset(self): layers = [ keras.layers.Dense( 8, activation='relu', input_dim=4, kernel_initializer='ones'), keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones') ] model = testing_utils.get_model_from_layers(layers, (4,)) model.compile( loss='binary_crossentropy', metrics=['accuracy', metrics_module.BinaryAccuracy()], optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) np.random.seed(123) x = np.random.randint(10, size=(100, 4)).astype(np.float32) y = np.random.randint(2, size=(100, 1)).astype(np.float32) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.batch(10) outs = model.evaluate(dataset, steps=10) self.assertEqual(np.around(outs[1], decimals=1), 0.5) self.assertEqual(np.around(outs[2], decimals=1), 0.5) y = np.zeros((100, 1), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) outs = model.evaluate(dataset, steps=10) self.assertEqual(outs[1], 0.) self.assertEqual(outs[2], 0.) if __name__ == '__main__': tf.test.main()
20,760
35.422807
80
py
keras
keras-master/keras/engine/training_generator_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import tensorflow.compat.v2 as tf import itertools from absl.testing import parameterized import numpy as np from keras import combinations from keras import keras_parameterized from keras import layers as layers_module from keras import losses from keras import metrics as metrics_module from keras import testing_utils from keras.engine import input_layer from keras.engine import training from keras.engine import training_generator_v1 from keras.optimizer_v2 import rmsprop from keras.utils import data_utils def custom_generator(mode=2): batch_size = 10 num_samples = 50 arr_data = np.random.random((num_samples, 2)) arr_labels = np.random.random((num_samples, 4)) arr_weights = np.random.random((num_samples,)) i = 0 while True: batch_index = i * batch_size % num_samples i += 1 start = batch_index end = start + batch_size x = arr_data[start: end] y = arr_labels[start: end] w = arr_weights[start: end] if mode == 1: yield x elif mode == 2: yield x, y else: yield x, y, w def custom_generator_changing_batch_size(mode=2): batch_size = 10 cur_batch_size = 11 num_samples = 50 arr_data = np.random.random((num_samples, 2)) arr_labels = np.random.random((num_samples, 4)) arr_weights = np.random.random((num_samples,)) i = 0 while True: if cur_batch_size > 1: cur_batch_size 
-= 1 batch_index = i * batch_size % num_samples i += 1 start = batch_index end = start + cur_batch_size x = arr_data[start: end] y = arr_labels[start: end] w = arr_weights[start: end] if mode == 1: yield x elif mode == 2: yield x, y else: yield x, y, w custom_generator_threads = data_utils.threadsafe_generator(custom_generator) class TestGeneratorMethods(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_fit_generator_method(self): model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.compile( loss='mse', optimizer=rmsprop.RMSprop(1e-3), metrics=['mae', metrics_module.CategoricalAccuracy()]) model.fit_generator(custom_generator_threads(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, workers=4, use_multiprocessing=True) model.fit_generator(custom_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False) model.fit_generator(custom_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=custom_generator(), validation_steps=10) model.fit_generator(custom_generator(), steps_per_epoch=5, validation_data=custom_generator(), validation_steps=1, workers=0) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_evaluate_generator_method(self): model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.compile( loss='mse', optimizer=rmsprop.RMSprop(1e-3), metrics=['mae', metrics_module.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly()) model.evaluate_generator(custom_generator_threads(), steps=5, max_queue_size=10, workers=2, verbose=1, use_multiprocessing=True) model.evaluate_generator(custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False) 
model.evaluate_generator(custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, workers=0) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_predict_generator_method(self): model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.run_eagerly = testing_utils.should_run_eagerly() model.predict_generator(custom_generator_threads(), steps=5, max_queue_size=10, workers=2, use_multiprocessing=True) model.predict_generator(custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False) model.predict_generator(custom_generator(), steps=5, max_queue_size=10, workers=0) # Test generator with just inputs (no targets) model.predict_generator(custom_generator_threads(mode=1), steps=5, max_queue_size=10, workers=2, use_multiprocessing=True) model.predict_generator(custom_generator(mode=1), steps=5, max_queue_size=10, use_multiprocessing=False) model.predict_generator(custom_generator(mode=1), steps=5, max_queue_size=10, workers=0) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_generator_methods_with_sample_weights(self): model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.compile( loss='mse', optimizer=rmsprop.RMSprop(1e-3), metrics=['mae', metrics_module.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly()) model.fit_generator(custom_generator(mode=3), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False) model.fit_generator(custom_generator(mode=3), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=custom_generator(mode=3), validation_steps=10) model.predict_generator(custom_generator(mode=3), steps=5, max_queue_size=10, use_multiprocessing=False) model.evaluate_generator(custom_generator(mode=3), steps=5, max_queue_size=10, use_multiprocessing=False) 
@keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_generator_methods_invalid_use_case(self): def invalid_generator(): while 1: yield (0, 0, 0, 0) model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.compile( loss='mse', optimizer=rmsprop.RMSprop(1e-3), run_eagerly=testing_utils.should_run_eagerly()) with self.assertRaises(ValueError): model.fit_generator(invalid_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False) with self.assertRaises(ValueError): model.fit_generator(custom_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=invalid_generator(), validation_steps=10) with self.assertRaises(ValueError): model.predict_generator(invalid_generator(), steps=5, max_queue_size=10, use_multiprocessing=False) with self.assertRaises(ValueError): model.evaluate_generator(invalid_generator(), steps=5, max_queue_size=10, use_multiprocessing=False) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_generator_input_to_fit_eval_predict(self): val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) def ones_generator(): while True: yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) model = testing_utils.get_small_mlp( num_hidden=10, num_classes=1, input_dim=10) model.compile( rmsprop.RMSprop(0.001), 'binary_crossentropy', run_eagerly=testing_utils.should_run_eagerly()) model.fit( ones_generator(), steps_per_epoch=2, validation_data=val_data, epochs=2) model.evaluate(ones_generator(), steps=2) model.predict(ones_generator(), steps=2) # Test with a changing batch size model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.compile( loss='mse', optimizer=rmsprop.RMSprop(1e-3), metrics=['mae', metrics_module.CategoricalAccuracy()]) model.fit_generator(custom_generator_changing_batch_size(), 
steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False) model.fit_generator(custom_generator_changing_batch_size(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=custom_generator_changing_batch_size(), validation_steps=10) model.fit( custom_generator_changing_batch_size(), steps_per_epoch=5, validation_data=custom_generator_changing_batch_size(), validation_steps=10, epochs=2) model.evaluate(custom_generator_changing_batch_size(), steps=5) model.predict(custom_generator_changing_batch_size(), steps=5) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_generator_dynamic_shapes(self): x = [ 'I think juice is great', 'unknown is the best language since slicedbread', 'a a a a a a a', 'matmul' 'Yaks are also quite nice', ] y = [1, 0, 0, 1, 1] vocab = { word: i + 1 for i, word in enumerate( sorted(set(itertools.chain(*[i.split() for i in x])))) } def data_gen(batch_size=2): np.random.seed(0) data = list(zip(x, y)) * 10 np.random.shuffle(data) def pack_and_pad(queue): x = [[vocab[j] for j in i[0].split()] for i in queue] pad_len = max(len(i) for i in x) x = np.array([i + [0] * (pad_len - len(i)) for i in x]) y = np.array([i[1] for i in queue]) del queue[:] return x, y[:, np.newaxis] queue = [] for i, element in enumerate(data): queue.append(element) if not (i + 1) % batch_size: yield pack_and_pad(queue) if queue: # Last partial batch yield pack_and_pad(queue) model = testing_utils.get_model_from_layers([ layers_module.Embedding(input_dim=len(vocab) + 1, output_dim=4), layers_module.SimpleRNN(units=1), layers_module.Activation('sigmoid') ], input_shape=(None,)) model.compile(loss=losses.binary_crossentropy, optimizer='sgd') model.fit(data_gen(), epochs=1, steps_per_epoch=5) class TestGeneratorMethodsWithSequences(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types 
@keras_parameterized.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_training_with_sequences(self): class DummySequence(data_utils.Sequence): def __getitem__(self, idx): return np.zeros([10, 2]), np.ones([10, 4]) def __len__(self): return 10 model = testing_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2) model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3)) model.fit_generator(DummySequence(), steps_per_epoch=10, validation_data=custom_generator(), validation_steps=1, max_queue_size=10, workers=0, use_multiprocessing=True) model.fit_generator(DummySequence(), steps_per_epoch=10, validation_data=custom_generator(), validation_steps=1, max_queue_size=10, workers=0, use_multiprocessing=False) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_sequence_input_to_fit_eval_predict(self): val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) class CustomSequence(data_utils.Sequence): def __getitem__(self, idx): return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) def __len__(self): return 2 class CustomSequenceChangingBatchSize(data_utils.Sequence): def __getitem__(self, idx): batch_size = 10 - idx return (np.ones([batch_size, 10], np.float32), np.ones([batch_size, 1], np.float32)) def __len__(self): return 2 model = testing_utils.get_small_mlp( num_hidden=10, num_classes=1, input_dim=10) model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy') model.fit(CustomSequence(), validation_data=val_data, epochs=2) model.evaluate(CustomSequence()) model.predict(CustomSequence()) with self.assertRaisesRegex(ValueError, '`y` argument is not supported'): model.fit(CustomSequence(), y=np.ones([10, 1])) with self.assertRaisesRegex(ValueError, '`sample_weight` argument is not supported'): model.fit(CustomSequence(), sample_weight=np.ones([10, 1])) model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy') 
model.fit(CustomSequenceChangingBatchSize(), validation_data=val_data, epochs=2) model.evaluate(CustomSequenceChangingBatchSize()) model.predict(CustomSequenceChangingBatchSize()) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_sequence_on_epoch_end(self): class MySequence(data_utils.Sequence): def __init__(self): self.epochs = 0 def __getitem__(self, idx): return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) def __len__(self): return 2 def on_epoch_end(self): self.epochs += 1 inputs = input_layer.Input(10) outputs = layers_module.Dense(1)(inputs) model = training.Model(inputs, outputs) model.compile('sgd', 'mse') my_seq = MySequence() model.fit(my_seq, epochs=2) self.assertEqual(my_seq.epochs, 2) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class TestConvertToGeneratorLike(tf.test.TestCase, parameterized.TestCase): simple_inputs = (np.ones((10, 10)), np.ones((10, 1))) nested_inputs = ((np.ones((10, 10)), np.ones((10, 20))), (np.ones((10, 1)), np.ones((10, 3)))) def _make_dataset(self, inputs, batches): return tf.data.Dataset.from_tensors(inputs).repeat(batches) def _make_iterator(self, inputs, batches): return tf.compat.v1.data.make_one_shot_iterator( self._make_dataset(inputs, batches)) def _make_generator(self, inputs, batches): def _gen(): for _ in range(batches): yield inputs return _gen() def _make_numpy(self, inputs, _): return inputs @parameterized.named_parameters( ('simple_dataset', _make_dataset, simple_inputs), ('simple_iterator', _make_iterator, simple_inputs), ('simple_generator', _make_generator, simple_inputs), ('simple_numpy', _make_numpy, simple_inputs), ('nested_dataset', _make_dataset, nested_inputs), ('nested_iterator', _make_iterator, nested_inputs), ('nested_generator', _make_generator, nested_inputs), ('nested_numpy', _make_numpy, nested_inputs)) def test_convert_to_generator_like(self, input_fn, inputs): expected_batches = 5 data = input_fn(self, inputs, expected_batches) # 
Dataset and Iterator not supported in Legacy Graph mode. if (not tf.executing_eagerly() and isinstance(data, (tf.data.Dataset, tf.compat.v1.data.Iterator))): return generator, steps = training_generator_v1.convert_to_generator_like( data, batch_size=2, steps_per_epoch=expected_batches) self.assertEqual(steps, expected_batches) for _ in range(expected_batches): outputs = next(generator) tf.nest.assert_same_structure(outputs, inputs) if __name__ == '__main__': tf.test.main()
19,087
34.812383
80
py
keras
keras-master/keras/engine/keras_tensor.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras Input Tensor used to track functional API Topology.""" import tensorflow.compat.v2 as tf from keras.utils import object_identity # pylint: disable=g-classes-have-attributes # Tensorflow tensors have a maximum rank of 254 # (See `MaxDimensions()` in //tensorflow/core/framework/tensor_shape.h ) # So we do not try to infer values for int32 tensors larger than this, # As they cannot represent shapes. _MAX_TENSOR_RANK = 254 class KerasTensor: """A representation of a Keras in/output during Functional API construction. `KerasTensor`s are tensor-like objects that represent the symbolic inputs and outputs of Keras layers during Functional model construction. They are comprised of the `tf.TypeSpec` of the (Composite)Tensor that will be consumed/produced in the corresponding location of the Functional model. KerasTensors are intended as a private API, so users should never need to directly instantiate `KerasTensor`s. **Building Functional Models with KerasTensors** `tf.keras.Input` produces `KerasTensor`s that represent the symbolic inputs to your model. Passing a `KerasTensor` to a `tf.keras.Layer` `__call__` lets the layer know that you are building a Functional model. 
The layer __call__ will infer the output signature and return `KerasTensor`s with `tf.TypeSpec`s corresponding to the symbolic outputs of that layer call. These output `KerasTensor`s will have all of the internal KerasHistory metadata attached to them that Keras needs to construct a Functional Model. Currently, layers infer the output signature by: * creating a scratch `FuncGraph` * making placeholders in the scratch graph that match the input typespecs * Calling `layer.call` on these placeholders * extracting the signatures of the outputs before clearing the scratch graph (Note: names assigned to KerasTensors by this process are not guaranteed to be unique, and are subject to implementation details). `tf.nest` methods are used to insure all of the inputs/output data structures get maintained, with elements swapped between KerasTensors and placeholders. In rare cases (such as when directly manipulating shapes using Keras layers), the layer may be able to partially infer the value of the output in addition to just inferring the signature. When this happens, the returned KerasTensor will also contain the inferred value information. Follow-on layers can use this information. during their own output signature inference. E.g. if one layer produces a symbolic `KerasTensor` that the next layer uses as the shape of its outputs, partially knowing the value helps infer the output shape. **Automatically converting TF APIs to layers**: If you passing a `KerasTensor` to a TF API that supports dispatching, Keras will automatically turn that API call into a lambda layer in the Functional model, and return KerasTensors representing the symbolic outputs. Most TF APIs that take only tensors as input and produce output tensors will support dispatching. Calling a `tf.function` does not support dispatching, so you cannot pass `KerasTensor`s as inputs to a `tf.function`. Higher-order APIs that take methods which produce tensors (e.g. 
`tf.while`, `tf.map_fn`, `tf.cond`) also do not currently support dispatching. So, you cannot directly pass KerasTensors as inputs to these APIs either. If you want to use these APIs inside of a Functional model, you must put them inside of a custom layer. Args: type_spec: The `tf.TypeSpec` for the symbolic input created by `tf.keras.Input`, or symbolically inferred for the output during a symbolic layer `__call__`. inferred_value: (Optional) a non-symbolic static value, possibly partially specified, that could be symbolically inferred for the outputs during a symbolic layer `__call__`. This will generally only happen when grabbing and manipulating `tf.int32` shapes directly as tensors. Statically inferring values in this way and storing them in the KerasTensor allows follow-on layers to infer output signatures more effectively. (e.g. when using a symbolic shape tensor to later construct a tensor with that shape). name: (optional) string name for this KerasTensor. Names automatically generated by symbolic layer `__call__`s are not guaranteed to be unique, and are subject to implementation details. """ def __init__(self, type_spec, inferred_value=None, name=None): """Constructs a KerasTensor.""" if not isinstance(type_spec, tf.TypeSpec): raise ValueError('KerasTensors must be constructed with a `tf.TypeSpec`.') self._type_spec = type_spec self._inferred_value = inferred_value self._name = name @property def type_spec(self): """Returns the `tf.TypeSpec` symbolically inferred for this Keras output.""" return self._type_spec @property def shape(self): """Returns the `TensorShape` symbolically inferred for this Keras output.""" # TODO(kaftan): This is only valid for normal/sparse/ragged tensors. # may need to raise an error when it's not valid for a type_spec, # but some keras code (e.g. 
build-related stuff) will likely fail when # it can't access shape or dtype return self._type_spec._shape # pylint: disable=protected-access @classmethod def from_tensor(cls, tensor): """Convert a traced (composite)tensor to a representative KerasTensor.""" if isinstance(tensor, tf.Tensor): name = getattr(tensor, 'name', None) type_spec = tf.type_spec_from_value(tensor) inferred_value = None if (type_spec.dtype == tf.int32 and type_spec.shape.rank is not None and type_spec.shape.rank < 2): # If this tensor might be representing shape information, # (dtype=int32, rank of 0 or 1, not too large to represent a shape) # we attempt to capture any value information tensorflow's # shape handling can extract from the current scratch graph. # # Even though keras layers each trace in their own scratch # graph, this shape value info extraction allows us to capture # a sizable and useful subset of the C++ shape value inference TF can do # if all tf ops appear in the same graph when using shape ops. # # Examples of things this cannot infer concrete dimensions for # that the full single-graph C++ shape inference sometimes can are: # * cases where the shape tensor is cast out of int32 before being # manipulated w/ floating point numbers then converted back # * cases where int32 tensors w/ rank >= 2 are manipulated before being # used as a shape tensor # * cases where int32 tensors too large to represent shapes are # manipulated to a smaller size before being used as a shape tensor inferred_value = tf.ones(shape=tensor).shape if inferred_value.dims: inferred_value = inferred_value.as_list() if len(inferred_value) > _MAX_TENSOR_RANK: inferred_value = None else: inferred_value = None return KerasTensor(type_spec, inferred_value=inferred_value, name=name) else: # Fallback to the generic arbitrary-typespec KerasTensor name = getattr(tensor, 'name', None) type_spec = tf.type_spec_from_value(tensor) return cls(type_spec, name=name) @classmethod def from_type_spec(cls, type_spec, 
name=None): return cls(type_spec=type_spec, name=name) def _to_placeholder(self): """Convert this KerasTensor to a placeholder in a graph.""" # If there is an inferred value for this tensor, inject the inferred value if self._inferred_value is not None: # If we suspect this KerasTensor might be representing a shape tensor, # and we were able to extract value information with TensorFlow's shape # handling when making the KerasTensor, we construct the placeholder by # re-injecting the inferred value information into the graph. We # do this injection through the shape of a placeholder, because that # allows us to specify partially-unspecified shape values. # # See the comment on value extraction inside `from_tensor` for more info. inferred_value = tf.shape( tf.compat.v1.placeholder( shape=self._inferred_value, dtype=tf.int32)) if self.type_spec.shape.rank == 0: # `tf.shape` always returns a rank-1, we may need to turn it back to a # scalar. inferred_value = inferred_value[0] return inferred_value # Use the generic conversion from typespec to a placeholder. def component_to_placeholder(component): return tf.compat.v1.placeholder(component.dtype, component.shape) return tf.nest.map_structure( component_to_placeholder, self.type_spec, expand_composites=True) def get_shape(self): return self.shape def __len__(self): raise TypeError('Keras symbolic inputs/outputs do not ' 'implement `__len__`. You may be ' 'trying to pass Keras symbolic inputs/outputs ' 'to a TF API that does not register dispatching, ' 'preventing Keras from automatically ' 'converting the API call to a lambda layer ' 'in the Functional Model. This error will also get raised ' 'if you try asserting a symbolic input/output directly.') @property def op(self): raise TypeError('Keras symbolic inputs/outputs do not ' 'implement `op`. 
You may be ' 'trying to pass Keras symbolic inputs/outputs ' 'to a TF API that does not register dispatching, ' 'preventing Keras from automatically ' 'converting the API call to a lambda layer ' 'in the Functional Model.') def __hash__(self): raise TypeError(f'Tensors are unhashable (this tensor: {self}). ' 'Instead, use tensor.ref() as the key.') # Note: This enables the KerasTensor's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Tensor class higher priority than an ndarray, or a # numpy matrix. # In the future explore changing this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Tensors interact # with ndarrays. __array_priority__ = 100 def __array__(self): raise TypeError( 'Cannot convert a symbolic Keras input/output to a numpy array. ' 'This error may indicate that you\'re trying to pass a symbolic value ' 'to a NumPy call, which is not supported. Or, ' 'you may be trying to pass Keras symbolic inputs/outputs ' 'to a TF API that does not register dispatching, ' 'preventing Keras from automatically ' 'converting the API call to a lambda layer ' 'in the Functional Model.') @property def is_tensor_like(self): return True def set_shape(self, shape): """Updates the shape of this KerasTensor. 
Mimics `tf.Tensor.set_shape()`.""" if not isinstance(shape, tf.TensorShape): shape = tf.TensorShape(shape) if shape.dims is not None: dim_list = [dim.value for dim in shape.dims] for dim in range(len(dim_list)): if dim_list[dim] is None and self.shape.dims is not None: dim_list[dim] = self.shape.dims[dim] shape = tf.TensorShape(dim_list) if not self.shape.is_compatible_with(shape): raise ValueError( f"Keras symbolic input/output's shape {self.shape} is not " f"compatible with supplied shape {shape}.") else: self._type_spec._shape = shape # pylint: disable=protected-access def __str__(self): symbolic_description = '' inferred_value_string = '' name_string = '' if hasattr(self, '_keras_history'): layer = self._keras_history.layer symbolic_description = ( ', description="created by layer \'%s\'"' % (layer.name,)) if self._inferred_value is not None: inferred_value_string = ( ', inferred_value=%s' % self._inferred_value) if self.name is not None: name_string = ', name=\'%s\'' % self._name return 'KerasTensor(type_spec=%s%s%s%s)' % ( self.type_spec, inferred_value_string, name_string, symbolic_description) def __repr__(self): symbolic_description = '' inferred_value_string = '' if isinstance(self.type_spec, tf.TensorSpec): type_spec_string = 'shape=%s dtype=%s' % (self.shape, self.dtype.name) else: type_spec_string = 'type_spec=%s' % self.type_spec if hasattr(self, '_keras_history'): layer = self._keras_history.layer symbolic_description = ' (created by layer \'%s\')' % (layer.name,) if self._inferred_value is not None: inferred_value_string = ( ' inferred_value=%s' % self._inferred_value) return '<KerasTensor: %s%s%s>' % ( type_spec_string, inferred_value_string, symbolic_description) @property def dtype(self): """Returns the `dtype` symbolically inferred for this Keras output.""" # TODO(kaftan): This is only valid for normal/sparse/ragged tensors. # may need to raise an error when it's not valid for a type_spec, # but some keras code (e.g. 
build-related stuff) will likely fail when # it can't access shape or dtype return self._type_spec._dtype # pylint: disable=protected-access def ref(self): """Returns a hashable reference object to this KerasTensor. The primary use case for this API is to put KerasTensors in a set/dictionary. We can't put tensors in a set/dictionary as `tensor.__hash__()` is not available and tensor equality (`==`) is supposed to produce a tensor representing if the two inputs are equal. See the documentation of `tf.Tensor.ref()` for more info. """ return object_identity.Reference(self) @property def node(self): """Find the corresponding `Node` that produce this keras_tensor. During functional model construction, Keras will attach `KerasHistory` to keras tensor to track the connectivity between calls of layers. Return None if there isn't any KerasHistory attached to this tensor. """ if hasattr(self, '_keras_history'): layer, node_index, _ = self._keras_history return layer.inbound_nodes[node_index] return None def __iter__(self): shape = None if self.shape.ndims is not None: shape = [dim.value for dim in self.shape.dims] if shape is None: raise TypeError('Cannot iterate over a Tensor with unknown shape.') if not shape: raise TypeError('Cannot iterate over a scalar.') if shape[0] is None: raise TypeError( 'Cannot iterate over a Tensor with unknown first dimension.') return _KerasTensorIterator(self, shape[0]) @property def name(self): """Returns the (non-unique, optional) name of this symbolic Keras value.""" return self._name @classmethod def _overload_all_operators(cls, tensor_class): # pylint: disable=invalid-name """Register overloads for all operators.""" for operator in tf.Tensor.OVERLOADABLE_OPERATORS: cls._overload_operator(tensor_class, operator) # We include `experimental_ref` for versions of TensorFlow that # still include the deprecated method in Tensors. 
if hasattr(tensor_class, 'experimental_ref'): cls._overload_operator(tensor_class, 'experimental_ref') @classmethod def _overload_operator(cls, tensor_class, operator): # pylint: disable=invalid-name """Overload an operator with the same implementation as a base Tensor class. We pull the operator out of the class dynamically to avoid ordering issues. Args: tensor_class: The (Composite)Tensor to get the method from. operator: string. The operator name. """ tensor_oper = getattr(tensor_class, operator) # Compatibility with Python 2: # Python 2 unbound methods have type checks for the first arg, # so we need to extract the underlying function tensor_oper = getattr(tensor_oper, '__func__', tensor_oper) setattr(cls, operator, tensor_oper) KerasTensor._overload_all_operators(tf.Tensor) # pylint: disable=protected-access class SparseKerasTensor(KerasTensor): """A specialized KerasTensor representation for `tf.sparse.SparseTensor`s. Specifically, it specializes the conversion to a placeholder in order to maintain dense shape information. """ def _to_placeholder(self): spec = self.type_spec # nest.map_structure loses dense shape information for sparse tensors. # So, we special-case sparse placeholder creation. # This only preserves shape information for top-level sparse tensors; # not for sparse tensors that are nested inside another composite # tensor. return tf.compat.v1.sparse_placeholder(dtype=spec.dtype, shape=spec.shape) class RaggedKerasTensor(KerasTensor): """A specialized KerasTensor representation for `tf.RaggedTensor`s. Specifically, it: 1. Specializes the conversion to a placeholder in order to maintain shape information for non-ragged dimensions. 2. Overloads the KerasTensor's operators with the RaggedTensor versions when they don't match the `tf.Tensor` versions 3. Exposes some of the instance method/attribute that are unique to the RaggedTensor API (such as ragged_rank). 
""" def _to_placeholder(self): ragged_spec = self.type_spec if ragged_spec.ragged_rank == 0 or ragged_spec.shape.rank is None: return super(RaggedKerasTensor, self)._to_placeholder() flat_shape = ragged_spec.shape[ragged_spec.ragged_rank:] result = tf.compat.v1.placeholder(ragged_spec.dtype, flat_shape) known_num_splits = [] prod = 1 for axis_size in ragged_spec.shape: if prod is not None: if axis_size is None or ( getattr(axis_size, 'value', True) is None): prod = None else: prod = prod * axis_size known_num_splits.append(prod) for axis in range(ragged_spec.ragged_rank, 0, -1): axis_size = ragged_spec.shape[axis] if axis_size is None or (getattr(axis_size, 'value', True) is None): num_splits = known_num_splits[axis-1] if num_splits is not None: num_splits = num_splits + 1 splits = tf.compat.v1.placeholder( ragged_spec.row_splits_dtype, [num_splits]) result = tf.RaggedTensor.from_row_splits( result, splits, validate=False) else: rowlen = tf.constant(axis_size, ragged_spec.row_splits_dtype) result = tf.RaggedTensor.from_uniform_row_length( result, rowlen, validate=False) return result @property def ragged_rank(self): return self.type_spec.ragged_rank # Overload slicing RaggedKerasTensor._overload_operator(tf.RaggedTensor, '__getitem__') # pylint: disable=protected-access # Overload math ops RaggedKerasTensor._overload_operator(tf.RaggedTensor, '__add__') # pylint: disable=protected-access RaggedKerasTensor._overload_operator(tf.RaggedTensor, '__radd__') # pylint: disable=protected-access RaggedKerasTensor._overload_operator(tf.RaggedTensor, '__mul__') # pylint: disable=protected-access RaggedKerasTensor._overload_operator(tf.RaggedTensor, '__rmul__') # pylint: disable=protected-access # TODO(b/161487382): # Special-case user-registered symbolic objects (registered by the # private `register_symbolic_tensor_type` method) by passing them between # scratch graphs directly. 
# This is needed to not break Tensorflow probability # while they finish migrating to composite tensors. class UserRegisteredSpec(tf.TypeSpec): """TypeSpec to represent user-registered symbolic objects.""" def __init__(self, shape, dtype): self.shape = shape self._dtype = dtype self.dtype = dtype def _component_specs(self): raise NotImplementedError def _from_components(self, components): raise NotImplementedError def _serialize(self): raise NotImplementedError def _to_components(self, value): raise NotImplementedError def value_type(self): raise NotImplementedError # TODO(b/161487382): # Special-case user-registered symbolic objects (registered by the # private `register_symbolic_tensor_type` method) by passing them between # scratch graphs directly. # This is needed to not break Tensorflow probability # while they finish migrating to composite tensors. class UserRegisteredTypeKerasTensor(KerasTensor): """KerasTensor that represents legacy register_symbolic_tensor_type.""" def __init__(self, user_registered_symbolic_object): x = user_registered_symbolic_object self._user_registered_symbolic_object = x type_spec = UserRegisteredSpec(x.shape, x.dtype) name = getattr(x, 'name', None) super(UserRegisteredTypeKerasTensor, self).__init__(type_spec, name) @classmethod def from_tensor(cls, tensor): return cls(tensor) @classmethod def from_type_spec(cls, type_spec, name=None): raise NotImplementedError('You cannot instantiate a KerasTensor ' 'directly from TypeSpec: %s' % type_spec) def _to_placeholder(self): return self._user_registered_symbolic_object class _KerasTensorIterator: """Iterates over the leading dim of a KerasTensor. Performs 0 error checks.""" def __init__(self, tensor, dim0): self._tensor = tensor self._index = 0 self._limit = dim0 def __iter__(self): return self def __next__(self): if self._index == self._limit: raise StopIteration result = self._tensor[self._index] self._index += 1 return result # Specify the mappings of tensor class to KerasTensor class. 
# This is specifically a list instead of a dict for now because # 1. we do a check w/ isinstance because a key lookup based on class # would miss subclasses # 2. a list allows us to control lookup ordering # We include ops.Tensor -> KerasTensor in the first position as a fastpath, # *and* include object -> KerasTensor at the end as a catch-all. # We can re-visit these choices in the future as needed. keras_tensor_classes = [ (tf.Tensor, KerasTensor), (tf.SparseTensor, SparseKerasTensor), (tf.RaggedTensor, RaggedKerasTensor), (object, KerasTensor) ] def register_keras_tensor_specialization(cls, keras_tensor_subclass): """Register a specialized KerasTensor subclass for a Tensor type.""" # We always leave (object, KerasTensor) at the end as a generic fallback keras_tensor_classes.insert(-1, (cls, keras_tensor_subclass)) def keras_tensor_to_placeholder(x): """Construct a graph placeholder to represent a KerasTensor when tracing.""" if isinstance(x, KerasTensor): return x._to_placeholder() # pylint: disable=protected-access else: return x def keras_tensor_from_tensor(tensor): """Convert a traced (composite)tensor to a representative KerasTensor.""" # Create a specialized KerasTensor that supports instance methods, # operators, and additional value inference if possible keras_tensor_cls = None for tensor_type, cls in keras_tensor_classes: if isinstance(tensor, tensor_type): keras_tensor_cls = cls break out = keras_tensor_cls.from_tensor(tensor) if hasattr(tensor, '_keras_mask'): out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask) # pylint: disable=protected-access return out def keras_tensor_from_type_spec(type_spec, name=None): """Convert a TypeSpec to a representative KerasTensor.""" # Create a specialized KerasTensor that supports instance methods, # operators, and additional value inference if possible keras_tensor_cls = None value_type = type_spec.value_type for tensor_type, cls in keras_tensor_classes: if issubclass(value_type, tensor_type): 
keras_tensor_cls = cls break return keras_tensor_cls.from_type_spec(type_spec, name=name)
24,819
39.357724
104
py
keras
keras-master/keras/engine/ragged_keras_tensor_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RaggedKerasTensor tests.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import keras_parameterized from keras import layers from keras.engine import training class RaggedKerasTensorTest(keras_parameterized.TestCase): @parameterized.parameters( {'batch_size': None, 'shape': (None, 5), 'ragged_rank': 1}, {'batch_size': None, 'shape': (None, 3, 5), 'ragged_rank': 1}, {'batch_size': None, 'shape': (5, None), 'ragged_rank': 2}, {'batch_size': None, 'shape': (3, 5, None), 'ragged_rank': 3}, {'batch_size': None, 'shape': (None, 3, 5, None), 'ragged_rank': 4}, {'batch_size': None, 'shape': (2, 3, None, 4, 5, None), 'ragged_rank': 6}, {'batch_size': 8, 'shape': (None, 5), 'ragged_rank': 1}, {'batch_size': 9, 'shape': (None, 3, 5), 'ragged_rank': 1}, {'batch_size': 1, 'shape': (5, None), 'ragged_rank': 2}, {'batch_size': 4, 'shape': (3, 5, None), 'ragged_rank': 3}, {'batch_size': 7, 'shape': (None, 3, 5, None), 'ragged_rank': 4}, {'batch_size': 12, 'shape': (2, 3, None, 4, 5, None), 'ragged_rank': 6}, ) def test_to_placeholder(self, shape, batch_size, ragged_rank): inp = layers.Input(shape=shape, batch_size=batch_size, ragged=True) self.assertEqual(inp.ragged_rank, ragged_rank) self.assertAllEqual(inp.shape, [batch_size] + list(shape)) with 
tf.__internal__.FuncGraph('test').as_default(): placeholder = inp._to_placeholder() self.assertEqual(placeholder.ragged_rank, ragged_rank) self.assertAllEqual(placeholder.shape, [batch_size] + list(shape)) def test_add(self): inp = layers.Input(shape=[None], ragged=True) out = inp + inp model = training.Model(inp, out) x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]]) self.assertAllEqual(model(x), x + x) def test_mul(self): inp = layers.Input(shape=[None], ragged=True) out = inp * inp model = training.Model(inp, out) x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]]) self.assertAllEqual(model(x), x * x) def test_sub(self): inp = layers.Input(shape=[None], ragged=True) out = inp - inp model = training.Model(inp, out) x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]]) self.assertAllEqual(model(x), x - x) def test_div(self): inp = layers.Input(shape=[None], ragged=True) out = inp / inp model = training.Model(inp, out) x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]]) self.assertAllEqual(model(x), x / x) def test_getitem(self): # Test slicing / getitem inp = layers.Input(shape=(None, 2), ragged=True) out = inp[:, :2] model = training.Model(inp, out) x = tf.RaggedTensor.from_row_lengths( tf.cast(np.random.randn(6, 2), dtype=tf.float32), [3, 1, 2]) expected = x[:, :2] self.assertAllEqual(model(x), expected) # Test that models w/ slicing are correctly serialized/deserialized config = model.get_config() model = training.Model.from_config(config) self.assertAllEqual(model(x), expected) @parameterized.parameters( {'property_name': 'values'}, {'property_name': 'flat_values'}, {'property_name': 'row_splits'}, {'property_name': 'nested_row_splits'}, ) def test_instance_property(self, property_name): inp = layers.Input(shape=[None], ragged=True) out = getattr(inp, property_name) model = training.Model(inp, out) x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]]) expected_property = getattr(x, property_name) self.assertAllEqual(model(x), expected_property) # Test that it works with 
serialization and deserialization as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected_property) @parameterized.parameters( {'name': 'value_rowids'}, {'name': 'nested_value_rowids'}, {'name': 'nrows'}, {'name': 'row_starts'}, {'name': 'row_limits'}, {'name': 'row_lengths'}, {'name': 'nested_row_lengths'}, {'name': 'bounding_shape'}, { 'name': 'with_values', 'args': [[1, 2, 3, 4, 5, 6]] }, { 'name': 'with_flat_values', 'kwargs': { 'new_values': [1, 2, 3, 4, 5, 6] } }, { 'name': 'with_row_splits_dtype', 'kwargs': { 'dtype': tf.int32 } }, { 'name': 'merge_dims', 'args': [0], 'kwargs': { 'inner_axis': 1 } }, {'name': 'to_tensor'}, {'name': 'to_sparse'}, ) def test_instance_method(self, name, args=None, kwargs=None): if not args: args = [] if not kwargs: kwargs = {} inp = layers.Input(shape=[None], ragged=True) out = getattr(inp, name)(*args, **kwargs) model = training.Model(inp, out) x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]]) expected_property = getattr(x, name)(*args, **kwargs) # We expand composites before checking equality because # assertAllEqual otherwise wouldn't work for SparseTensor outputs for a, b in zip(tf.nest.flatten(model(x), expand_composites=True), tf.nest.flatten(expected_property, expand_composites=True)): self.assertAllEqual(a, b) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) for a, b in zip(tf.nest.flatten(model2(x), expand_composites=True), tf.nest.flatten(expected_property, expand_composites=True)): self.assertAllEqual(a, b) class RaggedTensorClassMethodAsLayerTest(keras_parameterized.TestCase): def test_from_value_rowids(self): inp = layers.Input(shape=[None]) out = tf.RaggedTensor.from_value_rowids( inp, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) model = training.Model(inp, out) x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6]) expected = 
tf.RaggedTensor.from_value_rowids( x, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_row_splits(self): inp = layers.Input(shape=[None]) out = tf.RaggedTensor.from_row_splits( inp, row_splits=[0, 4, 4, 7, 8, 8]) model = training.Model(inp, out) x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6]) expected = tf.RaggedTensor.from_row_splits( x, row_splits=[0, 4, 4, 7, 8, 8]) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_row_lengths(self): inp = layers.Input(shape=[None]) out = tf.RaggedTensor.from_row_lengths( inp, row_lengths=[4, 0, 3, 1, 0]) model = training.Model(inp, out) x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6]) expected = tf.RaggedTensor.from_row_lengths( x, row_lengths=[4, 0, 3, 1, 0]) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_row_starts(self): inp = layers.Input(shape=[None]) out = tf.RaggedTensor.from_row_starts( inp, row_starts=[0, 4, 4, 7, 8]) model = training.Model(inp, out) x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6]) expected = tf.RaggedTensor.from_row_starts( x, row_starts=[0, 4, 4, 7, 8]) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_row_limits(self): row_limits = tf.constant([2, 2, 5, 6, 7], tf.int64) inp = layers.Input(shape=[None], dtype=tf.string) out = 
tf.RaggedTensor.from_row_limits( inp, row_limits, validate=False) model = training.Model(inp, out) x = tf.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) expected = tf.RaggedTensor.from_row_limits( x, row_limits, validate=False) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_uniform_row_length(self): inp = layers.Input(shape=[None]) out = tf.RaggedTensor.from_uniform_row_length(inp, 2, 8) model = training.Model(inp, out) x = tf.constant( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]) expected = tf.RaggedTensor.from_uniform_row_length(x, 2, 8) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_nested_value_row_ids(self): nested_value_rowids = [ tf.constant([0, 0, 1, 3, 3], tf.int64), tf.constant([0, 0, 2, 2, 2, 3, 4], tf.int64) ] inp = layers.Input(shape=[None], dtype=tf.string) out = tf.RaggedTensor.from_nested_value_rowids( inp, nested_value_rowids) model = training.Model(inp, out) x = tf.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) expected = tf.RaggedTensor.from_nested_value_rowids( x, nested_value_rowids) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_nested_row_splits(self): nested_row_splits = [ tf.constant([0, 2, 3, 3, 5], tf.int64), tf.constant([0, 2, 2, 5, 6, 7], tf.int64) ] inp = layers.Input(shape=[None], dtype=tf.string) out = tf.RaggedTensor.from_nested_row_splits( inp, nested_row_splits) model = training.Model(inp, out) x = tf.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) expected 
= tf.RaggedTensor.from_nested_row_splits( x, nested_row_splits) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_nested_row_lengths(self): nested_row_lengths = [ tf.constant([2, 1, 0, 2], tf.int64), tf.constant([2, 0, 3, 1, 1], tf.int64) ] inp = layers.Input(shape=[None], dtype=tf.string) out = tf.RaggedTensor.from_nested_row_lengths( inp, nested_row_lengths) model = training.Model(inp, out) x = tf.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) expected = tf.RaggedTensor.from_nested_row_lengths( x, nested_row_lengths) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_tensor(self): inp = layers.Input(shape=[None], ragged=False) out = tf.RaggedTensor.from_tensor(inp) model = training.Model(inp, out) x = tf.constant([[3., 4.], [1., 2.], [3., 5.]]) expected = tf.RaggedTensor.from_tensor(x) self.assertAllEqual(model(x), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(x), expected) def test_from_sparse(self): inp = layers.Input(shape=[None], sparse=True, dtype=tf.string) out = tf.RaggedTensor.from_sparse(inp) model = training.Model(inp, out) indices = [[0, 0], [1, 0], [1, 1], [2, 0]] values = [b'a', b'b', b'c', b'd'] shape = [4, 5] sp_value = tf.SparseTensor(indices, values, shape) expected = tf.RaggedTensor.from_sparse(sp_value) self.assertAllEqual(model(sp_value), expected) # Test that the model can serialize and deserialize as well model_config = model.get_config() model2 = training.Model.from_config(model_config) self.assertAllEqual(model2(sp_value), expected) if 
__name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.compat.v1.enable_v2_tensorshape() tf.test.main()
13,548
34.843915
80
py
keras
keras-master/keras/engine/base_preprocessing_layer_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras' base preprocessing layer.""" import os import keras from keras import keras_parameterized from keras import testing_utils from keras.engine import base_preprocessing_layer import numpy as np import tensorflow.compat.v2 as tf # Define a test-only implementation of BasePreprocessingLayer to validate # its correctness directly. class AddingPreprocessingLayer(base_preprocessing_layer.PreprocessingLayer): def build(self, input_shape): super(AddingPreprocessingLayer, self).build(input_shape) self.sum = tf.Variable(0., dtype=tf.float32) def update_state(self, data): self.sum.assign_add(tf.reduce_sum(tf.cast(data, tf.float32))) def reset_state(self): # pylint: disable=method-hidden self.sum.assign(0.) def set_total(self, sum_value): """This is an example of how a subclass would implement a direct setter. Args: sum_value: The total to set. 
""" self.sum.assign(sum_value) def call(self, inputs): return inputs + self.sum @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class PreprocessingLayerTest(keras_parameterized.TestCase): def test_adapt_bad_input_fails(self): """Test that non-Dataset/Numpy inputs cause a reasonable error.""" input_dataset = {"foo": 0} layer = AddingPreprocessingLayer() if tf.executing_eagerly(): with self.assertRaisesRegex(ValueError, "Failed to find data adapter"): layer.adapt(input_dataset) else: with self.assertRaisesRegex(ValueError, "requires a"): layer.adapt(input_dataset) def test_adapt_infinite_dataset_fails(self): """Test that preproc layers fail if an infinite dataset is passed.""" input_dataset = tf.data.Dataset.from_tensor_slices( np.array([[1], [2], [3], [4], [5], [0]])).repeat() layer = AddingPreprocessingLayer() if tf.executing_eagerly(): with self.assertRaisesRegex(ValueError, "infinite dataset"): layer.adapt(input_dataset) else: with self.assertRaisesRegex(ValueError, ".*infinite number of elements.*"): layer.adapt(input_dataset) def test_setter_update(self): """Test the prototyped setter method.""" input_data = keras.Input(shape=(1,)) layer = AddingPreprocessingLayer() output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() layer.set_total(15) self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) def test_pre_build_adapt_update_numpy(self): """Test that preproc layers can adapt() before build() is called.""" input_dataset = np.array([1, 2, 3, 4, 5]) layer = AddingPreprocessingLayer() layer.adapt(input_dataset) input_data = keras.Input(shape=(1,)) output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) def test_post_build_adapt_update_numpy(self): """Test that preproc layers can adapt() after build() is called.""" input_dataset = 
np.array([1, 2, 3, 4, 5]) input_data = keras.Input(shape=(1,)) layer = AddingPreprocessingLayer() output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() layer.adapt(input_dataset) self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) def test_pre_build_adapt_update_dataset(self): """Test that preproc layers can adapt() before build() is called.""" input_dataset = tf.data.Dataset.from_tensor_slices( np.array([[1], [2], [3], [4], [5], [0]])) layer = AddingPreprocessingLayer() layer.adapt(input_dataset) input_data = keras.Input(shape=(1,)) output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) def test_post_build_adapt_update_dataset(self): """Test that preproc layers can adapt() after build() is called.""" input_dataset = tf.data.Dataset.from_tensor_slices( np.array([[1], [2], [3], [4], [5], [0]])) input_data = keras.Input(shape=(1,)) layer = AddingPreprocessingLayer() output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() layer.adapt(input_dataset) self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) def test_weight_based_state_transfer(self): """Test that preproc layers can transfer state via get/set weights..""" def get_model(): input_data = keras.Input(shape=(1,)) layer = AddingPreprocessingLayer() output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() return (model, layer) input_dataset = np.array([1, 2, 3, 4, 5]) model, layer = get_model() layer.adapt(input_dataset) self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) # Create a new model and verify it has no state carryover. 
weights = model.get_weights() model_2, _ = get_model() self.assertAllEqual([[1], [2], [3]], model_2.predict([1., 2., 3.])) # Transfer state from model to model_2 via get/set weights. model_2.set_weights(weights) self.assertAllEqual([[16], [17], [18]], model_2.predict([1., 2., 3.])) def test_loading_without_providing_class_fails(self): input_data = keras.Input(shape=(1,)) layer = AddingPreprocessingLayer() output = layer(input_data) model = keras.Model(input_data, output) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.variables_initializer(model.variables)) output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model") model.save(output_path, save_format="tf") with self.assertRaisesRegex(RuntimeError, "Unable to restore a layer of"): _ = keras.models.load_model(output_path) def test_adapt_sets_input_shape_rank(self): """Check that `.adapt()` sets the `input_shape`'s rank.""" # Shape: (3,1,2) adapt_dataset = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]], dtype=np.float32) layer = AddingPreprocessingLayer() layer.adapt(adapt_dataset) input_dataset = np.array([[[1., 2.], [3., 4.]], [[3., 4.], [5., 6.]]], dtype=np.float32) layer(input_dataset) model = keras.Sequential([layer]) self.assertTrue(model.built) self.assertEqual(model.input_shape, (None, None, None)) def test_adapt_doesnt_overwrite_input_shape(self): """Check that `.adapt()` doesn't change the `input_shape`.""" # Shape: (3, 1, 2) adapt_dataset = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]], dtype=np.float32) layer = AddingPreprocessingLayer(input_shape=[1, 2]) layer.adapt(adapt_dataset) model = keras.Sequential([layer]) self.assertTrue(model.built) self.assertEqual(model.input_shape, (None, 1, 2)) class PreprocessingLayerV1Test(keras_parameterized.TestCase): def test_adapt_fails(self): """Test that calling adapt leads to a runtime error.""" input_dataset = {"foo": 0} with tf.Graph().as_default(): layer = AddingPreprocessingLayer() with self.assertRaisesRegex(RuntimeError, "`adapt` is only 
supported in tensorflow v2"): layer.adapt(input_dataset) if __name__ == "__main__": tf.test.main()
8,358
34.121849
80
py
keras
keras-master/keras/engine/data_adapter_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DataAdapter tests.""" import tensorflow.compat.v2 as tf import math from absl.testing import parameterized import numpy as np import keras from keras import keras_parameterized from keras import testing_utils from keras.engine import data_adapter from keras.utils import data_utils class DummyArrayLike: """Dummy array-like object.""" def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[key] @property def shape(self): return self.data.shape @property def dtype(self): return self.data.dtype def fail_on_convert(x, **kwargs): _ = x _ = kwargs raise TypeError('Cannot convert DummyArrayLike to a tensor') tf.register_tensor_conversion_function(DummyArrayLike, fail_on_convert) class DataAdapterTestBase(keras_parameterized.TestCase): def setUp(self): super(DataAdapterTestBase, self).setUp() self.batch_size = 5 self.numpy_input = np.zeros((50, 10)) self.numpy_target = np.ones(50) self.tensor_input = tf.constant(2.0, shape=(50, 10)) self.tensor_target = tf.ones((50,)) self.arraylike_input = DummyArrayLike(self.numpy_input) self.arraylike_target = DummyArrayLike(self.numpy_target) self.dataset_input = tf.data.Dataset.from_tensor_slices( (self.numpy_input, self.numpy_target)).shuffle(50).batch( self.batch_size) def generator(): 
while True: yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size)) self.generator_input = generator() self.iterator_input = data_utils.threadsafe_generator(generator)() self.sequence_input = TestSequence(batch_size=self.batch_size, feature_shape=10) self.text_input = [['abc']] self.bytes_input = [[b'abc']] self.model = keras.models.Sequential( [keras.layers.Dense(8, input_shape=(10,), activation='softmax')]) class TestSequence(data_utils.Sequence): def __init__(self, batch_size, feature_shape): self.batch_size = batch_size self.feature_shape = feature_shape def __getitem__(self, item): return (np.zeros((self.batch_size, self.feature_shape)), np.ones((self.batch_size,))) def __len__(self): return 10 class TensorLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(TensorLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.TensorLikeDataAdapter def test_can_handle_numpy(self): self.assertTrue(self.adapter_cls.can_handle(self.numpy_input)) self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) self.assertFalse(self.adapter_cls.can_handle(self.text_input)) self.assertFalse(self.adapter_cls.can_handle(self.bytes_input)) def test_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_batch_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.batch_size(), 5) def test_partial_batch_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=4) self.assertEqual(adapter.get_size(), 13) # 50/4 self.assertTrue(adapter.has_partial_batch()) self.assertEqual(adapter.partial_batch_size(), 
2) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.numpy_input, self.numpy_target, batch_size=5) def test_can_handle_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input))) self.assertTrue( self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0])) self.assertTrue( self.adapter_cls.can_handle( pd.DataFrame(self.numpy_input), pd.DataFrame(self.numpy_input)[0])) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') input_a = keras.Input(shape=(3,), name='input_a') input_b = keras.Input(shape=(3,), name='input_b') input_c = keras.Input(shape=(1,), name='input_b') x = keras.layers.Dense(4, name='dense_1')(input_a) y = keras.layers.Dense(3, name='dense_2')(input_b) z = keras.layers.Dense(1, name='dense_3')(input_c) model_1 = keras.Model(inputs=input_a, outputs=x) model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y]) model_3 = keras.Model(inputs=input_c, outputs=z) model_1.compile(optimizer='rmsprop', loss='mse') model_2.compile(optimizer='rmsprop', loss='mse') model_3.compile(optimizer='rmsprop', loss='mse') input_a_np = np.random.random((10, 3)) input_b_np = 
np.random.random((10, 3)) input_a_df = pd.DataFrame(input_a_np) input_b_df = pd.DataFrame(input_b_np) output_a_df = pd.DataFrame(np.random.random((10, 4))) output_b_df = pd.DataFrame(np.random.random((10, 3))) output_c_series = pd.DataFrame(np.random.random((10, 4)))[0] model_1.fit(input_a_df, output_a_df) model_2.fit([input_a_df, input_b_df], [output_a_df, output_b_df]) model_3.fit(input_a_df[[0]], output_c_series) model_1.fit([input_a_df], [output_a_df]) model_1.fit({'input_a': input_a_df}, output_a_df) model_2.fit({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) model_1.evaluate(input_a_df, output_a_df) model_2.evaluate([input_a_df, input_b_df], [output_a_df, output_b_df]) model_3.evaluate(input_a_df[[0]], output_c_series) model_1.evaluate([input_a_df], [output_a_df]) model_1.evaluate({'input_a': input_a_df}, output_a_df) model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) # Verify predicting on pandas vs numpy returns the same result predict_1_pandas = model_1.predict(input_a_df) predict_2_pandas = model_2.predict([input_a_df, input_b_df]) predict_3_pandas = model_3.predict(input_a_df[0]) predict_1_numpy = model_1.predict(input_a_np) predict_2_numpy = model_2.predict([input_a_np, input_b_np]) predict_3_numpy = model_3.predict(np.asarray(input_a_df[0])) self.assertAllClose(predict_1_numpy, predict_1_pandas) self.assertAllClose(predict_2_numpy, predict_2_pandas) self.assertAllClose(predict_3_numpy, predict_3_pandas) # Extra ways to pass in dataframes model_1.predict([input_a_df]) model_1.predict({'input_a': input_a_df}) model_2.predict({'input_a': input_a_df, 'input_b': input_b_df}) def test_can_handle(self): self.assertTrue(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input)) self.assertFalse( self.adapter_cls.can_handle(self.arraylike_input, 
self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) self.assertFalse(self.adapter_cls.can_handle(self.text_input)) self.assertFalse(self.adapter_cls.can_handle(self.bytes_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.tensor_input, self.tensor_target, batch_size=5) def test_size(self): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_shuffle_correctness(self): num_samples = 100 batch_size = 32 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter).numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(second_epoch_data)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_batch_shuffle_correctness(self): num_samples = 100 batch_size = 6 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, and that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch remains contiguous for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class GenericArrayLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GenericArrayLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter def test_can_handle_some_numpy(self): self.assertTrue(self.adapter_cls.can_handle( self.arraylike_input)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) # Because adapters are mutually exclusive, don't handle cases # where all the data is numpy or an eagertensor self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) # But do handle mixes that include generic arraylike data self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.arraylike_target)) self.assertTrue( 
self.adapter_cls.can_handle(self.arraylike_input, self.numpy_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.tensor_target)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) self.assertFalse(self.adapter_cls.can_handle(self.text_input)) self.assertFalse(self.adapter_cls.can_handle(self.bytes_input)) def test_size(self): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.arraylike_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): # First verify that DummyArrayLike can't be converted to a Tensor with self.assertRaises(TypeError): tf.convert_to_tensor(self.arraylike_input) # Then train on the array like. # It should not be converted to a tensor directly (which would force it into # memory), only the sliced data should be converted. 
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.predict(self.arraylike_input, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.numpy_target, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.numpy_target, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_tensor_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.tensor_target, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.tensor_target, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_shuffle_correctness(self): num_samples = 100 batch_size = 32 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): 
ds_data.append(next(ds_iter).numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_batch_shuffle_correctness(self): num_samples = 100 batch_size = 6 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, but that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch is shuffled contiguous data for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. 
second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class DatasetAdapterTest(DataAdapterTestBase): def setUp(self): super(DatasetAdapterTest, self).setUp() self.adapter_cls = data_adapter.DatasetAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) 
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): dataset = self.adapter_cls(self.dataset_input).get_dataset() self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(dataset) def test_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.batch_size()) def test_partial_batch(self): adapter = self.adapter_cls(self.dataset_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.dataset_input, y=self.dataset_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegex(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input) class GeneratorDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GeneratorDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GeneratorDataAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertTrue(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) self.assertFalse(self.adapter_cls.can_handle(self.text_input)) self.assertFalse(self.adapter_cls.can_handle(self.bytes_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) 
self.model.fit(self.generator_input, steps_per_epoch=10) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @testing_utils.run_v2_only @data_utils.dont_use_multiprocessing_pool def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.generator_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.generator_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.generator_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.generator_input, y=self.generator_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegex(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls( self.generator_input, sample_weights=self.generator_input) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_not_shuffled(self): def generator(): for i in range(10): yield np.ones((1, 1)) * i adapter = self.adapter_cls(generator(), shuffle=True) for i, data in enumerate(adapter.get_dataset()): self.assertEqual(i, data[0].numpy().flatten()) def test_model_without_forward_pass(self): class MyModel(keras.Model): def train_step(self, data): return {'loss': 0.} def test_step(self, data): return {'loss': 0.} model 
= MyModel() model.compile('rmsprop') model.fit(self.generator_input, steps_per_epoch=5) out = model.evaluate(self.generator_input, steps=5) self.assertEqual(out, 0) class KerasSequenceAdapterTest(DataAdapterTestBase): def setUp(self): super(KerasSequenceAdapterTest, self).setUp() self.adapter_cls = data_adapter.KerasSequenceAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertTrue(self.adapter_cls.can_handle(self.sequence_input)) self.assertFalse(self.adapter_cls.can_handle(self.text_input)) self.assertFalse(self.adapter_cls.can_handle(self.bytes_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @testing_utils.run_v2_only @data_utils.dont_use_multiprocessing_pool def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. 
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.get_size(), 10) def test_batch_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.sequence_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.sequence_input, y=self.sequence_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegex(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input) class DataHandlerTest(keras_parameterized.TestCase): def test_finite_dataset_with_steps_per_epoch(self): data = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1) # User can choose to only partially consume `Dataset`. 
data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=2) self.assertEqual(data_handler.inferred_steps, 2) self.assertFalse(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1], [2, 3]]) def test_finite_dataset_without_steps_per_epoch(self): data = tf.data.Dataset.from_tensor_slices([0, 1, 2]).batch(1) data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2) self.assertEqual(data_handler.inferred_steps, 3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]]) def test_finite_dataset_with_steps_per_epoch_exact_size(self): data = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1) # If user specifies exact size of `Dataset` as `steps_per_epoch`, # create a new iterator each epoch. 
data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=4) self.assertTrue(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]]) def test_infinite_dataset_with_steps_per_epoch(self): data = tf.data.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat() data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]]) def test_unknown_cardinality_dataset_with_steps_per_epoch(self): ds = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3, 4, 5, 6]) filtered_ds = ds.filter(lambda x: x < 4) self.assertEqual( tf.data.experimental.cardinality(filtered_ds).numpy(), tf.data.experimental.UNKNOWN_CARDINALITY) # User can choose to only partially consume `Dataset`. 
data_handler = data_adapter.DataHandler( filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2) self.assertFalse(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[0, 1], [2, 3]]) self.assertEqual(data_handler.inferred_steps, 2) def test_unknown_cardinality_dataset_without_steps_per_epoch(self): ds = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3, 4, 5, 6]) filtered_ds = ds.filter(lambda x: x < 4) self.assertEqual( tf.data.experimental.cardinality(filtered_ds).numpy(), tf.data.experimental.UNKNOWN_CARDINALITY) data_handler = data_adapter.DataHandler( filtered_ds, initial_epoch=0, epochs=2) self.assertEqual(data_handler.inferred_steps, None) self.assertTrue(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] with data_handler.catch_stop_iteration(): for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]]) self.assertEqual(data_handler.inferred_steps, 4) def test_insufficient_data(self): ds = tf.data.Dataset.from_tensor_slices([0, 1]) ds = ds.filter(lambda *args, **kwargs: True) data_handler = data_adapter.DataHandler( ds, initial_epoch=0, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): with data_handler.catch_stop_iteration(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertTrue(data_handler._insufficient_data) self.assertEqual(returned_data, [[0, 1]]) def test_numpy(self): x = np.array([0, 1, 2]) y = 
np.array([0, 2, 4]) sw = np.array([0, 4, 8]) data_handler = data_adapter.DataHandler( x=x, y=y, sample_weight=sw, batch_size=1, epochs=2) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[(0, 0, 0), (1, 2, 4), (2, 4, 8)], [(0, 0, 0), (1, 2, 4), (2, 4, 8)]]) def test_generator(self): def generator(): for _ in range(2): for step in range(3): yield (tf.convert_to_tensor([step]),) data_handler = data_adapter.DataHandler( generator(), epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_composite_tensor(self): st = tf.SparseTensor( indices=[[0, 0], [1, 0], [2, 0]], values=[0, 1, 2], dense_shape=[3, 1]) data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate( tf.nest.map_structure(tf.sparse.to_dense, returned_data)) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_iterator(self): def generator(): for _ in range(2): for step in range(3): yield (tf.convert_to_tensor([step]),) it = iter(tf.data.Dataset.from_generator( generator, output_types=('float32',))) data_handler = data_adapter.DataHandler(it, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) 
returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_list_of_scalars(self): data_handler = data_adapter.DataHandler([[0], [1], [2]], epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_class_weight_user_errors(self): with self.assertRaisesRegex(ValueError, 'to be a dict with keys'): data_adapter.DataHandler( x=[[0], [1], [2]], y=[[2], [1], [0]], batch_size=1, sample_weight=[[1.], [2.], [4.]], class_weight={ 0: 0.5, 1: 1., 3: 1.5 # Skips class `2`. }) with self.assertRaisesRegex(ValueError, 'with a single output'): data_adapter.DataHandler( x=np.ones((10, 1)), y=[np.ones((10, 1)), np.zeros((10, 1))], batch_size=2, class_weight={ 0: 0.5, 1: 1., 2: 1.5 }) @parameterized.named_parameters(('numpy', True), ('dataset', False)) def test_single_x_input_no_tuple_wrapping(self, use_numpy): x = np.ones((10, 1)) if use_numpy: batch_size = 2 else: x = tf.data.Dataset.from_tensor_slices(x).batch(2) batch_size = None data_handler = data_adapter.DataHandler(x, batch_size=batch_size) for _, iterator in data_handler.enumerate_epochs(): for _ in data_handler.steps(): # Check that single x input is not wrapped in a tuple. 
self.assertIsInstance(next(iterator), tf.Tensor) class TestValidationSplit(keras_parameterized.TestCase): @parameterized.named_parameters(('numpy_arrays', True), ('tensors', False)) def test_validation_split_unshuffled(self, use_numpy): if use_numpy: x = np.array([0, 1, 2, 3, 4]) y = np.array([0, 2, 4, 6, 8]) sw = np.array([0, 4, 8, 12, 16]) else: x = tf.convert_to_tensor([0, 1, 2, 3, 4]) y = tf.convert_to_tensor([0, 2, 4, 6, 8]) sw = tf.convert_to_tensor([0, 4, 8, 12, 16]) (train_x, train_y, train_sw), (val_x, val_y, val_sw) = ( data_adapter.train_validation_split((x, y, sw), validation_split=0.2)) if use_numpy: train_x = tf.convert_to_tensor(train_x) train_y = tf.convert_to_tensor(train_y) train_sw = tf.convert_to_tensor(train_sw) val_x = tf.convert_to_tensor(val_x) val_y = tf.convert_to_tensor(val_y) val_sw = tf.convert_to_tensor(val_sw) self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3]) self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6]) self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12]) self.assertEqual(val_x.numpy().tolist(), [4]) self.assertEqual(val_y.numpy().tolist(), [8]) self.assertEqual(val_sw.numpy().tolist(), [16]) def test_validation_split_user_error(self): with self.assertRaisesRegex(ValueError, 'is only supported for Tensors'): data_adapter.train_validation_split( lambda: np.ones((10, 1)), validation_split=0.2) def test_validation_split_examples_too_few(self): with self.assertRaisesRegex(ValueError, 'not sufficient to split it'): data_adapter.train_validation_split( np.ones((1, 10)), validation_split=0.2) def test_validation_split_none(self): train_sw, val_sw = data_adapter.train_validation_split( None, validation_split=0.2) self.assertIsNone(train_sw) self.assertIsNone(val_sw) (_, train_sw), (_, val_sw) = data_adapter.train_validation_split( (np.ones((10, 1)), None), validation_split=0.2) self.assertIsNone(train_sw) self.assertIsNone(val_sw) class ListsOfScalarsDataAdapterTest(DataAdapterTestBase): def setUp(self): 
super(ListsOfScalarsDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.ListsOfScalarsDataAdapter def test_can_list_inputs(self): self.assertTrue(self.adapter_cls.can_handle(self.text_input)) self.assertTrue(self.adapter_cls.can_handle(self.bytes_input)) self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) self.assertFalse(self.adapter_cls.can_handle([])) if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
43,822
38.267921
104
py
keras
keras-master/keras/engine/training_eager_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from keras import keras_parameterized from keras import metrics as metrics_module from keras import testing_utils from keras.optimizer_v2 import rmsprop class TrainingTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_dynamic_model_has_trainable_weights(self): if not tf.executing_eagerly(): # Only test Eager modes, as Graph mode is not relevant for dynamic models. return class DynamicModel(keras.Model): def __init__(self): super(DynamicModel, self).__init__(dynamic=True) self.dense = keras.layers.Dense( 1, kernel_initializer='zeros', bias_initializer='ones') def call(self, inputs): return self.dense(inputs) model = DynamicModel() model.compile( 'rmsprop', 'mae', run_eagerly=True) hist = model.fit(np.zeros((1, 1)), np.zeros((1, 1))) self.assertEqual(hist.history['loss'][-1], 1) self.assertEqual(len(model.trainable_weights), 2) loss = model.train_on_batch(np.zeros((1, 1)), np.zeros((1, 1))) # The loss must have been updated if the trainable weights are taken into # account during tracking. 
self.assertLess(loss, 1) @keras_parameterized.run_with_all_model_types(exclude_models='sequential') @keras_parameterized.run_all_keras_modes def test_model_methods_with_eager_tensors_multi_io(self): if not tf.executing_eagerly(): # Only test V2 Function and V2 Eager modes, as V1 Graph mode with # symbolic tensors has different requirements. return input_a = keras.layers.Input(shape=(3,), name='input_a') input_b = keras.layers.Input(shape=(3,), name='input_b') dense = keras.layers.Dense(4, name='dense') dropout = keras.layers.Dropout(0.5, name='dropout') model = testing_utils.get_multi_io_model( [input_a, dense], [input_b, dense, dropout]) optimizer = rmsprop.RMSprop(learning_rate=0.001) loss = 'mse' loss_weights = [1., 0.5] metrics = ['mae', metrics_module.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics, loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly(), sample_weight_mode=None) input_a = tf.zeros(shape=(10, 3)) input_b = tf.zeros(shape=(10, 3)) target_a = tf.zeros(shape=(10, 4)) target_b = tf.zeros(shape=(10, 4)) model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0) # Test: no shuffle. model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0, shuffle=False) # Test: validation data. model.fit([input_a, input_b], [target_a, target_b], epochs=1, batch_size=2, verbose=0, validation_data=([input_a, input_b], [target_a, target_b])) model.train_on_batch([input_a, input_b], [target_a, target_b]) model.predict([input_a, input_b], batch_size=5) model.evaluate([input_a, input_b], [target_a, target_b], batch_size=2, verbose=0) model.test_on_batch([input_a, input_b], [target_a, target_b]) # Test: mix np and tensors. 
input_b = np.zeros(shape=(10, 3)).astype('float32') target_b = np.zeros(shape=(10, 4)).astype('float32') model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0) model.fit([input_a, input_b], [target_a, target_b], epochs=1, batch_size=2, verbose=0, validation_data=([input_a, input_b], [target_a, target_b])) model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0, shuffle=False) model.train_on_batch([input_a, input_b], [target_a, target_b]) model.predict([input_a, input_b], batch_size=5) model.evaluate([input_a, input_b], [target_a, target_b], batch_size=2, verbose=0) model.test_on_batch([input_a, input_b], [target_a, target_b]) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_model_methods_with_eager_tensors_single_io(self): if not tf.executing_eagerly(): # Only test V2 Function and V2 Eager modes, as V1 Graph mode with # symbolic tensors has different requirements. return model = testing_utils.get_small_mlp(10, 4, 3) optimizer = rmsprop.RMSprop(learning_rate=0.001) loss = 'mse' metrics = ['mae', metrics_module.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = tf.zeros(shape=(10, 3)) targets = tf.zeros(shape=(10, 4)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0) model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False) model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0, validation_data=(inputs, targets)) model.evaluate(inputs, targets, batch_size=2, verbose=0) model.predict(inputs, batch_size=2) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) @keras_parameterized.run_with_all_model_types def test_model_fit_and_validation_with_missing_arg_errors(self): model = testing_utils.get_small_mlp(10, 4, 3) model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001), loss='mse', run_eagerly=True) x = tf.zeros(shape=(10, 
3)) y = tf.zeros(shape=(10, 4)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat(10).batch(5) validation_dataset = tf.data.Dataset.from_tensor_slices( (x, y)).repeat().batch(5) # Infinite dataset. model.fit(dataset, epochs=1, verbose=0) # Step argument is required for infinite datasets. with self.assertRaises(ValueError): model.fit(dataset, steps_per_epoch=2, epochs=1, verbose=0, validation_data=validation_dataset) with self.assertRaises(ValueError): model.fit(dataset, steps_per_epoch=2, epochs=1, verbose=0, validation_data=validation_dataset) # TODO(b/120931266): Enable test on subclassed models after bug causing an # extra dimension to be added to predict outputs is fixed. @keras_parameterized.run_with_all_model_types(exclude_models='subclass') def test_generator_methods(self): model = testing_utils.get_small_mlp(10, 4, 3) optimizer = rmsprop.RMSprop(learning_rate=0.001) model.compile( optimizer, loss='mse', metrics=['mae', metrics_module.CategoricalAccuracy()], run_eagerly=True) x = np.random.random((10, 3)) y = np.random.random((10, 4)) def numpy_iterator(): while True: yield x, y model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1) model.evaluate_generator(numpy_iterator(), steps=3) def inference_numpy_iterator(): while True: yield x out = model.predict_generator(inference_numpy_iterator(), steps=3) self.assertEqual(out.shape, (30, 4)) class CorrectnessTest(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('', dict()), ('_clipvalue_inf', {'clipvalue': 999999}), ('_clipnorm_inf', {'clipnorm': 999999}), ]) def test_loss_correctness(self, optimizer_kwargs): # Test that training loss is the same in eager and graph # (by comparing it to a reference value in a deterministic case) layers = [ keras.layers.Dense(3, activation='relu', kernel_initializer='ones'), keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')] model = 
testing_utils.get_model_from_layers(layers, input_shape=(4,)) model.compile( loss='sparse_categorical_crossentropy', optimizer=rmsprop.RMSprop(learning_rate=0.001, **optimizer_kwargs), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones((100, 4)) np.random.seed(123) y = np.random.randint(0, 1, size=(100, 1)) history = model.fit(x, y, epochs=1, batch_size=10) self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_loss_correctness_clipvalue_zero(self): # Test that training loss is the same in eager and graph # (by comparing it to a reference value in a deterministic case) # And confirm that setting clipvalue to zero stops all training layers = [ keras.layers.Dense(3, activation='relu', kernel_initializer='ones'), keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')] model = testing_utils.get_model_from_layers(layers, input_shape=(4,)) model.compile( loss='sparse_categorical_crossentropy', optimizer=rmsprop.RMSprop(learning_rate=0.001, clipvalue=0.0), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones((100, 4)) np.random.seed(123) y = np.random.randint(0, 1, size=(100, 1)) history = model.fit(x, y, epochs=3, batch_size=10) self.assertAlmostEqual(history.history['loss'][-3], 0.6931, 4) self.assertAlmostEqual(history.history['loss'][-2], 0.6931, 4) self.assertAlmostEqual(history.history['loss'][-1], 0.6931, 4) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_loss_correctness_with_iterator(self): # Test that training loss is the same in eager and graph # (by comparing it to a reference value in a deterministic case) layers = [ keras.layers.Dense(3, activation='relu', kernel_initializer='ones'), keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')] model = testing_utils.get_model_from_layers(layers, input_shape=(4,)) model.compile( loss='sparse_categorical_crossentropy', 
optimizer=rmsprop.RMSprop(learning_rate=0.001), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones((100, 4), dtype=np.float32) np.random.seed(123) y = np.random.randint(0, 1, size=(100, 1)) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) history = model.fit(dataset, epochs=1, steps_per_epoch=10) self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4) @parameterized.named_parameters([ ('_None', None, 0., 4.), ('_False', False, 4., 4.), ('_True', True, 0., 0.), ]) def test_nested_model_learning_phase(self, training, expected_training_loss, expected_validation_loss): """Tests that learning phase is correctly set in an intermediate layer.""" def _make_unregularized_model(): inputs = keras.Input((4,)) # Zero out activations when `training=True`. x = keras.layers.Dropout(1. - 1. / (1 << 24))(inputs) x = keras.layers.Dense( 10, activation='relu', trainable=False, bias_initializer='zeros', kernel_initializer='ones')( x) # Just sum together all the activations. outputs = keras.layers.Dense(3)(x) return keras.Model(inputs, outputs) def _regularize_model(unregularized_model): # Regularize the most recent activations of a post-dropout layer. sample_activations = unregularized_model.get_layer( index=-2).get_output_at(-1) regularization_loss = keras.backend.mean(sample_activations) unregularized_model.add_loss(regularization_loss) unregularized_model.add_metric( regularization_loss, aggregation='mean', name='regularization_loss') inputs = keras.Input(unregularized_model.inputs[0].shape[1:]) logits = unregularized_model(inputs, training=training) outputs = keras.activations.softmax(logits) model = keras.Model(inputs, outputs) return model # Make and compile models. model = _regularize_model(_make_unregularized_model()) model.compile('sgd', 'sparse_categorical_crossentropy') # Prepare fake data. 
x = np.ones((20, 4)).astype(np.float32) y = np.random.randint(0, 3, size=(20,)).astype(np.int64) dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2) results = model.evaluate(dataset) evaluation_results = dict(zip(model.metrics_names, results)) # Rate of dropout depends on the learning phase. self.assertEqual(evaluation_results['regularization_loss'], expected_validation_loss) history = model.fit(dataset, epochs=2, validation_data=dataset).history self.assertAllEqual(history['regularization_loss'], [expected_training_loss] * 2) self.assertAllEqual(history['val_regularization_loss'], [expected_validation_loss] * 2) if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
14,033
38.532394
80
py
keras
keras-master/keras/engine/base_layer.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access # pylint: disable=g-classes-have-attributes # pylint: disable=g-bad-import-order """Contains the base Layer class, from which all layers inherit.""" import tensorflow.compat.v2 as tf import collections import copy import functools import itertools import threading import warnings import weakref import numpy as np from google.protobuf import json_format from keras import backend from keras import constraints from keras import initializers from keras import regularizers from keras.engine import base_layer_utils from keras.engine import input_spec from keras.engine import keras_tensor from keras.engine import node as node_module from keras.mixed_precision import autocast_variable from keras.mixed_precision import loss_scale_optimizer from keras.mixed_precision import policy from keras.saving.saved_model import layer_serialization from keras.utils import generic_utils from keras.utils import layer_utils from keras.utils import object_identity from keras.utils import tf_inspect from keras.utils import tf_utils from keras.utils import traceback_utils from keras.utils import version_utils # A module that only depends on `keras.layers` import these from here. 
from keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import from keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import from tensorflow.python.platform import tf_logging from tensorflow.python.util.tf_export import get_canonical_name_for_symbol from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls # pylint: disable=g-inconsistent-quotes metrics_mod = generic_utils.LazyLoader( "metrics_mod", globals(), "keras.metrics") # pylint: enable=g-inconsistent-quotes # Prefix that is added to the TF op layer names. _TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_' # TODO(mdan): Should we have a single generic type for types that can be passed # to tf.cast? _AUTOCAST_TYPES = (tf.Tensor, tf.SparseTensor, tf.RaggedTensor) keras_layers_gauge = tf.__internal__.monitoring.BoolGauge( '/tensorflow/api/keras/layers', 'keras layers usage', 'method') keras_models_gauge = tf.__internal__.monitoring.BoolGauge( '/tensorflow/api/keras/models', 'keras model usage', 'method') keras_api_gauge = tf.__internal__.monitoring.BoolGauge( '/tensorflow/api/keras', 'keras api usage', 'method') keras_premade_model_gauge = tf.__internal__.monitoring.BoolGauge( '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type') _is_name_scope_on_model_declaration_enabled = False @keras_export('keras.layers.Layer') class Layer(tf.Module, version_utils.LayerVersionSelector): """This is the class from which all layers inherit. A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves *computation*, defined in the `call()` method, and a *state* (weight variables), defined either in the constructor `__init__()` or in the `build()` method. Users will just instantiate a layer and then treat it as a callable. Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. 
dtype: The dtype of the layer's computations and weights. Can also be a `tf.keras.mixed_precision.Policy`, which allows the computation and weight dtype to differ. Default of `None` means to use `tf.keras.mixed_precision.global_policy()`, which is a float32 policy unless set to different value. dynamic: Set this to `True` if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If `False`, we assume that the layer can safely be used to generate a static computation graph. Attributes: name: The name of the layer (string). dtype: The dtype of the layer's weights. variable_dtype: Alias of `dtype`. compute_dtype: The dtype of the layer's computations. Layers automatically cast inputs to this dtype which causes the computations and output to also be in this dtype. When mixed precision is used with a `tf.keras.mixed_precision.Policy`, this will be different than `variable_dtype`. dtype_policy: The layer's dtype policy. See the `tf.keras.mixed_precision.Policy` documentation for details. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). trainable: Whether the layer should be trained (boolean), i.e. whether its potentially-trainable weights should be returned as part of `layer.trainable_weights`. input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. We recommend that descendants of `Layer` implement the following methods: * `__init__()`: Defines custom layer attributes, and creates layer state variables that do not depend on input shapes, using `add_weight()`. 
* `build(self, input_shape)`: This method can be used to create weights that depend on the shape(s) of the input(s), using `add_weight()`. `__call__()` will automatically build the layer (if it has not been built yet) by calling `build()`. * `call(self, inputs, *args, **kwargs)`: Called in `__call__` after making sure `build()` has been called. `call()` performs the logic of applying the layer to the input tensors (which should be passed in as argument). Two reserved keyword arguments you can optionally use in `call()` are: - `training` (boolean, whether the call is in inference mode or training mode). See more details in [the layer/model subclassing guide]( https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_training_argument_in_the_call_method) - `mask` (boolean tensor encoding masked timesteps in the input, used in RNN layers). See more details in [the layer/model subclassing guide]( https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_mask_argument_in_the_call_method) A typical signature for this method is `call(self, inputs)`, and user could optionally add `training` and `mask` if the layer need them. `*args` and `**kwargs` is only useful for future extension when more input parameters are planned to be added. * `get_config(self)`: Returns a dictionary containing the configuration used to initialize this layer. If the keys differ from the arguments in `__init__`, then override `from_config(self)` as well. This method is used when saving the layer or a model that contains this layer. Examples: Here's a basic example: a layer with two variables, `w` and `b`, that returns `y = w . x + b`. It shows how to implement `build()` and `call()`. Variables set as attributes of a layer are tracked as weights of the layers (in `layer.weights`). 
```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): # Create the state of the layer (weights) w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(input_shape[-1], self.units), dtype='float32'), trainable=True) b_init = tf.zeros_initializer() self.b = tf.Variable( initial_value=b_init(shape=(self.units,), dtype='float32'), trainable=True) def call(self, inputs): # Defines the computation from inputs to outputs return tf.matmul(inputs, self.w) + self.b # Instantiates the layer. linear_layer = SimpleDense(4) # This will also call `build(input_shape)` and create the weights. y = linear_layer(tf.ones((2, 2))) assert len(linear_layer.weights) == 2 # These weights are trainable, so they're listed in `trainable_weights`: assert len(linear_layer.trainable_weights) == 2 ``` Note that the method `add_weight()` offers a shortcut to create weights: ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b ``` Besides trainable weights, updated via backpropagation during training, layers can also have non-trainable weights. These weights are meant to be updated manually during `call()`. Here's a example layer that computes the running sum of its inputs: ```python class ComputeSum(Layer): def __init__(self, input_dim): super(ComputeSum, self).__init__() # Create a non-trainable weight. 
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False) def call(self, inputs): self.total.assign_add(tf.reduce_sum(inputs, axis=0)) return self.total my_sum = ComputeSum(2) x = tf.ones((2, 2)) y = my_sum(x) print(y.numpy()) # [2. 2.] y = my_sum(x) print(y.numpy()) # [4. 4.] assert my_sum.weights == [my_sum.total] assert my_sum.non_trainable_weights == [my_sum.total] assert my_sum.trainable_weights == [] ``` For more information about creating layers, see the guide [Making new Layers and Models via subclassing]( https://www.tensorflow.org/guide/keras/custom_layers_and_models) """ # See tf.Module for the usage of this property. # The key for _obj_reference_counts_dict is a Trackable, which could be a # variable or layer etc. tf.Module._flatten will fail to flatten the key # since it is trying to convert Trackable to a string. This attribute can be # ignored even after the fix of nest lib, since the trackable object should # already been available as individual attributes. _obj_reference_counts_dict # just contains a copy of them. _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain( ('_obj_reference_counts_dict',), tf.Module._TF_MODULE_IGNORED_PROPERTIES )) # When loading from a SavedModel, Layers typically can be revived into a # generic Layer wrapper. Sometimes, however, layers may implement methods # that go beyond this wrapper, as in the case of PreprocessingLayers' # `adapt` method. When this is the case, layer implementers can override # must_restore_from_config to return True; layers with this property must # be restored into their actual objects (and will fail if the object is # not available to the restoration code). _must_restore_from_config = False def _get_cell_name(self): canonical_name = get_canonical_name_for_symbol( self.__class__, api_name='keras', add_prefix_to_v1_names=True) if canonical_name is not None: return 'tf.{}'.format(canonical_name) return self.__class__.__module__ + '.' 
+ self.__class__.__name__ def _instrument_layer_creation(self): self._instrumented_keras_api = False self._instrumented_keras_layer_class = False self._instrumented_keras_model_class = False if not getattr(self, '_disable_keras_instrumentation', False): keras_api_gauge.get_cell('layer').set(True) self._instrumented_keras_api = True if getattr(self, '_is_model_for_instrumentation', False): keras_models_gauge.get_cell(self._get_cell_name()).set(True) self._instrumented_keras_model_class = True else: keras_layers_gauge.get_cell(self._get_cell_name()).set(True) self._instrumented_keras_layer_class = True else: # This is a legacy layer that has disabled instrumentation # as a native keras object. We still instrument this as # legacy usage. keras_api_gauge.get_cell('legacy_layer').set(True) @tf.__internal__.tracking.no_automatic_dependency_tracking def __init__(self, trainable=True, name=None, dtype=None, dynamic=False, **kwargs): self._instrument_layer_creation() # These properties should be set by the user via keyword arguments. # note that 'dtype', 'input_shape' and 'batch_input_shape' # are only applicable to input layers: do not pass these keywords # to non-input layers. allowed_kwargs = { 'input_dim', 'input_shape', 'batch_input_shape', 'batch_size', 'weights', 'activity_regularizer', 'autocast', 'implementation', } # Validate optional keyword arguments. generic_utils.validate_kwargs(kwargs, allowed_kwargs) # Mutable properties # Indicates whether the layer's weights are updated during training # and whether the layer's updates are run during training. if not (isinstance(trainable, bool) or (isinstance(trainable, (tf.Tensor, tf.Variable)) and trainable.dtype is tf.bool)): raise TypeError( 'Expected `trainable` argument to be a boolean, ' f'but got: {trainable}') self._trainable = trainable # A stateful layer is a layer whose updates are run during inference too, # for instance stateful RNNs. 
self._stateful = False # Indicates whether `build` needs to be called upon layer call, to create # the layer's weights. self.built = False # Provides information about which inputs are compatible with the layer. self._input_spec = None # SavedModel-related attributes. # Record the build input shape for loading purposes. # TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is # submitted. self._build_input_shape = None self._saved_model_inputs_spec = None self._saved_model_arg_spec = None # `Layer.compute_mask` will be called at the end of `Layer.__call__` if # `Layer.compute_mask` is overridden, or if the `Layer` subclass sets # `self.supports_masking=True`. self._supports_masking = not generic_utils.is_default(self.compute_mask) self._init_set_name(name) self._activity_regularizer = regularizers.get( kwargs.pop('activity_regularizer', None)) self._maybe_create_attribute('_trainable_weights', []) self._maybe_create_attribute('_non_trainable_weights', []) self._updates = [] # Object to store all thread local layer properties. self._thread_local = threading.local() # A list of zero-argument lambdas which return Tensors, used for variable # regularizers. self._callable_losses = [] # A list of symbolic Tensors containing activity regularizers and losses # manually added through `add_loss` in graph-building mode. self._losses = [] # A list of metric instances corresponding to the symbolic metric tensors # added using the `add_metric` API. self._metrics = [] # Ensures the same metric is not added multiple times in `MirroredStrategy`. self._metrics_lock = threading.Lock() # Both graph and subclassed networks have a dtype policy. For graph # networks, the policy's compute and variable dtypes are ignored. Such # networks only use the policy if it is a PolicyV1, in which case it uses # the PolicyV1's loss_scale (Policy does not have a loss_scale). For # subclassed networks, the compute and variable dtypes are used as like any # ordinary layer. 
self._set_dtype_policy(dtype) # Boolean indicating whether the layer automatically casts its inputs to the # layer's compute_dtype. self._autocast = kwargs.get('autocast', base_layer_utils.v2_dtype_behavior_enabled()) # Tracks `TrackableDataStructure`s, `Module`s, and `Layer`s. # Ordered by when the object was assigned as an attr. # Entries are unique. self._maybe_create_attribute('_self_tracked_trackables', []) # These lists will be filled via successive calls # to self._add_inbound_node(). # Used in symbolic mode only, only in conjunction with graph-networks self._inbound_nodes_value = [] self._outbound_nodes_value = [] self._init_call_fn_args() # Whether the `call` method can be used to build a TF graph without issues. # This attribute has no effect if the model is created using the Functional # API. Instead, `model.dynamic` is determined based on the internal layers. if not isinstance(dynamic, bool): raise TypeError( f'Expected `dynamic` argument to be a boolean, but got: {dynamic}') self._dynamic = dynamic # Manage input shape information if passed. if 'input_dim' in kwargs and 'input_shape' not in kwargs: # Backwards compatibility: alias 'input_dim' to 'input_shape'. kwargs['input_shape'] = (kwargs['input_dim'],) if 'input_shape' in kwargs or 'batch_input_shape' in kwargs: # In this case we will later create an input layer # to insert before the current layer if 'batch_input_shape' in kwargs: batch_input_shape = tuple(kwargs['batch_input_shape']) elif 'input_shape' in kwargs: if 'batch_size' in kwargs: batch_size = kwargs['batch_size'] else: batch_size = None batch_input_shape = (batch_size,) + tuple(kwargs['input_shape']) self._batch_input_shape = batch_input_shape # Manage initial weight values if passed. self._initial_weights = kwargs.get('weights', None) # Whether the layer will track any layers that is set as attribute on itself # as sub-layers, the weights from the sub-layers will be included in the # parent layer's variables() as well. 
# Default to True, which means auto tracking is turned on. Certain subclass # might want to turn it off, like Sequential model. self._auto_track_sub_layers = True # For backwards compat reasons, most built-in layers do not guarantee # That they will 100% preserve the structure of input args when saving # / loading configs. E.g. they may un-nest an arg that is # a list with one element. self._preserve_input_structure_in_config = False # Save outer name scope at layer declaration so that it is preserved at # the actual layer construction. self._outer_name_scope = tf.get_current_name_scope() @tf.__internal__.tracking.no_automatic_dependency_tracking @generic_utils.default def build(self, input_shape): """Creates the variables of the layer (optional, for subclass implementers). This is a method that implementers of subclasses of `Layer` or `Model` can override if they need a state-creation step in-between layer instantiation and layer call. This is typically used to create the weights of `Layer` subclasses. Args: input_shape: Instance of `TensorShape`, or list of instances of `TensorShape` if the layer expects a list of inputs (one instance per input). """ # Only record the build input shapes of overridden build methods. if not hasattr(self.build, '_is_default'): self._build_input_shape = input_shape self.built = True @doc_controls.for_subclass_implementers def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument """This is where the layer's logic lives. Note here that `call()` method in `tf.keras` is little bit different from `keras` API. In `keras` API, you can pass support masking for layers as additional arguments. Whereas `tf.keras` has `compute_mask()` method to support masking. Args: inputs: Input tensor, or dict/list/tuple of input tensors. The first positional `inputs` argument is subject to special rules: - `inputs` must be explicitly passed. 
A layer cannot have zero arguments, and `inputs` cannot be provided via the default value of a keyword argument. - NumPy array or Python scalar values in `inputs` get cast as tensors. - Keras mask metadata is only collected from `inputs`. - Layers are built (`build(input_shape)` method) using shape info from `inputs` only. - `input_spec` compatibility is only checked against `inputs`. - Mixed precision input casting is only applied to `inputs`. If a layer has tensor arguments in `*args` or `**kwargs`, their casting behavior in mixed precision should be handled manually. - The SavedModel input specification is generated using `inputs` only. - Integration with various ecosystem packages like TFMOT, TFLite, TF.js, etc is only supported for `inputs` and not for tensors in positional and keyword arguments. *args: Additional positional arguments. May contain tensors, although this is not recommended, for the reasons above. **kwargs: Additional keyword arguments. May contain tensors, although this is not recommended, for the reasons above. The following optional keyword arguments are reserved: - `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. - `mask`: Boolean input mask. If the layer's `call()` method takes a `mask` argument, its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support). Returns: A tensor or list/tuple of tensors. """ return inputs @doc_controls.for_subclass_implementers def _add_trackable(self, trackable_object, trainable): """Adds a Trackable object to this layer's state. Args: trackable_object: The tf.tracking.Trackable object to add. trainable: Boolean, whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean and variance). 
Returns: The TrackableWeightHandler used to track this object. """ if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler): handler = trackable_object else: handler = base_layer_utils.TrackableWeightHandler(trackable_object) if trainable: self._trainable_weights.append(handler) else: self._non_trainable_weights.append(handler) return handler @doc_controls.for_subclass_implementers def add_weight(self, name=None, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, constraint=None, use_resource=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.VariableAggregation.NONE, **kwargs): """Adds a new variable to the layer. Args: name: Variable name. shape: Variable shape. Defaults to scalar if unspecified. dtype: The type of the variable. Defaults to `self.dtype`. initializer: Initializer instance (callable). regularizer: Regularizer instance (callable). trainable: Boolean, whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean and variance). Note that `trainable` cannot be `True` if `synchronization` is set to `ON_READ`. constraint: Constraint instance (callable). use_resource: Whether to use `ResourceVariable`. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. **kwargs: Additional keyword arguments. Accepted values are `getter`, `collections`, `experimental_autocast` and `caching_device`. Returns: The variable created. 
Raises: ValueError: When giving unsupported dtype and no initializer or when trainable has been set to True with synchronization set as `ON_READ`. """ if shape is None: shape = () kwargs.pop('partitioner', None) # Ignored. # Validate optional keyword arguments. for kwarg in kwargs: if kwarg not in ['collections', 'experimental_autocast', 'caching_device', 'getter']: raise TypeError('Unknown keyword argument:', kwarg) collections_arg = kwargs.pop('collections', None) # 'experimental_autocast' can be set to False by the caller to indicate an # AutoCastVariable should never be created. autocast = kwargs.pop('experimental_autocast', True) # See the docstring for tf.Variable about the details for caching_device. caching_device = kwargs.pop('caching_device', None) if dtype is None: dtype = self.dtype or backend.floatx() dtype = tf.as_dtype(dtype) if self._dtype_policy.variable_dtype is None: # The policy is "_infer", so we infer the policy from the variable dtype. self._set_dtype_policy(policy.Policy(dtype.base_dtype.name)) initializer = initializers.get(initializer) regularizer = regularizers.get(regularizer) constraint = constraints.get(constraint) if synchronization == tf.VariableSynchronization.ON_READ: if trainable: raise ValueError( 'Synchronization value can be set to ' 'VariableSynchronization.ON_READ only for non-trainable variables. ' 'You have specified trainable=True and ' 'synchronization=VariableSynchronization.ON_READ.') else: # Set trainable to be false when variable is to be synced on read. 
trainable = False elif trainable is None: trainable = True # Initialize variable when no initializer provided if initializer is None: # If dtype is DT_FLOAT, provide a uniform unit scaling initializer if dtype.is_floating: initializer = initializers.get('glorot_uniform') # If dtype is DT_INT/DT_UINT, provide a default value `zero` # If dtype is DT_BOOL, provide a default value `FALSE` elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool: initializer = initializers.get('zeros') # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here? elif 'getter' not in kwargs: # When `getter` is specified, it's possibly fine for `initializer` to be # None since it's up to the custom `getter` to raise error in case it # indeed needs `initializer`. raise ValueError('An initializer for variable %s of type %s is required' ' for layer %s' % (name, dtype.base_dtype, self.name)) getter = kwargs.pop('getter', base_layer_utils.make_variable) if (autocast and self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype and dtype.is_floating): old_getter = getter # Wrap variable constructor to return an AutoCastVariable. def getter(*args, **kwargs): # pylint: disable=function-redefined variable = old_getter(*args, **kwargs) return autocast_variable.create_autocast_variable(variable) # Also the caching_device does not work with the mixed precision API, # disable it if it is specified. # TODO(b/142020079): Reenable it once the bug is fixed. if caching_device is not None: tf_logging.warning( '`caching_device` does not work with mixed precision API. Ignoring ' 'user specified `caching_device`.') caching_device = None variable = self._add_variable_with_custom_getter( name=name, shape=shape, # TODO(allenl): a `make_variable` equivalent should be added as a # `Trackable` method. getter=getter, # Manage errors in Layer rather than Trackable. 
overwrite=True, initializer=initializer, dtype=dtype, constraint=constraint, trainable=trainable, use_resource=use_resource, collections=collections_arg, synchronization=synchronization, aggregation=aggregation, caching_device=caching_device) if regularizer is not None: # TODO(fchollet): in the future, this should be handled at the # level of variable creation, and weight regularization losses # should be variable attributes. name_in_scope = variable.name[:variable.name.find(':')] self._handle_weight_regularization(name_in_scope, variable, regularizer) if base_layer_utils.is_split_variable(variable): for v in variable: backend.track_variable(v) if trainable: self._trainable_weights.append(v) else: self._non_trainable_weights.append(v) else: backend.track_variable(variable) if trainable: self._trainable_weights.append(variable) else: self._non_trainable_weights.append(variable) return variable @generic_utils.default def get_config(self): """Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. The config of a layer does not include connectivity information, nor the layer class name. These are handled by `Network` (one layer of abstraction above). Note that `get_config()` does not guarantee to return a fresh copy of dict every time it is called. The callers should make a copy of the returned dict if they want to modify it. Returns: Python dictionary. 
""" all_args = tf_inspect.getfullargspec(self.__init__).args config = { 'name': self.name, 'trainable': self.trainable, } if hasattr(self, '_batch_input_shape'): config['batch_input_shape'] = self._batch_input_shape config['dtype'] = policy.serialize(self._dtype_policy) if hasattr(self, 'dynamic'): # Only include `dynamic` in the `config` if it is `True` if self.dynamic: config['dynamic'] = self.dynamic elif 'dynamic' in all_args: all_args.remove('dynamic') expected_args = config.keys() # Finds all arguments in the `__init__` that are not in the config: extra_args = [arg for arg in all_args if arg not in expected_args] # Check that either the only argument in the `__init__` is `self`, # or that `get_config` has been overridden: if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'): raise NotImplementedError(f'Layer {self.__class__.__name__} ' 'has arguments in `__init__)_` and ' 'therefore must override `get_config()`.') return config @classmethod def from_config(cls, config): """Creates a layer from its config. This method is the reverse of `get_config`, capable of instantiating the same layer from the config dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by `set_weights`). Args: config: A Python dictionary, typically the output of get_config. Returns: A layer instance. """ return cls(**config) def compute_output_shape(self, input_shape): """Computes the output shape of the layer. If the layer has not been built, this method will call `build` on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: An input shape tuple. """ if tf.executing_eagerly(): # In this case we build the model first in order to do shape inference. 
# This is acceptable because the framework only calls # `compute_output_shape` on shape values that the layer would later be # built for. It would however cause issues in case a user attempts to # use `compute_output_shape` manually with shapes that are incompatible # with the shape the Layer will be called on (these users will have to # implement `compute_output_shape` themselves). self._maybe_build(input_shape) graph_name = str(self.name) + '_scratch_graph' with tf.__internal__.FuncGraph(graph_name).as_default(): input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) def _make_placeholder_like(shape): ph = backend.placeholder(shape=shape, dtype=self.dtype) ph._keras_mask = None return ph inputs = tf.nest.map_structure(_make_placeholder_like, input_shape) try: outputs = self(inputs, training=False) except TypeError as e: raise NotImplementedError( 'We could not automatically infer the static shape of the ' 'layer\'s output. Please implement the ' '`compute_output_shape` method on your layer (%s).' % self.__class__.__name__) from e return tf.nest.map_structure(lambda t: t.shape, outputs) raise NotImplementedError( 'Please run in eager mode or implement the `compute_output_shape` ' 'method on your layer (%s).' % self.__class__.__name__) @doc_controls.for_subclass_implementers def compute_output_signature(self, input_signature): """Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use `compute_output_shape`, and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. 
    Returns:
      Single TensorSpec or nested structure of TensorSpec objects, describing
        how the layer would transform the provided input.

    Raises:
      TypeError: If input_signature contains a non-TensorSpec object.
    """

    def check_type_return_shape(s):
      # Reject anything that is not a TensorSpec; only the shape is needed
      # for the `compute_output_shape` fallback below.
      if not isinstance(s, tf.TensorSpec):
        raise TypeError('Only TensorSpec signature types are supported, '
                        'but saw signature entry: {}.'.format(s))
      return s.shape

    input_shape = tf.nest.map_structure(check_type_return_shape,
                                        input_signature)
    output_shape = self.compute_output_shape(input_shape)
    dtype = self._compute_dtype
    if dtype is None:
      input_dtypes = [s.dtype for s in tf.nest.flatten(input_signature)]
      # Default behavior when self.dtype is None, is to use the first input's
      # dtype.
      dtype = input_dtypes[0]
    return tf.nest.map_structure(
        lambda s: tf.TensorSpec(dtype=dtype, shape=s),
        output_shape)

  def _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs):
    """Builds output KerasTensors for a symbolic call on `inputs`.

    Dynamic (eager-only) layers are handled via static shape inference
    (`compute_output_signature`); all other layers are traced in a scratch
    graph by `_infer_output_signature`.
    """
    if self.dynamic:
      # We will use static shape inference to return symbolic tensors
      # matching the specifications of the layer outputs.
      # Since `self.dynamic` is True, we will never attempt to
      # run the underlying TF graph (which is disconnected).
      # TODO(fchollet): consider py_func as an alternative, which
      # would enable us to run the underlying graph if needed.
      input_signature = tf.nest.map_structure(
          lambda x: tf.TensorSpec(shape=x.shape, dtype=x.dtype), inputs)
      output_signature = self.compute_output_signature(input_signature)
      return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
    else:
      return self._infer_output_signature(inputs, args, kwargs, input_masks)

  def _infer_output_signature(self, inputs, args, kwargs, input_masks):
    """Call the layer on input KerasTensors and returns output KerasTensors."""
    call_fn = self.call
    # Wrapping `call` function in autograph to allow for dynamic control
    # flow and control dependencies in call. We are limiting this to
    # subclassed layers as autograph is strictly needed only for
    # subclassed layers and models.
# tf_convert will respect the value of autograph setting in the # enclosing tf.function, if any. if (base_layer_utils.is_subclassed(self) and not base_layer_utils.from_saved_model(self)): call_fn = tf.__internal__.autograph.tf_convert( self.call, tf.__internal__.autograph.control_status_ctx()) call_fn = traceback_utils.inject_argument_info_in_traceback( call_fn, object_name=f'layer "{self.name}" (type {self.__class__.__name__})') # We enter a scratch graph and build placeholder inputs inside of it that # match the input args. # We then call the layer inside of the scratch graph to identify the # output signatures, then we build KerasTensors corresponding to those # outputs. scratch_graph = tf.__internal__.FuncGraph(str(self.name) + '_scratch_graph') with scratch_graph.as_default(): inputs = tf.nest.map_structure( keras_tensor.keras_tensor_to_placeholder, inputs) args = tf.nest.map_structure( keras_tensor.keras_tensor_to_placeholder, args) kwargs = tf.nest.map_structure( keras_tensor.keras_tensor_to_placeholder, kwargs) input_masks = tf.nest.map_structure( keras_tensor.keras_tensor_to_placeholder, input_masks) with backend.name_scope(self._name_scope()): # pylint: disable=not-callable with autocast_variable.enable_auto_cast_variables( self._compute_dtype_object): # Build layer if applicable (if the `build` method has been # overridden). # TODO(kaftan): do we maybe_build here, or have we already done it? self._maybe_build(inputs) inputs = self._maybe_cast_inputs(inputs) outputs = call_fn(inputs, *args, **kwargs) self._handle_activity_regularization(inputs, outputs) self._set_mask_metadata(inputs, outputs, input_masks, build_graph=False) outputs = tf.nest.map_structure( keras_tensor.keras_tensor_from_tensor, outputs) if hasattr(self, '_set_inputs') and not self.inputs: # TODO(kaftan): figure out if we need to do this at all # Subclassed network: explicitly set metadata normally set by # a call to self._set_inputs(). 
self._set_inputs(inputs, outputs) del scratch_graph return outputs @generic_utils.default def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument """Computes an output mask tensor. Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer). """ if not self._supports_masking: if any(m is not None for m in tf.nest.flatten(mask)): raise TypeError('Layer ' + self.name + ' does not support masking, ' 'but was passed an input_mask: ' + str(mask)) # masking not explicitly supported: return None as mask. return None # if masking is explicitly supported, by default # carry over the input mask return mask @traceback_utils.filter_traceback def __call__(self, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. Args: *args: Positional arguments to be passed to `self.call`. **kwargs: Keyword arguments to be passed to `self.call`. Returns: Output tensor(s). Note: - The following optional keyword arguments are reserved for specific uses: * `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. * `mask`: Boolean input mask. - If the layer's `call` method takes a `mask` argument (as some Keras layers do), its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support. - If the layer is not built, the method will call `build`. Raises: ValueError: if the layer's `call` method returns None (an invalid value). RuntimeError: if `super().__init__()` was not called in the constructor. """ if not hasattr(self, '_thread_local'): raise RuntimeError( 'You must call `super().__init__()` in the layer constructor.') # `inputs` (the first arg in the method spec) is special cased in # layer call due to historical reasons. 
# This special casing currently takes the form of: # - 'inputs' must be explicitly passed. A layer cannot have zero arguments, # and inputs cannot have been provided via the default value of a kwarg. # - numpy/scalar values in `inputs` get converted to tensors # - implicit masks / mask metadata are only collected from 'inputs` # - Layers are built using shape info from 'inputs' only # - input_spec compatibility is only checked against `inputs` # - mixed precision casting (autocast) is only applied to `inputs`, # not to any other argument. inputs, args, kwargs = self._split_out_first_arg(args, kwargs) input_list = tf.nest.flatten(inputs) # Functional Model construction mode is invoked when `Layer`s are called on # symbolic `KerasTensor`s, i.e.: # >> inputs = tf.keras.Input(10) # >> outputs = MyLayer()(inputs) # Functional construction mode. # >> model = tf.keras.Model(inputs, outputs) if _in_functional_construction_mode(self, inputs, args, kwargs, input_list): return self._functional_construction_call(inputs, args, kwargs, input_list) # Maintains info about the `Layer.call` stack. call_context = base_layer_utils.call_context() # Accept NumPy and scalar inputs by converting to Tensors. if any(isinstance(x, ( tf.Tensor, np.ndarray, float, int)) for x in input_list): inputs = tf.nest.map_structure(_convert_numpy_or_python_types, inputs) input_list = tf.nest.flatten(inputs) # Handle `mask` propagation from previous layer to current layer. Masks can # be propagated explicitly via the `mask` argument, or implicitly via # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed # explicitly take priority. 
input_masks, mask_is_implicit = self._get_input_masks( inputs, input_list, args, kwargs) if self._expects_mask_arg and mask_is_implicit: kwargs['mask'] = input_masks # Training mode for `Layer.call` is set via (in order of priority): # (1) The `training` argument passed to this `Layer.call`, if it is not None # (2) The training mode of an outer `Layer.call`. # (3) The default mode set by `tf.keras.backend.set_learning_phase` (if set) # (4) Any non-None default value for `training` specified in the call # signature # (5) False (treating the layer as if it's in inference) args, kwargs, training_mode = self._set_training_mode( args, kwargs, call_context) # Losses are cleared for all sublayers on the outermost `Layer.call`. # Losses are not cleared on inner `Layer.call`s, because sublayers can be # called multiple times. if not call_context.in_call: self._clear_losses() eager = tf.executing_eagerly() with call_context.enter( layer=self, inputs=inputs, build_graph=not eager, training=training_mode): input_spec.assert_input_compatibility(self.input_spec, inputs, self.name) if eager: call_fn = self.call name_scope = self._name else: name_scope = self._name_scope() # Avoid autoincrementing. 
# pylint: disable=not-callable call_fn = self._autographed_call() call_fn = traceback_utils.inject_argument_info_in_traceback( call_fn, object_name=f'layer "{self.name}" (type {self.__class__.__name__})') with tf.name_scope(name_scope): if not self.built: self._maybe_build(inputs) if self._autocast: inputs = self._maybe_cast_inputs(inputs, input_list) with autocast_variable.enable_auto_cast_variables( self._compute_dtype_object): outputs = call_fn(inputs, *args, **kwargs) if self._activity_regularizer: self._handle_activity_regularization(inputs, outputs) if self._supports_masking: self._set_mask_metadata(inputs, outputs, input_masks, not eager) if self._saved_model_inputs_spec is None: self._set_save_spec(inputs, args, kwargs) return outputs def _functional_construction_call(self, inputs, args, kwargs, input_list): call_context = base_layer_utils.call_context() # Accept NumPy and scalar inputs by converting to Tensors. if any(isinstance(x, ( tf.Tensor, np.ndarray, float, int)) for x in input_list): def _convert_non_tensor(x): # Don't call `ops.convert_to_tensor` on all `inputs` because # `SparseTensors` can't be converted to `Tensor`. if isinstance(x, (tf.Tensor, np.ndarray, float, int)): return tf.convert_to_tensor(x) return x inputs = tf.nest.map_structure(_convert_non_tensor, inputs) input_list = tf.nest.flatten(inputs) # Handle `mask` propagation from previous layer to current layer. Masks can # be propagated explicitly via the `mask` argument, or implicitly via # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed # explicitly take priority. mask_arg_passed_by_framework = False input_masks, mask_is_implicit = self._get_input_masks( inputs, input_list, args, kwargs) if self._expects_mask_arg and mask_is_implicit: kwargs['mask'] = input_masks mask_arg_passed_by_framework = True # If `training` argument is None or not explicitly passed, # propagate `training` value from this layer's calling layer. 
training_value = None training_arg_passed_by_framework = False # Priority 1: `training` was explicitly passed a non-None value. if self._call_arg_was_passed('training', args, kwargs): training_value = self._get_call_arg_value('training', args, kwargs) if not self._expects_training_arg: kwargs.pop('training') if training_value is None: # Priority 2: `training` was passed to a parent layer. if call_context.training is not None: training_value = call_context.training # Priority 3: `learning_phase()` has been set. elif backend.global_learning_phase_is_set(): training_value = backend.learning_phase() # Force the training_value to be bool type which matches to the contract # for layer/model call args. if tf.is_tensor(training_value): training_value = tf.cast(training_value, tf.bool) else: training_value = bool(training_value) # Priority 4: trace layer with the default training argument specified # in the `call` signature (or in inference mode if the `call` signature # specifies no non-None default). else: training_value = self._default_training_arg # In cases (2), (3), (4) the training argument is passed automatically # by the framework, and will not be hard-coded into the model. if self._expects_training_arg: args, kwargs = self._set_call_arg_value('training', training_value, args, kwargs) training_arg_passed_by_framework = True with call_context.enter( layer=self, inputs=inputs, build_graph=True, training=training_value): # Check input assumptions set after layer building, e.g. input shape. outputs = self._keras_tensor_symbolic_call( inputs, input_masks, args, kwargs) if outputs is None: raise ValueError('A layer\'s `call` method should return a ' 'Tensor or a list of Tensors, not None ' '(layer: ' + self.name + ').') if training_arg_passed_by_framework: args, kwargs = self._set_call_arg_value( 'training', None, args, kwargs, pop_kwarg_if_none=True) if mask_arg_passed_by_framework: kwargs.pop('mask') # Node connectivity does not special-case the first argument. 
outputs = self._set_connectivity_metadata((inputs,) + args, kwargs, outputs) return outputs def _set_training_mode(self, args, kwargs, call_context): training_mode = None if self._expects_training_arg: # (1) `training` was passed to this `Layer.call`. if self._call_arg_was_passed('training', args, kwargs): training_mode = self._get_call_arg_value('training', args, kwargs) # If no `training` arg was passed, or `None` was explicitly passed, # the framework will make a decision about the training mode is. if training_mode is None: call_ctx_training = call_context.training # (2) `training` mode is inferred from an outer `Layer.call`. if call_ctx_training is not None: training_mode = call_ctx_training # (3) User set `tf.keras.backend.set_learning_phase`. elif backend.global_learning_phase_is_set(): training_mode = backend.learning_phase() # Ensure value is a `bool` or `tf.bool`. if isinstance(training_mode, bool): pass elif tf.is_tensor(training_mode): training_mode = tf.cast(training_mode, tf.bool) else: training_mode = bool(training_mode) # (4) We default to using `call`'s default value for `training`, # or treating the layer as if it is in inference if no non-None default # is specified in the `call` signature. else: training_mode = self._default_training_arg # For case (2), (3), (4) `training` arg is passed by framework. args, kwargs = self._set_call_arg_value('training', training_mode, args, kwargs) else: if 'training' in kwargs: # `training` was passed to this `Layer` but is not needed for # `Layer.call`. It will set the default mode for inner `Layer.call`s. training_mode = kwargs.pop('training') else: # Grab the current `training` mode from any outer `Layer.call`. training_mode = call_context.training return args, kwargs, training_mode def _autographed_call(self): # Wrapping `call` function in autograph to allow for dynamic control # flow and control dependencies in call. 
We are limiting this to # subclassed layers as autograph is strictly needed only for # subclassed layers and models. # tf_convert will respect the value of autograph setting in the # enclosing tf.function, if any. if (base_layer_utils.is_subclassed(self) and not base_layer_utils.from_saved_model(self)): return tf.__internal__.autograph.tf_convert( self.call, tf.__internal__.autograph.control_status_ctx()) else: return self.call @property def dtype(self): """The dtype of the layer weights. This is equivalent to `Layer.dtype_policy.variable_dtype`. Unless mixed precision is used, this is the same as `Layer.compute_dtype`, the dtype of the layer's computations. """ return self._dtype_policy.variable_dtype @property def name(self): """Name of the layer (string), set in the constructor.""" return self._name @property def supports_masking(self): """Whether this layer supports computing a mask using `compute_mask`.""" return self._supports_masking @supports_masking.setter def supports_masking(self, value): self._supports_masking = value @property def dynamic(self): """Whether the layer is dynamic (eager-only); set in the constructor.""" return any(layer._dynamic for layer in self._flatten_layers()) @property @doc_controls.do_not_doc_inheritable def stateful(self): return any(layer._stateful for layer in self._flatten_layers()) @stateful.setter def stateful(self, value): self._stateful = value @property def trainable(self): return self._trainable @trainable.setter def trainable(self, value): """Sets trainable attribute for the layer and its sublayers. When this value is changed during training (e.g. with a `tf.keras.callbacks.Callback`) you need to call the parent `tf.keras.Model.make_train_function` with `force=True` in order to recompile the training graph. Args: value: Boolean with the desired state for the layer's trainable attribute. 
""" for layer in self._flatten_layers(): layer._trainable = value @property def activity_regularizer(self): """Optional regularizer function for the output of this layer.""" return self._activity_regularizer @activity_regularizer.setter def activity_regularizer(self, regularizer): """Optional regularizer function for the output of this layer.""" self._activity_regularizer = regularizer @property def input_spec(self): """`InputSpec` instance(s) describing the input format for this layer. When you create a layer subclass, you can set `self.input_spec` to enable the layer to run input compatibility checks when it is called. Consider a `Conv2D` layer: it can only be called on a single input tensor of rank 4. As such, you can set, in `__init__()`: ```python self.input_spec = tf.keras.layers.InputSpec(ndim=4) ``` Now, if you try to call the layer on an input that isn't rank 4 (for instance, an input of shape `(2,)`, it will raise a nicely-formatted error: ``` ValueError: Input 0 of layer conv2d is incompatible with the layer: expected ndim=4, found ndim=1. Full shape received: [2] ``` Input checks that can be specified via `input_spec` include: - Structure (e.g. a single input, a list of 2 inputs, etc) - Shape - Rank (ndim) - Dtype For more information, see `tf.keras.layers.InputSpec`. Returns: A `tf.keras.layers.InputSpec` instance, or nested structure thereof. """ return self._input_spec @input_spec.setter # Must be decorated to prevent tracking, since the input_spec can be nested # InputSpec objects. @tf.__internal__.tracking.no_automatic_dependency_tracking def input_spec(self, value): for v in tf.nest.flatten(value): if v is not None and not isinstance(v, InputSpec): raise TypeError('Layer input_spec must be an instance of InputSpec. ' 'Got: {}'.format(v)) self._input_spec = value @property def trainable_weights(self): """List of all trainable weights tracked by this layer. Trainable weights are updated via gradient descent during training. 
    Returns:
      A list of trainable variables.
    """
    if self.trainable:
      children_weights = self._gather_children_attribute('trainable_variables')
      # De-duplicate: the same variable may be tracked by several sub-layers.
      return self._dedup_weights(self._trainable_weights + children_weights)
    else:
      return []

  @property
  def non_trainable_weights(self):
    """List of all non-trainable weights tracked by this layer.

    Non-trainable weights are *not* updated during training. They are
    expected to be updated manually in `call()`.

    Returns:
      A list of non-trainable variables.
    """
    if self.trainable:
      children_weights = self._gather_children_attribute(
          'non_trainable_variables')
      non_trainable_weights = self._non_trainable_weights + children_weights
    else:
      # When the layer itself is frozen, *all* of its weights (including the
      # nominally trainable ones) are reported as non-trainable.
      children_weights = self._gather_children_attribute('variables')
      non_trainable_weights = (
          self._trainable_weights + self._non_trainable_weights +
          children_weights)
    return self._dedup_weights(non_trainable_weights)

  @property
  def weights(self):
    """Returns the list of all layer variables/weights.

    Returns:
      A list of variables.
    """
    # Trainable weights are listed first, matching `get_weights`/`set_weights`
    # ordering elsewhere in this class.
    return self.trainable_weights + self.non_trainable_weights

  @property
  @doc_controls.do_not_generate_docs
  def updates(self):
    # Deprecated no-op kept for backwards compatibility; always empty in TF2.
    warnings.warn('`layer.updates` will be removed in a future version. '
                  'This property should not be used in TensorFlow 2.0, '
                  'as `updates` are applied automatically.')
    return []

  @property
  def losses(self):
    """List of losses added using the `add_loss()` API.

    Variable regularization tensors are created when this property is
    accessed, so it is eager safe: accessing `losses` under a
    `tf.GradientTape` will propagate gradients back to the corresponding
    variables.

    Examples:

    >>> class MyLayer(tf.keras.layers.Layer):
    ...   def call(self, inputs):
    ...     self.add_loss(tf.abs(tf.reduce_mean(inputs)))
    ...     return inputs
    >>> l = MyLayer()
    >>> l(np.ones((10, 1)))
    >>> l.losses
    [1.0]

    >>> inputs = tf.keras.Input(shape=(10,))
    >>> x = tf.keras.layers.Dense(10)(inputs)
    >>> outputs = tf.keras.layers.Dense(1)(x)
    >>> model = tf.keras.Model(inputs, outputs)
    >>> # Activity regularization.
>>> len(model.losses) 0 >>> model.add_loss(tf.abs(tf.reduce_mean(x))) >>> len(model.losses) 1 >>> inputs = tf.keras.Input(shape=(10,)) >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones') >>> x = d(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Weight regularization. >>> model.add_loss(lambda: tf.reduce_mean(d.kernel)) >>> model.losses [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>] Returns: A list of tensors. """ collected_losses = [] for layer in self._flatten_layers(): # If any eager losses are present, we assume the model to be part of an # eager training loop (either a custom one or the one used when # `run_eagerly=True`) and so we always return just the eager losses. if layer._eager_losses: # Filter placeholder losses that may have been added by revived layers. # (see base_layer_utils for details). if (layer._eager_losses[0] is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER): collected_losses.extend(layer._eager_losses) else: collected_losses.extend(layer._losses) for regularizer in layer._callable_losses: loss_tensor = regularizer() if loss_tensor is not None: collected_losses.append(loss_tensor) return collected_losses def add_loss(self, losses, **kwargs): """Add loss tensor(s), potentially dependent on layer inputs. Some losses (for instance, activity regularization losses) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs `a` and `b`, some entries in `layer.losses` may be dependent on `a` and some on `b`. This method automatically keeps track of dependencies. This method can be used inside a subclassed layer or model's `call` function, in which case `losses` should be a Tensor or list of Tensors. Example: ```python class MyLayer(tf.keras.layers.Layer): def call(self, inputs): self.add_loss(tf.abs(tf.reduce_mean(inputs))) return inputs ``` This method can also be called directly on a Functional Model during construction. 
In this case, any loss Tensors passed to this Model must be symbolic and be able to be traced back to the model's `Input`s. These losses become part of the model's topology and are tracked in `get_config`. Example: ```python inputs = tf.keras.Input(shape=(10,)) x = tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) # Activity regularization. model.add_loss(tf.abs(tf.reduce_mean(x))) ``` If this is not the case for your loss (if, for example, your loss references a `Variable` of one of the model's layers), you can wrap your loss in a zero-argument lambda. These losses are not tracked as part of the model's topology since they can't be serialized. Example: ```python inputs = tf.keras.Input(shape=(10,)) d = tf.keras.layers.Dense(10) x = d(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) # Weight regularization. model.add_loss(lambda: tf.reduce_mean(d.kernel)) ``` Args: losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses may also be zero-argument callables which create a loss tensor. **kwargs: Additional keyword arguments for backward compatibility. Accepted values: inputs - Deprecated, will be automatically inferred. """ kwargs.pop('inputs', None) if kwargs: raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),)) def _tag_callable(loss): """Tags callable loss tensor as `_unconditional_loss`.""" if callable(loss): # We run the loss without autocasting, as regularizers are often # numerically unstable in float16. 
with autocast_variable.enable_auto_cast_variables(None): loss = loss() if loss is None: return None # Will be filtered out when computing the .losses property if not tf.is_tensor(loss): loss = tf.convert_to_tensor( loss, dtype=backend.floatx()) loss._unconditional_loss = True # pylint: disable=protected-access return loss losses = tf.nest.flatten(losses) callable_losses = [] eager_losses = [] symbolic_losses = [] for loss in losses: if callable(loss): callable_losses.append(functools.partial(_tag_callable, loss)) continue if loss is None: continue if not tf.is_tensor(loss) and not isinstance( loss, keras_tensor.KerasTensor): loss = tf.convert_to_tensor( loss, dtype=backend.floatx()) # TF Functions should take the eager path. if ((tf_utils.is_symbolic_tensor(loss) or isinstance(loss, keras_tensor.KerasTensor)) and not base_layer_utils.is_in_tf_function()): symbolic_losses.append(loss) elif tf.is_tensor(loss): eager_losses.append(loss) self._callable_losses.extend(callable_losses) in_call_context = base_layer_utils.call_context().in_call if eager_losses and not in_call_context: raise ValueError( 'Expected a symbolic Tensors or a callable for the loss value. ' 'Please wrap your loss computation in a zero argument `lambda`.') self._eager_losses.extend(eager_losses) for symbolic_loss in symbolic_losses: if getattr(self, '_is_graph_network', False): self._graph_network_add_loss(symbolic_loss) else: # Possible a loss was added in a Layer's `build`. self._losses.append(symbolic_loss) def _clear_losses(self): """Used every step in eager to reset losses.""" # Set to thread local directly to avoid Layer.__setattr__ overhead. if not getattr(self, '_self_tracked_trackables', None): # Fast path for single Layer. self._thread_local._eager_losses = [] else: for layer in self._flatten_layers(): layer._thread_local._eager_losses = [] @property def metrics(self): """List of metrics added using the `add_metric()` API. 
Example: >>> input = tf.keras.layers.Input(shape=(3,)) >>> d = tf.keras.layers.Dense(2) >>> output = d(input) >>> d.add_metric(tf.reduce_max(output), name='max') >>> d.add_metric(tf.reduce_min(output), name='min') >>> [m.name for m in d.metrics] ['max', 'min'] Returns: A list of `Metric` objects. """ collected_metrics = [] for layer in self._flatten_layers(): if not hasattr(layer, '_metrics_lock'): continue with layer._metrics_lock: collected_metrics.extend(layer._metrics) return collected_metrics def add_metric(self, value, name=None, **kwargs): """Adds metric tensor to the layer. This method can be used inside the `call()` method of a subclassed layer or model. ```python class MyMetricLayer(tf.keras.layers.Layer): def __init__(self): super(MyMetricLayer, self).__init__(name='my_metric_layer') self.mean = tf.keras.metrics.Mean(name='metric_1') def call(self, inputs): self.add_metric(self.mean(inputs)) self.add_metric(tf.reduce_sum(inputs), name='metric_2') return inputs ``` This method can also be called directly on a Functional Model during construction. In this case, any tensor passed to this Model must be symbolic and be able to be traced back to the model's `Input`s. These metrics become part of the model's topology and are tracked when you save the model via `save()`. ```python inputs = tf.keras.Input(shape=(10,)) x = tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) model.add_metric(math_ops.reduce_sum(x), name='metric_1') ``` Note: Calling `add_metric()` with the result of a metric object on a Functional Model, as shown in the example below, is not supported. This is because we cannot trace the metric result tensor back to the model's inputs. ```python inputs = tf.keras.Input(shape=(10,)) x = tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1') ``` Args: value: Metric tensor. 
name: String metric name. **kwargs: Additional keyword arguments for backward compatibility. Accepted values: `aggregation` - When the `value` tensor provided is not the result of calling a `keras.Metric` instance, it will be aggregated by default using a `keras.Metric.Mean`. """ kwargs_keys = list(kwargs.keys()) if (len(kwargs_keys) > 1 or (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')): raise TypeError('Unknown keyword arguments: ', str(kwargs.keys())) from_metric_obj = hasattr(value, '_metric_obj') is_symbolic = isinstance(value, keras_tensor.KerasTensor) in_call_context = base_layer_utils.call_context().in_call if name is None and not from_metric_obj: # Eg. `self.add_metric(math_ops.reduce_sum(x))` # In eager mode, we use metric name to lookup a metric. Without a name, # a new Mean metric wrapper will be created on every model/layer call. # So, we raise an error when no name is provided. # We will do the same for symbolic mode for consistency although a name # will be generated if no name is provided. # We will not raise this error in the foll use case for the sake of # consistency as name in provided in the metric constructor. # mean = metrics.Mean(name='my_metric') # model.add_metric(mean(outputs)) raise ValueError('Please provide a name for your metric like ' '`self.add_metric(tf.reduce_sum(inputs), ' 'name=\'mean_activation\')`') elif from_metric_obj: name = value._metric_obj.name if not in_call_context and not is_symbolic: raise ValueError('Expected a symbolic Tensor for the metric value, ' 'received: ' + str(value)) # If a metric was added in a Layer's `call` or `build`. if in_call_context or not getattr(self, '_is_graph_network', False): # TF Function path should take the eager path. # If the given metric is available in `metrics` list we just update state # on it, otherwise we create a new metric instance and # add it to the `metrics` list. 
metric_obj = getattr(value, '_metric_obj', None) # Tensors that come from a Metric object already updated the Metric state. should_update_state = not metric_obj name = metric_obj.name if metric_obj else name with self._metrics_lock: match = self._get_existing_metric(name) if match: metric_obj = match elif metric_obj: self._metrics.append(metric_obj) else: # Build the metric object with the value's dtype if it defines one metric_obj = metrics_mod.Mean( name=name, dtype=getattr(value, 'dtype', None)) self._metrics.append(metric_obj) if should_update_state: metric_obj(value) else: if from_metric_obj: raise ValueError('Using the result of calling a `Metric` object ' 'when calling `add_metric` on a Functional ' 'Model is not supported. Please pass the ' 'Tensor to monitor directly.') # Insert layers into the Keras Graph Network. aggregation = None if from_metric_obj else 'mean' self._graph_network_add_metric(value, aggregation, name) @doc_controls.do_not_doc_inheritable def add_update(self, updates, inputs=None): """Add update op(s), potentially dependent on layer inputs. Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs `a` and `b`, some entries in `layer.updates` may be dependent on `a` and some on `b`. This method automatically keeps track of dependencies. This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution). Args: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting `trainable=False` on this Layer, when executing in Eager mode. inputs: Deprecated, will be automatically inferred. 
""" if inputs is not None: tf_logging.warning( '`add_update` `inputs` kwarg has been deprecated. You no longer need ' 'to pass a value to `inputs` as it is being automatically inferred.') call_context = base_layer_utils.call_context() # No need to run updates during Functional API construction. if call_context.in_keras_graph: return # Callable updates are disabled by setting `trainable=False`. if not call_context.frozen: for update in tf.nest.flatten(updates): if callable(update): update() # pylint: disable=not-callable def set_weights(self, weights): """Sets the weights of the layer, from NumPy arrays. The weights of a layer represent the state of the layer. This function sets the weight values from numpy arrays. The weight values should be passed in the order they are created by the layer. Note that the layer's weights must be instantiated before calling this function, by calling the layer. For example, a `Dense` layer returns a list of two values: the kernel matrix and the bias vector. These can be used to set the weights of another `Dense` layer: >>> layer_a = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(1.)) >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]])) >>> layer_a.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] >>> layer_b = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(2.)) >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]])) >>> layer_b.get_weights() [array([[2.], [2.], [2.]], dtype=float32), array([0.], dtype=float32)] >>> layer_b.set_weights(layer_a.get_weights()) >>> layer_b.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] Args: weights: a list of NumPy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the layer (i.e. it should match the output of `get_weights`). Raises: ValueError: If the provided weights list does not match the layer's specifications. 
""" params = self.weights expected_num_weights = 0 for param in params: if isinstance(param, base_layer_utils.TrackableWeightHandler): expected_num_weights += param.num_tensors else: expected_num_weights += 1 if expected_num_weights != len(weights): raise ValueError( 'You called `set_weights(weights)` on layer "%s" ' 'with a weight list of length %s, but the layer was ' 'expecting %s weights. Provided weights: %s...' % (self.name, len(weights), expected_num_weights, str(weights)[:50])) weight_index = 0 weight_value_tuples = [] for param in params: if isinstance(param, base_layer_utils.TrackableWeightHandler): num_tensors = param.num_tensors tensors = weights[weight_index:weight_index + num_tensors] param.set_weights(tensors) weight_index += num_tensors else: weight = weights[weight_index] weight_shape = weight.shape if hasattr(weight, 'shape') else () ref_shape = param.shape if not ref_shape.is_compatible_with(weight_shape): raise ValueError( 'Layer weight shape %s not compatible with provided weight ' 'shape %s' % (ref_shape, weight_shape)) weight_value_tuples.append((param, weight)) weight_index += 1 backend.batch_set_value(weight_value_tuples) # Perform any layer defined finalization of the layer state. for layer in self._flatten_layers(): layer.finalize_state() def get_weights(self): """Returns the current weights of the layer, as NumPy arrays. The weights of a layer represent the state of the layer. This function returns both trainable and non-trainable weight values associated with this layer as a list of NumPy arrays, which can in turn be used to load state into similarly parameterized layers. For example, a `Dense` layer returns a list of two values: the kernel matrix and the bias vector. These can be used to set the weights of another `Dense` layer: >>> layer_a = tf.keras.layers.Dense(1, ... 
kernel_initializer=tf.constant_initializer(1.)) >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]])) >>> layer_a.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] >>> layer_b = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(2.)) >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]])) >>> layer_b.get_weights() [array([[2.], [2.], [2.]], dtype=float32), array([0.], dtype=float32)] >>> layer_b.set_weights(layer_a.get_weights()) >>> layer_b.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] Returns: Weights values as a list of NumPy arrays. """ weights = self.weights output_weights = [] for weight in weights: if isinstance(weight, base_layer_utils.TrackableWeightHandler): output_weights.extend(weight.get_tensors()) else: output_weights.append(weight) return backend.batch_get_value(output_weights) @doc_controls.do_not_generate_docs def finalize_state(self): """Finalizes the layers state after updating layer weights. This function can be subclassed in a layer and will be called after updating a layer weights. It can be overridden to finalize any additional layer state after a weight update. """ pass @doc_controls.do_not_generate_docs def get_updates_for(self, inputs): """Deprecated, do NOT use! Retrieves updates relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of update ops of the layer that depend on `inputs`. """ warnings.warn('`layer.get_updates_for` is deprecated and ' 'will be removed in a future version. ' 'Please use `layer.updates` method instead.') return self.updates @doc_controls.do_not_generate_docs def get_losses_for(self, inputs): """Deprecated, do NOT use! Retrieves losses relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of loss tensors of the layer that depend on `inputs`. 
""" warnings.warn('`layer.get_losses_for` is deprecated and ' 'will be removed in a future version. ' 'Please use `layer.losses` instead.') return self.losses @doc_controls.do_not_doc_inheritable def get_input_mask_at(self, node_index): """Retrieves the input mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs). """ inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, '_keras_mask', None) for x in inputs] else: return getattr(inputs, '_keras_mask', None) @doc_controls.do_not_doc_inheritable def get_output_mask_at(self, node_index): """Retrieves the output mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple outputs). """ output = self.get_output_at(node_index) if isinstance(output, list): return [getattr(x, '_keras_mask', None) for x in output] else: return getattr(output, '_keras_mask', None) @property @doc_controls.do_not_doc_inheritable def input_mask(self): """Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Input mask tensor (potentially None) or list of input mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. """ inputs = self.input if isinstance(inputs, list): return [getattr(x, '_keras_mask', None) for x in inputs] else: return getattr(inputs, '_keras_mask', None) @property @doc_controls.do_not_doc_inheritable def output_mask(self): """Retrieves the output mask tensor(s) of a layer. 
    Only applicable if the layer has exactly one inbound node,
    i.e. if it is connected to one incoming layer.

    Returns:
      Output mask tensor (potentially None) or list of output
      mask tensors.

    Raises:
      AttributeError: if the layer is connected to
        more than one incoming layers.
    """
    output = self.output
    if isinstance(output, list):
      return [getattr(x, '_keras_mask', None) for x in output]
    else:
      return getattr(output, '_keras_mask', None)

  @doc_controls.do_not_doc_inheritable
  def get_input_shape_at(self, node_index):
    """Retrieves the input shape(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first time the layer was called.

    Returns:
      A shape tuple
      (or list of shape tuples if the layer has multiple inputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    # Per-node getters all delegate to the shared node-attribute lookup.
    return self._get_node_attribute_at_index(node_index, 'input_shapes',
                                             'input shape')

  @doc_controls.do_not_doc_inheritable
  def get_output_shape_at(self, node_index):
    """Retrieves the output shape(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first time the layer was called.

    Returns:
      A shape tuple
      (or list of shape tuples if the layer has multiple outputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    return self._get_node_attribute_at_index(node_index, 'output_shapes',
                                             'output shape')

  @doc_controls.do_not_doc_inheritable
  def get_input_at(self, node_index):
    """Retrieves the input tensor(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first input node of the layer.

    Returns:
      A tensor (or list of tensors if the layer has multiple inputs).

    Raises:
      RuntimeError: If called in Eager mode.
""" return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input') @doc_controls.do_not_doc_inheritable def get_output_at(self, node_index): """Retrieves the output tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first output node of the layer. Returns: A tensor (or list of tensors if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output') @property def input(self): """Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found. """ if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.') return self._get_node_attribute_at_index(0, 'input_tensors', 'input') @property def output(self): """Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' has no inbound nodes.') return self._get_node_attribute_at_index(0, 'output_tensors', 'output') @property @doc_controls.do_not_doc_inheritable def input_shape(self): """Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). 
Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('The layer has never been called ' 'and thus has no defined input shape.') all_input_shapes = set( [str(node.input_shapes) for node in self._inbound_nodes]) if len(all_input_shapes) == 1: return self._inbound_nodes[0].input_shapes else: raise AttributeError('The layer "' + str(self.name) + ' has multiple inbound nodes, ' 'with different input shapes. Hence ' 'the notion of "input shape" is ' 'ill-defined for the layer. ' 'Use `get_input_shape_at(node_index)` ' 'instead.') def count_params(self): """Count the total number of scalars composing the weights. Returns: An integer count. Raises: ValueError: if the layer isn't yet built (in which case its weights aren't yet defined). """ if not self.built: if getattr(self, '_is_graph_network', False): with tf_utils.maybe_init_scope(self): self._maybe_build(self.inputs) else: raise ValueError('You tried to call `count_params` on ' + self.name + ', but the layer isn\'t built. ' 'You can build it manually via: `' + self.name + '.build(batch_input_shape)`.') return layer_utils.count_params(self.weights) @property @doc_controls.do_not_doc_inheritable def output_shape(self): """Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode. 
""" if not self._inbound_nodes: raise AttributeError('The layer has never been called ' 'and thus has no defined output shape.') all_output_shapes = set( [str(node.output_shapes) for node in self._inbound_nodes]) if len(all_output_shapes) == 1: return self._inbound_nodes[0].output_shapes else: raise AttributeError('The layer "%s"' ' has multiple inbound nodes, ' 'with different output shapes. Hence ' 'the notion of "output shape" is ' 'ill-defined for the layer. ' 'Use `get_output_shape_at(node_index)` ' 'instead.' % self.name) @property @doc_controls.do_not_doc_inheritable def inbound_nodes(self): """Deprecated, do NOT use! Only for compatibility with external Keras.""" return self._inbound_nodes @property @doc_controls.do_not_doc_inheritable def outbound_nodes(self): """Deprecated, do NOT use! Only for compatibility with external Keras.""" return self._outbound_nodes ############################################################################## # Methods & attributes below are public aliases of other methods. # ############################################################################## @doc_controls.do_not_doc_inheritable def apply(self, inputs, *args, **kwargs): """Deprecated, do NOT use! This is an alias of `self.__call__`. Args: inputs: Input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. Returns: Output tensor(s). """ warnings.warn('`layer.apply` is deprecated and ' 'will be removed in a future version. ' 'Please use `layer.__call__` method instead.') return self.__call__(inputs, *args, **kwargs) @doc_controls.do_not_doc_inheritable def add_variable(self, *args, **kwargs): """Deprecated, do NOT use! Alias for `add_weight`.""" warnings.warn('`layer.add_variable` is deprecated and ' 'will be removed in a future version. 
' 'Please use `layer.add_weight` method instead.') return self.add_weight(*args, **kwargs) @property @doc_controls.do_not_generate_docs def variables(self): """Returns the list of all layer variables/weights. Alias of `self.weights`. Note: This will not track the weights of nested `tf.Modules` that are not themselves Keras layers. Returns: A list of variables. """ return self.weights @property @doc_controls.do_not_generate_docs def trainable_variables(self): return self.trainable_weights @property @doc_controls.do_not_generate_docs def non_trainable_variables(self): return self.non_trainable_weights ############################################################################## # Methods & attributes below are all private and only used by the framework. # ############################################################################## @property def _inbound_nodes(self): return self._inbound_nodes_value @_inbound_nodes.setter @tf.__internal__.tracking.no_automatic_dependency_tracking def _inbound_nodes(self, value): self._inbound_nodes_value = value @property def _outbound_nodes(self): return self._outbound_nodes_value @_outbound_nodes.setter @tf.__internal__.tracking.no_automatic_dependency_tracking def _outbound_nodes(self, value): self._outbound_nodes_value = value def _set_dtype_policy(self, dtype): """Sets self._dtype_policy.""" if isinstance(dtype, policy.Policy): self._dtype_policy = dtype elif isinstance(dtype, dict): self._dtype_policy = policy.deserialize(dtype) elif isinstance(dtype, str) and dtype in ('mixed_float16', 'mixed_bfloat16'): # The isinstance check is required since np.dtype raises an error if # compared to a non-dtype string. 
      self._dtype_policy = policy.Policy(dtype)
    elif dtype:
      # Any other truthy dtype-like value (string, np.dtype, DType):
      # normalize through tf.as_dtype before building the Policy.
      self._dtype_policy = policy.Policy(tf.as_dtype(dtype).name)
    else:
      # No dtype given: fall back to the global mixed-precision policy.
      self._dtype_policy = policy.global_policy()
    if (self._dtype_policy.name == 'mixed_float16' and
        not loss_scale_optimizer.strategy_supports_loss_scaling()):
      # Although only loss scaling doesn't support certain strategies, to avoid
      # confusion, we disallow the 'mixed_float16' policy with unsupported
      # strategies. This is because 'mixed_float16' requires loss scaling for
      # numeric stability.
      strategy = tf.distribute.get_strategy()
      raise ValueError('Mixed precision is not supported with the '
                       'tf.distribute.Strategy: %s. Either stop using mixed '
                       'precision by removing the use of the "%s" policy or '
                       'use a different Strategy, e.g. a MirroredStrategy.' %
                       (strategy.__class__.__name__, self._dtype_policy.name))

    # Performance optimization: cache the compute dtype as a Dtype object or
    # None, so that str to Dtype conversion doesn't happen in Layer.__call__.
    # TODO(b/157486353): Investigate returning DTypes in Policy.
    if self._dtype_policy.compute_dtype:
      self._compute_dtype_object = tf.as_dtype(
          self._dtype_policy.compute_dtype)
    else:
      self._compute_dtype_object = None

  @property
  def dtype_policy(self):
    """The dtype policy associated with this layer.

    This is an instance of a `tf.keras.mixed_precision.Policy`.
    """
    return self._dtype_policy

  @property
  def compute_dtype(self):
    """The dtype of the layer's computations.

    This is equivalent to `Layer.dtype_policy.compute_dtype`. Unless
    mixed precision is used, this is the same as `Layer.dtype`, the dtype of
    the weights.

    Layers automatically cast their inputs to the compute dtype, which causes
    computations and the output to be in the compute dtype as well. This is
    done by the base Layer class in `Layer.__call__`, so you do not have to
    insert these casts if implementing your own layer.
Layers often perform certain internal computations in higher precision when `compute_dtype` is float16 or bfloat16 for numeric stability. The output will still typically be float16 or bfloat16 in such cases. Returns: The layer's compute dtype. """ return self._dtype_policy.compute_dtype @property def _compute_dtype(self): """Deprecated alias of `compute_dtype`.""" return self._dtype_policy.compute_dtype @property def variable_dtype(self): """Alias of `Layer.dtype`, the dtype of the weights.""" return self.dtype def _maybe_cast_inputs(self, inputs, input_list=None): """Maybe casts the inputs to the compute dtype. If self._compute_dtype is floating-point, and self_autocast is True, floating-point inputs are casted to self._compute_dtype. Args: inputs: Input tensor, or structure of input tensors. input_list: Flat list of input tensors. Returns: `inputs`, but tensors may have been casted to self._compute_dtype """ if not input_list: input_list = tf.nest.flatten(inputs) compute_dtype_object = self._compute_dtype_object should_autocast = ( self._autocast and compute_dtype_object and compute_dtype_object.is_floating) if (should_autocast and any(map(self._should_cast_single_input, input_list))): # Only perform expensive `nest` operation when needed. return tf.nest.map_structure(self._cast_single_input, inputs) else: return inputs def _should_cast_single_input(self, x): if isinstance(x, _AUTOCAST_TYPES): return (self._compute_dtype_object and x.dtype != self._compute_dtype_object and x.dtype.is_floating) return False def _cast_single_input(self, x): """Cast a single Tensor or TensorSpec to the compute dtype.""" if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_object) else: return x # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property. @property def _dtype(self): # This is equivalent to returning self.dtype . 
We do not return self.dtype # as it would cause infinite recursion in a few subclasses, which override # "dtype" to return self._dtype. return self._dtype_policy.variable_dtype @_dtype.setter def _dtype(self, value): value = tf.as_dtype(value).name self._set_dtype_policy(policy.Policy(value)) def _name_scope(self): # pylint: disable=method-hidden if not tf.__internal__.tf2.enabled(): return self.name name_scope = self.name if _is_name_scope_on_model_declaration_enabled and self._outer_name_scope: name_scope = self._outer_name_scope + '/' + name_scope current_name_scope = tf.__internal__.get_name_scope() if current_name_scope: name_scope = current_name_scope + '/' + name_scope if name_scope: # Note that the trailing `/` prevents autogenerated # numerical suffixes to get appended. It will also fully reset # nested name scope (i.e. the outer name scope has no effect). name_scope += '/' return name_scope def _init_set_name(self, name, zero_based=True): if name is None: self._name = backend.unique_object_name( generic_utils.to_snake_case(self.__class__.__name__), zero_based=zero_based) elif isinstance(name, str): backend.observe_object_name(name) self._name = name else: raise TypeError( f'Expected `name` argument to be a string, but got: {name}') def _get_existing_metric(self, name=None): match = [m for m in self._metrics if m.name == name] if not match: return if len(match) > 1: raise ValueError( 'Please provide different names for the metrics you have added. 
' 'We found {} metrics with the name: "{}"'.format(len(match), name)) return match[0] def _handle_weight_regularization(self, name, variable, regularizer): """Create lambdas which compute regularization losses.""" def _loss_for_variable(v): """Creates a regularization loss `Tensor` for variable `v`.""" with backend.name_scope(name + '/Regularizer'): regularization = regularizer(v) return regularization if base_layer_utils.is_split_variable(variable): for v in variable: self.add_loss(functools.partial(_loss_for_variable, v)) else: self.add_loss(functools.partial(_loss_for_variable, variable)) def _handle_activity_regularization(self, inputs, outputs): # Apply activity regularization. # Note that it should be applied every time the layer creates a new # output, since it is output-specific. if self._activity_regularizer: output_list = tf.nest.flatten(outputs) with backend.name_scope('ActivityRegularizer'): for output in output_list: activity_loss = self._activity_regularizer(output) batch_size = tf.cast( tf.shape(output)[0], activity_loss.dtype) # Make activity regularization strength batch-agnostic. mean_activity_loss = activity_loss / batch_size self.add_loss(mean_activity_loss) def _set_mask_metadata(self, inputs, outputs, previous_mask, build_graph): # Many `Layer`s don't need to call `compute_mask`. # This method is optimized to do as little work as needed for the common # case. 
if not self._supports_masking: return flat_outputs = tf.nest.flatten(outputs) mask_already_computed = ( getattr(self, '_compute_output_and_mask_jointly', False) or all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs)) if mask_already_computed: if build_graph: self._set_mask_keras_history_checked(flat_outputs) return output_masks = self.compute_mask(inputs, previous_mask) if output_masks is None: return flat_masks = tf.nest.flatten(output_masks) for tensor, mask in zip(flat_outputs, flat_masks): try: tensor._keras_mask = mask except AttributeError: # C Type such as np.ndarray. pass if build_graph: self._set_mask_keras_history_checked(flat_outputs) def _set_mask_keras_history_checked(self, flat_outputs): for output in flat_outputs: if getattr(output, '_keras_mask', None) is not None: # Do not track masks for `TensorFlowOpLayer` construction. output._keras_mask._keras_history_checked = True def _get_input_masks(self, inputs, input_list, args, kwargs): if not self._supports_masking and not self._expects_mask_arg: # Input masks only need to be retrieved if they are needed for `call` # or `compute_mask`. input_masks = None implicit_mask = False elif self._call_arg_was_passed('mask', args, kwargs): input_masks = self._get_call_arg_value('mask', args, kwargs) implicit_mask = False else: input_masks = [getattr(t, '_keras_mask', None) for t in input_list] if all(mask is None for mask in input_masks): input_masks = None implicit_mask = False else: # Only do expensive `nest` op when masking is actually being used. input_masks = tf.nest.pack_sequence_as(inputs, input_masks) implicit_mask = True return input_masks, implicit_mask def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False): # Performance optimization: do no work in most common case. if not args and not kwargs: return False if arg_name in kwargs: return True call_fn_args = self._call_fn_args if not inputs_in_args: # Ignore `inputs` arg. 
call_fn_args = call_fn_args[1:] return arg_name in dict(zip(call_fn_args, args)) def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False): if arg_name in kwargs: return kwargs[arg_name] call_fn_args = self._call_fn_args if not inputs_in_args: # Ignore `inputs` arg. call_fn_args = call_fn_args[1:] args_dict = dict(zip(call_fn_args, args)) return args_dict[arg_name] def _set_call_arg_value( self, arg_name, new_value, args, kwargs, inputs_in_args=False, pop_kwarg_if_none=False): arg_pos = self._call_fn_arg_positions.get(arg_name, None) if arg_pos is not None: if not inputs_in_args: # Ignore `inputs` arg. arg_pos = arg_pos - 1 if len(args) > arg_pos: args = list(args) args[arg_pos] = new_value return tuple(args), kwargs if new_value is None and pop_kwarg_if_none: kwargs.pop(arg_name, None) else: kwargs[arg_name] = new_value return args, kwargs def _set_connectivity_metadata(self, args, kwargs, outputs): # If the layer returns tensors from its inputs unmodified, # we copy them to avoid loss of KerasHistory metadata. flat_outputs = tf.nest.flatten(outputs) flat_inputs = tf.nest.flatten((args, kwargs)) input_ids_set = {id(i) for i in flat_inputs} outputs_copy = [] for x in flat_outputs: if id(x) in input_ids_set: with backend.name_scope(self.name): x = tf.identity(x) outputs_copy.append(x) outputs = tf.nest.pack_sequence_as(outputs, outputs_copy) # Create node, Node wires itself to inbound and outbound layers. # The Node constructor actually updates this layer's self._inbound_nodes, # sets _keras_history on the outputs, and adds itself to the # `_outbound_nodes` of the layers that produced the inputs to this # layer call. node_module.Node(self, call_args=args, call_kwargs=kwargs, outputs=outputs) return outputs def _get_node_attribute_at_index(self, node_index, attr, attr_name): """Private utility to retrieves an attribute (e.g. inputs) from a node. This is used to implement the methods: - get_input_shape_at - get_output_shape_at - get_input_at etc... 
Args: node_index: Integer index of the node from which to retrieve the attribute. attr: Exact node attribute name. attr_name: Human-readable attribute name, for error messages. Returns: The layer's attribute `attr` at the node of index `node_index`. Raises: RuntimeError: If the layer has no inbound nodes, or if called in Eager mode. ValueError: If the index provided does not match any node. """ if not self._inbound_nodes: raise RuntimeError('The layer has never been called ' 'and thus has no defined ' + attr_name + '.') if not len(self._inbound_nodes) > node_index: raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.') values = getattr(self._inbound_nodes[node_index], attr) if isinstance(values, list) and len(values) == 1: return values[0] else: return values def _maybe_build(self, inputs): # Check input assumptions set before layer building, e.g. input rank. if not self.built: input_spec.assert_input_compatibility( self.input_spec, inputs, self.name) input_list = tf.nest.flatten(inputs) if input_list and self._dtype_policy.compute_dtype is None: try: dtype = input_list[0].dtype.base_dtype.name except AttributeError: pass else: self._set_dtype_policy(policy.Policy(dtype)) input_shapes = None # Converts Tensors / CompositeTensors to TensorShapes. if all(hasattr(x, 'shape') for x in input_list): input_shapes = tf_utils.get_shapes(inputs) else: # Converts input shape to TensorShapes. try: input_shapes = tf_utils.convert_shapes(inputs, to_tuples=False) except ValueError: pass # Only call `build` if the user has manually overridden the build method. if not hasattr(self.build, '_is_default'): # Any setup work performed only once should happen in an `init_scope` # to avoid creating symbolic Tensors that will later pollute any eager # operations. 
with tf_utils.maybe_init_scope(self): self.build(input_shapes) # pylint:disable=not-callable # We must set also ensure that the layer is marked as built, and the build # shape is stored since user defined build functions may not be calling # `super.build()` Layer.build(self, input_shapes) # Optionally load weight values specified at layer instantiation. if self._initial_weights is not None: with tf.init_scope(): # Using `init_scope` since we want variable assignment in # `set_weights` to be treated like variable initialization. self.set_weights(self._initial_weights) self._initial_weights = None def _symbolic_call(self, inputs): input_shapes = tf.nest.map_structure(lambda x: x.shape, inputs) output_shapes = self.compute_output_shape(input_shapes) # Convert to TensorShape so that nest.map_structure will not map into # individual dim of the shape. output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False) def _make_placeholder_like(shape): ph = backend.placeholder(shape=shape, dtype=self.dtype) ph._keras_mask = None return ph return tf.nest.map_structure(_make_placeholder_like, output_shapes) def _get_trainable_state(self): """Get the `trainable` state of each sublayer. Returns: A dict mapping all sublayers to their `trainable` value. 
""" trainable_state = weakref.WeakKeyDictionary() for layer in self._flatten_layers(): trainable_state[layer] = layer.trainable return trainable_state def _set_trainable_state(self, trainable_state): """Set `trainable` state for each sublayer.""" for layer in self._flatten_layers(): if layer in trainable_state: layer.trainable = trainable_state[layer] @property def _obj_reference_counts(self): """A dictionary counting the number of attributes referencing an object.""" self._maybe_create_attribute('_obj_reference_counts_dict', object_identity.ObjectIdentityDictionary()) return self._obj_reference_counts_dict @tf.__internal__.tracking.no_automatic_dependency_tracking def _maybe_create_attribute(self, name, default_value): """Create the attribute with the default value if it hasn't been created. This is useful for fields that is used for tracking purpose, _trainable_weights, or _layers. Note that user could create a layer subclass and assign an internal field before invoking the Layer.__init__(), the __setattr__() need to create the tracking fields and __init__() need to not override them. Args: name: String, the name of the attribute. default_value: Object, the default value of the attribute. """ if not hasattr(self, name): self.__setattr__(name, default_value) def __delattr__(self, name): # For any super.__delattr__() call, we will directly use the implementation # in Trackable and skip the behavior in AutoTrackable. The Layer was # originally use Trackable as base class, the change of using Module as base # class forced us to have AutoTrackable in the class hierarchy. # # TODO(b/180760306) Keeping the status quo of skipping _delattr__ and # __setattr__ in AutoTrackable may be unsustainable. existing_value = getattr(self, name, None) # If this value is replacing an existing object assigned to an attribute, we # should clean it out to avoid leaking memory. First we check if there are # other attributes referencing it. 
reference_counts = self._obj_reference_counts if existing_value not in reference_counts: super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call return reference_count = reference_counts[existing_value] if reference_count > 1: # There are other remaining references. We can't remove this object from # _layers etc. reference_counts[existing_value] = reference_count - 1 super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call return else: # This is the last remaining reference. del reference_counts[existing_value] super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call if (isinstance(existing_value, Layer) or base_layer_utils.has_weights(existing_value)): super(tf.__internal__.tracking.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call '_self_tracked_trackables', [l for l in self._self_tracked_trackables if l is not existing_value]) if isinstance(existing_value, tf.Variable): super(tf.__internal__.tracking.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call '_trainable_weights', [w for w in self._trainable_weights if w is not existing_value]) super(tf.__internal__.tracking.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call '_non_trainable_weights', [w for w in self._non_trainable_weights if w is not existing_value]) def __setattr__(self, name, value): if (name == '_self_setattr_tracking' or not getattr(self, '_self_setattr_tracking', True) or # Exclude @property.setters from tracking hasattr(self.__class__, name)): try: super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value) # pylint: disable=bad-super-call except AttributeError: raise AttributeError( ('Can\'t set the attribute "{}", likely because it conflicts with ' 'an existing read-only @property of the object. 
Please choose a ' 'different name.').format(name)) return # Wraps data structures in `Trackable`, unwraps `NoDependency` objects. value = tf.__internal__.tracking.sticky_attribute_assignment( trackable=self, value=value, name=name) reference_counts = self._obj_reference_counts reference_counts[value] = reference_counts.get(value, 0) + 1 # Clean out the old attribute, which clears _layers and _trainable_weights # if necessary. try: self.__delattr__(name) except AttributeError: pass # Keep track of metric instance created in subclassed layer. for val in tf.nest.flatten(value): if isinstance(val, metrics_mod.Metric) and hasattr(self, '_metrics'): self._metrics.append(val) # Append value to self._self_tracked_trackables if relevant if (getattr(self, '_auto_track_sub_layers', True) and (isinstance(value, tf.Module) or base_layer_utils.has_weights(value))): self._maybe_create_attribute('_self_tracked_trackables', []) # We need to check object identity to avoid de-duplicating empty # container types which compare equal. if not any((layer is value for layer in self._self_tracked_trackables)): self._self_tracked_trackables.append(value) if hasattr(value, '_use_resource_variables'): # Legacy layers (V1 tf.layers) must always use # resource variables. value._use_resource_variables = True # Append value to list of trainable / non-trainable weights if relevant # TODO(b/125122625): This won't pick up on any variables added to a # list/dict after creation. 
for val in tf.nest.flatten(value, expand_composites=True): if not isinstance(val, tf.Variable): continue # Users may add extra weights/variables # simply by assigning them to attributes (invalid for graph networks) self._maybe_create_attribute('_trainable_weights', []) self._maybe_create_attribute('_non_trainable_weights', []) if val.trainable: if any(val is w for w in self._trainable_weights): continue self._trainable_weights.append(val) else: if any(val is w for w in self._non_trainable_weights): continue self._non_trainable_weights.append(val) backend.track_variable(val) # TODO(b/180760306) Skip the auto trackable from tf.Module to keep status # quo. See the comment at __delattr__. super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value) # pylint: disable=bad-super-call def _gather_children_attribute(self, attribute): assert attribute in { 'variables', 'trainable_variables', 'non_trainable_variables' } if hasattr(self, '_self_tracked_trackables'): nested_layers = self._flatten_modules(include_self=False, recursive=False) return list( itertools.chain.from_iterable( getattr(layer, attribute) for layer in nested_layers)) return [] def _flatten_layers(self, recursive=True, include_self=True): for m in self._flatten_modules( recursive=recursive, include_self=include_self): if isinstance(m, Layer): yield m def _flatten_modules(self, recursive=True, include_self=True): """Flattens `tf.Module` instances (excluding `Metrics`). Args: recursive: Whether to recursively flatten through submodules. include_self: Whether to include this `Layer` instance. Yields: `tf.Module` instance tracked by this `Layer`. """ if include_self: yield self # Only instantiate set and deque if needed. 
trackables = getattr(self, '_self_tracked_trackables', None) if trackables: seen_object_ids = set() deque = collections.deque(trackables) while deque: trackable_obj = deque.popleft() trackable_id = id(trackable_obj) if trackable_id in seen_object_ids: continue seen_object_ids.add(trackable_id) # Metrics are not considered part of the Layer's topology. if (isinstance(trackable_obj, tf.Module) and not isinstance(trackable_obj, metrics_mod.Metric)): yield trackable_obj # Introspect recursively through sublayers. if recursive: subtrackables = getattr(trackable_obj, '_self_tracked_trackables', None) if subtrackables: deque.extendleft(reversed(subtrackables)) elif isinstance(trackable_obj, tf.__internal__.tracking.TrackableDataStructure): # Data structures are introspected even with `recursive=False`. tracked_values = trackable_obj._values if tracked_values: deque.extendleft(reversed(tracked_values)) # This is a hack so that the is_layer (within # training/trackable/layer_utils.py) check doesn't get the weights attr. # TODO(b/110718070): Remove when fixed. def _is_layer(self): return True def _init_call_fn_args(self, expects_training_arg=None): # Clear cached call function arguments. self.__class__._call_full_argspec.fget.cache.pop(self, None) self.__class__._call_fn_args.fget.cache.pop(self, None) self.__class__._call_accepts_kwargs.fget.cache.pop(self, None) call_fn_args = self._call_fn_args call_fn_args += self._call_full_argspec.kwonlyargs or [] if expects_training_arg is None: self._expects_training_arg = ('training' in call_fn_args or self._call_accepts_kwargs) else: # Use value encoded into the metadata when loading from the SavedModel. self._expects_training_arg = expects_training_arg # The default training arg will be any (non-None) default specified in the # method signature, or None if no value is specified. 
call_fn_arg_defaults = self._call_fn_arg_defaults.copy() call_fn_arg_defaults.update(self._call_full_argspec.kwonlydefaults or {}) self._default_training_arg = call_fn_arg_defaults.get('training') self._expects_mask_arg = ('mask' in call_fn_args or self._call_accepts_kwargs) @property @layer_utils.cached_per_instance def _call_full_argspec(self): # Argspec inspection is expensive and the call spec is used often, so it # makes sense to cache the result. return tf_inspect.getfullargspec(self.call) @property @layer_utils.cached_per_instance def _call_fn_args(self): all_args = self._call_full_argspec.args # Scrub `self` that appears if a decorator was applied. if all_args and all_args[0] == 'self': return all_args[1:] return all_args @property @layer_utils.cached_per_instance def _call_fn_arg_defaults(self): call_fn_args = self._call_fn_args call_fn_defaults = self._call_full_argspec.defaults or [] defaults = dict() # The call arg defaults are an n-tuple of the last n elements of the args # list. (n = # of elements that have a default argument) for i in range(-1 * len(call_fn_defaults), 0): defaults[call_fn_args[i]] = call_fn_defaults[i] return defaults @property @layer_utils.cached_per_instance def _call_fn_arg_positions(self): call_fn_arg_positions = dict() for pos, arg in enumerate(self._call_fn_args): call_fn_arg_positions[arg] = pos return call_fn_arg_positions @property @layer_utils.cached_per_instance def _call_accepts_kwargs(self): return self._call_full_argspec.varkw is not None @property def _eager_losses(self): # A list of loss values containing activity regularizers and losses # manually added through `add_loss` during eager execution. It is cleared # after every batch. # Because we plan on eventually allowing a same model instance to be trained # in eager mode or graph mode alternatively, we need to keep track of # eager losses and symbolic losses via separate attributes. 
if not hasattr(self._thread_local, '_eager_losses'): self._thread_local._eager_losses = [] return self._thread_local._eager_losses @_eager_losses.setter def _eager_losses(self, losses): self._thread_local._eager_losses = losses def _dedup_weights(self, weights): """Dedupe weights while maintaining order as much as possible.""" output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output def _split_out_first_arg(self, args, kwargs): # Grab the argument corresponding to the first argument in the # layer's `call` method spec. This will either be the first positional # argument, or it will be provided as a keyword argument. if args: inputs = args[0] args = args[1:] elif self._call_fn_args[0] in kwargs: kwargs = copy.copy(kwargs) inputs = kwargs.pop(self._call_fn_args[0]) else: raise ValueError( 'The first argument to `Layer.call` must always be passed.') return inputs, args, kwargs # SavedModel properties. Please see keras/saving/saved_model for details. @tf.__internal__.tracking.no_automatic_dependency_tracking def _set_save_spec(self, inputs, args=None, kwargs=None): """Defines the save spec so that serialization is able to trace layer call. The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are saved into a tuple of `([inputs] + args, kwargs)`. Args: inputs: possibly nested inputs passed into the call function. args: a list of positional arguments passed into call. kwargs: a dictionary of keyword arguments passed into call. """ if self._saved_model_inputs_spec is not None: return # Already set. args = args or [] kwargs = kwargs or {} inputs_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, inputs) # Filter out non-tensor arguments from args and kwargs. 
args_spec = [] for arg in args: flat_arg = tf.nest.flatten(arg) flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_arg] if any(s is None for s in flat_specs): break # Stop recording positional args once a non-tensor has been found args_spec.append(tf.nest.pack_sequence_as(arg, flat_specs)) kwargs_spec = {} for key, kwarg in kwargs.items(): if key == 'training': continue flat_kwarg = tf.nest.flatten(kwarg) flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg] if any(s is None for s in flat_specs): continue kwargs[key] = args_spec.append( tf.nest.pack_sequence_as(kwarg, flat_specs)) self._saved_model_inputs_spec = inputs_spec self._saved_model_arg_spec = ([inputs_spec] + args_spec, kwargs_spec) def _get_save_spec(self, dynamic_batch=True, inputs_only=True): if self._saved_model_inputs_spec is None: return None spec = tf.nest.map_structure( lambda t: tf_utils.get_tensor_spec(t, dynamic_batch=dynamic_batch), self._saved_model_arg_spec) return spec[0][0] if inputs_only else spec @property def _trackable_saved_model_saver(self): return layer_serialization.LayerSavedModelSaver(self) @property def _object_identifier(self): return self._trackable_saved_model_saver.object_identifier @property def _tracking_metadata(self): """Info about this layer to be saved into the SavedModel.""" return self._trackable_saved_model_saver.tracking_metadata def _list_extra_dependencies_for_serialization(self, serialization_cache): return (self._trackable_saved_model_saver .list_extra_dependencies_for_serialization(serialization_cache)) def _list_functions_for_serialization(self, serialization_cache): return (self._trackable_saved_model_saver .list_functions_for_serialization(serialization_cache)) @property def _use_input_spec_as_call_signature(self): # Whether input spec can be used as the call signature when tracing the # Layer for SavedModel. 
By default, this is set to `True` for layers # exported from the Keras library, because the layers more rigidly define # the `input_specs` property (many custom layers only set the `ndims`) return get_canonical_name_for_symbol(type(self), api_name='keras') is not None def __getstate__(self): # Override to support `copy.deepcopy` and pickling. # Thread-local objects cannot be copied in Python 3, so pop these. # Thread-local objects are used to cache losses in MirroredStrategy, and # so shouldn't be copied. state = self.__dict__.copy() state.pop('_thread_local', None) state.pop('_metrics_lock', None) return state def __setstate__(self, state): state['_thread_local'] = threading.local() state['_metrics_lock'] = threading.Lock() # Bypass Trackable logic as `__dict__` already contains this info. object.__setattr__(self, '__dict__', state) class TensorFlowOpLayer(Layer): """Wraps a TensorFlow Operation in a Layer. This class is used internally by the Functional API. When a user uses a raw TensorFlow Operation on symbolic tensors originating from an `Input` Layer, the resultant operation will be wrapped with this Layer object in order to make the operation compatible with the Keras API. This Layer will create a new, identical operation (except for inputs and outputs) every time it is called. If `run_eagerly` is `True`, the op creation and calculation will happen inside an Eager function. Instances of this Layer are created when `autolambda` is called, which is whenever a Layer's `__call__` encounters symbolic inputs that do not have Keras metadata, or when a Network's `__init__` encounters outputs that do not have Keras metadata. Attributes: node_def: String, the serialized NodeDef of the Op this layer will wrap. name: String, the name of the Layer. constants: Dict of NumPy arrays, the values of any Tensors needed for this Operation that do not originate from a Keras `Input` Layer. 
Since all placeholders must come from Keras `Input` Layers, these Tensors must be treated as constant in the Functional API. trainable: Bool, whether this Layer is trainable. Currently Variables are not supported, and so this parameter has no effect. dtype: The default dtype of this Layer. Inherited from `Layer` and has no effect on this class, however is used in `get_config`. """ @tf.__internal__.tracking.no_automatic_dependency_tracking def __init__(self, node_def, name, constants=None, trainable=True, dtype=None): # Pass autocast=False, as if inputs are cast, input types might not match # Operation type. super(TensorFlowOpLayer, self).__init__( name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype, autocast=False) if isinstance(node_def, dict): self.node_def = json_format.ParseDict(node_def, tf.compat.v1.NodeDef()) else: if not isinstance(node_def, bytes): node_def = node_def.encode('utf-8') self.node_def = tf.compat.v1.NodeDef.FromString(node_def) # JSON serialization stringifies keys which are integer input indices. self.constants = ({ int(index): constant for index, constant in constants.items() } if constants is not None else {}) # Layer uses original op unless it is called on new inputs. # This means `built` is not set in `__call__`. self.built = True # Do not individually trace TensorflowOpLayers in the SavedModel. self._must_restore_from_config = True def call(self, inputs): if tf.executing_eagerly(): return self._defun_call(inputs) return self._make_op(inputs) def _make_node_def(self, graph): node_def = tf.compat.v1.NodeDef() node_def.CopyFrom(self.node_def) # Used in TPUReplicateContext to indicate whether this node has been cloned # and to not add TPU attributes. 
node_def.attr['_cloned'].b = True node_def.name = graph.unique_name(node_def.name) return node_def def _make_op(self, inputs): inputs = tf.nest.flatten(inputs) graph = inputs[0].graph node_def = self._make_node_def(graph) with graph.as_default(): for index, constant in self.constants.items(): # Recreate constant in graph to add distribution context. value = tf.get_static_value(constant) if value is not None: constant = tf.constant(value, name=node_def.input[index]) inputs.insert(index, constant) # TODO(b/183990973): We should drop or consolidate these private api calls # for adding an op to the graph and recording its gradient. c_op = tf.__internal__.create_c_op(graph, node_def, inputs, control_inputs=[]) op = graph._create_op_from_tf_operation(c_op) op._control_flow_post_processing() # Record the gradient because custom-made ops don't go through the # code-gen'd eager call path op_type = tf.compat.as_str(op.op_def.name) attr_names = [tf.compat.as_str(attr.name) for attr in op.op_def.attr] attrs = [] for attr_name in attr_names: attrs.append(attr_name) attrs.append(op.get_attr(attr_name)) attrs = tuple(attrs) tf.__internal__.record_gradient(op_type, op.inputs, attrs, op.outputs) if len(op.outputs) == 1: return op.outputs[0] return op.outputs @tf.function def _defun_call(self, inputs): """Wraps the op creation method in an Eager function for `run_eagerly`.""" return self._make_op(inputs) def get_config(self): config = super(TensorFlowOpLayer, self).get_config() config.update({ # `__init__` prefixes the name. Revert to the constructor argument. 'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):], 'node_def': json_format.MessageToDict(self.node_def), 'constants': { i: backend.get_value(c) for i, c in self.constants.items() } }) return config class AddLoss(Layer): """Adds its inputs as a loss. Attributes: unconditional: Whether or not the loss should be conditioned on the inputs. 
""" def __init__(self, unconditional, **kwargs): # Pass autocast=False, as there is no reason to cast loss to a different # dtype. kwargs['autocast'] = False super(AddLoss, self).__init__(**kwargs) self.unconditional = unconditional def call(self, inputs): self.add_loss(inputs, inputs=(not self.unconditional)) return inputs def get_config(self): config = super(AddLoss, self).get_config() config.update({'unconditional': self.unconditional}) return config class AddMetric(Layer): """Adds its inputs as a metric. Attributes: aggregation: 'mean' or None. How the inputs should be aggregated. metric_name: The name to use for this metric. """ def __init__(self, aggregation=None, metric_name=None, **kwargs): super(AddMetric, self).__init__(**kwargs) self.aggregation = aggregation self.metric_name = metric_name def call(self, inputs): self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name) return inputs def get_config(self): config = super(AddMetric, self).get_config() config.update({ 'aggregation': self.aggregation, 'metric_name': self.metric_name }) return config def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list): # pylint: disable=unused-argument """Check the arguments to see if we are constructing a functional model.""" # We are constructing a functional model if any of the inputs # are KerasTensors return any( isinstance(tensor, keras_tensor.KerasTensor) for tensor in tf.nest.flatten([inputs, args, kwargs])) def _convert_numpy_or_python_types(x): if isinstance(x, (tf.Tensor, np.ndarray, float, int)): return tf.convert_to_tensor(x) return x @keras_export( 'keras.__internal__.apply_name_scope_on_model_declaration', v1=[]) def _apply_name_scope_on_model_declaration(enable): """Apply `with tf.name_scope(...)` on model declaration. 
```python tf.keras.__internal__.apply_name_scope_on_model_declaration(True) inputs = input_layer.Input((3,)) with tf.name_scope('MyScope'): outputs = layers.Dense(10, name='MyDense')(inputs) model = tf.keras.Model(inputs, outputs) # with `tf.keras.__internal__.apply_name_scope_on_model_declaration(True)`, # The name of the dense layer is "model/MyScope/MyDense/*", and without, # "model/MyDense/*" ``` Args: enable: Enables if `True`, disables if `False`. """ if not isinstance(enable, bool): raise TypeError( '`enable` argument must be `True` or `False`, got {}'.format(enable)) global _is_name_scope_on_model_declaration_enabled _is_name_scope_on_model_declaration_enabled = enable # Avoid breaking users who directly import this symbol from this file. # TODO(fchollet): remove this. InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
134,150
38.831057
120
py
keras
keras-master/keras/engine/base_layer_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains private utilities used mainly by the base Layer class.""" import tensorflow.compat.v2 as tf import functools import threading from keras import backend from keras.utils import control_flow_util from keras.utils import tf_inspect from keras.utils import tf_utils from tensorflow.python.util.tf_export import keras_export _call_context = threading.local() def create_mean_metric(value, name=None): # import keras will import base_layer and then this module, and metric relies # on base_layer, which result into a cyclic dependency. from keras import metrics as metrics_module # pylint: disable=g-import-not-at-top metric_obj = metrics_module.Mean(name=name, dtype=value.dtype) return metric_obj, metric_obj(value) def make_variable(name, shape=None, dtype=tf.float32, initializer=None, trainable=None, caching_device=None, validate_shape=True, constraint=None, use_resource=None, collections=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.VariableAggregation.NONE, partitioner=None): # pylint: disable=unused-argument """Temporary util to create a variable (relies on `variable_scope.variable`). Some reuse-related technicalities prevent us from using `variable_scope.get_variable()` directly, so we use a subcomponent that has fewer constraints (`variable_scope.variable()`). 
In the longer term, it seems like a similar "default variable creator" method should exist in `Trackable` instead. When this happens, we can get rid of this temporary solution. TODO(fchollet): remove this method when no longer needed. Args: name: Variable name. shape: Variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. initializer: Initializer instance (callable). trainable: Whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean, stddev). Note, if the current variable scope is marked as non-trainable then this parameter is ignored and any added variables are also marked as non-trainable. `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. caching_device: Passed to `tf.Variable`. validate_shape: Passed to `tf.Variable`. constraint: Constraint instance (callable). use_resource: Whether to use a `ResourceVariable`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. partitioner: Not handled at this time. Returns: Variable instance. """ initializing_from_value = False if initializer is not None and not callable(initializer): initializing_from_value = True if initializing_from_value: init_val = initializer variable_dtype = None else: # Instantiate initializer if provided initializer is a type object. 
if tf_inspect.isclass(initializer): initializer = initializer() init_val = functools.partial(initializer, shape, dtype=dtype) variable_dtype = dtype.base_dtype variable_shape = tf.TensorShape(shape) if use_resource is None: use_resource = True # In theory, in `use_resource` is True and `collections` is empty # (that is to say, in TF2), we can use tf.Variable. # However, this breaks legacy (Estimator) checkpoints # because it changes variable names. Remove this when V1 is fully deprecated. return tf.compat.v1.Variable( initial_value=init_val, name=name, trainable=trainable, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, use_resource=use_resource, collections=collections, synchronization=synchronization, aggregation=aggregation, shape=variable_shape if variable_shape else None) def collect_previous_mask(input_tensors): """Retrieves the output mask(s) of the previous node. Args: input_tensors: An arbitrary structure of Tensors. Returns: A mask tensor or list of mask tensors. """ def _collect_previous_mask(x): return getattr(x, '_keras_mask', None) return tf.nest.map_structure(_collect_previous_mask, input_tensors) def have_all_keras_metadata(tensors): return all(hasattr(x, '_keras_history') for x in tf.nest.flatten(tensors)) def generate_placeholders_from_shape(shape): return tf.compat.v1.placeholder(shape=shape, dtype=backend.floatx()) def create_keras_history(tensors): """Wraps TensorFlow Operations for compatibility with the Functional API. This method checks to see if a Tensor in `tensors` is missing Keras metadata and has its origin in a Keras `Input` Layer. If so, this method will replace the raw TensorFlow Operations that created this tensor with `TensorFlowOpLayer` instances that create identical operations. Any Tensors not originating from a Keras `Input` Layer will be treated as constants when constructing `TensorFlowOpLayer` instances. 
Args: tensors: A structure of Tensors, some of which come from raw TensorFlow operations and need to have Keras metadata assigned to them. Returns: created_layers: List. The `TensorFlowOpLayer` instances created to wrap the raw Tensorflow operations. """ _, created_layers = _create_keras_history_helper(tensors, set(), []) return created_layers # Unsafe Internal attribute. # If True, Keras will not evaluate the constant-foldable inputs to tf op # layers in TF1 graphs. This *might* speed up model construction time in # certain settings, but it means # the models will not be serializable/deserializable via get_config # (Only via Savedmodels). It may also change the semantics of whether # generated random numbers are generated once and re-used, or recomputed # each time. # Note: This path triggers for TPUEstimators / xla compiled graphs regardless # of this setting. _UNSAFE_GRAPH_OP_LAYER_CREATION = False def _create_keras_history_helper(tensors, processed_ops, created_layers): """Helper method for `create_keras_history`. Args: tensors: A structure of Tensors for which to create Keras metadata. processed_ops: Set. TensorFlow operations that have already been wrapped in `TensorFlowOpLayer` instances. created_layers: List. The `TensorFlowOpLayer` instances created. Returns: Tuple. First element is the updated set of TensorFlow Operations that have been wrapped in `TensorFlowOpLayer` instances. Second element is a list of the `TensorFlowOpLayer` instances created. """ if tf.compat.v1.executing_eagerly_outside_functions(): raise ValueError( '`create_keras_history` should only be called if eager is disabled!') # Import of `base_layer` needed in order to create `TensorFlowOpLayer`. # Cannot be imported at top because of circular dependencies. # TODO(omalleyt): Resolve circular dependency. 
from keras.engine import base_layer # pylint: disable=g-import-not-at-top tensor_list = tf.nest.flatten(tensors) sparse_ops = [] ragged_tensors = [] for tensor in tensor_list: if getattr(tensor, '_keras_history', None) is not None: continue if isinstance( tensor, (tf.SparseTensor, tf.compat.v1.SparseTensorValue)): sparse_ops.append(tensor.op) continue if tf_utils.is_ragged(tensor): # Ragged tensors don't have an op property ragged_tensors.append(tensor) continue op = tensor.op # The Op that created this Tensor. if op not in processed_ops: # Recursively set `_keras_history`. op_inputs = list(op.inputs) constants = {} layer_inputs = [] for i, op_input in enumerate(op_inputs): if uses_keras_history(op_input): layer_inputs.append(op_input) else: # Treat any value not originating from a `keras.Input` as # a constant. Variables cannot be supported. ds_with_session = ( tf.distribute.in_cross_replica_context() and not tf.compat.v1.executing_eagerly_outside_functions()) using_xla = control_flow_util.GraphOrParentsInXlaContext( tf.compat.v1.get_default_graph()) if ds_with_session or using_xla or _UNSAFE_GRAPH_OP_LAYER_CREATION: # In Legacy Graph mode, evaluating here makes Session be # configured improperly. The downside of this is that saving # via `get_config` breaks, but SavedModel still works. 
constants[i] = op_input else: with tf.init_scope(): constants[i] = backend.function([], op_input)([]) layer_inputs = unnest_if_single_tensor(layer_inputs) processed_ops, created_layers = _create_keras_history_helper( layer_inputs, processed_ops, created_layers) name = op.name node_def = op.node_def.SerializeToString() op_layer = base_layer.TensorFlowOpLayer( node_def, constants=constants, name=name) created_layers.append(op_layer) op_layer._set_connectivity_metadata( # pylint: disable=protected-access args=(layer_inputs,), kwargs={}, outputs=op.outputs) processed_ops.update([op]) if sparse_ops or ragged_tensors: lambda_example = """ weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights) output = tf.keras.layers.Lambda(weights_mult)(input) """ raise ValueError( 'Tensorflow ops that generate ragged or sparse tensor ' 'outputs are currently not supported by Keras automatic ' 'op wrapping. Please wrap these ops in a Lambda layer: ' '\n\n```\n{example}\n```\n' 'Sparse ops encountered: {sparse_ops}\n' 'Ragged tensors encountered: {ragged_tensors}\n'.format( example=lambda_example, sparse_ops=str(sparse_ops), ragged_tensors=str(ragged_tensors))) return processed_ops, created_layers def unnest_if_single_tensor(input_tensors): # Preserve compatibility with older configs flat_input_tensors = tf.nest.flatten(input_tensors) # If this is a single element but not a dict, unwrap. If this is a dict, # assume the first layer expects a dict (as is the case with a # DenseFeatures layer); pass through. if not isinstance(input_tensors, dict) and len(flat_input_tensors) == 1: input_tensors = flat_input_tensors[0] return input_tensors def needs_keras_history(tensors, ignore_call_context=False): """Check if any Tensors need to be wrapped in TensorFlowOpLayers. This will never return True inside a sublayer, because sublayers do not need to create Keras History. 
Otherwise, this returns True if one or more of `tensors` originates from a `keras.Input` and does not have `_keras_history` set. Args: tensors: An arbitrary nested structure of Tensors. ignore_call_context: Whether to ignore the check of if currently outside of a `call` context. This is `True` when creating KerasHistory inside `Node`, where we always know that Tensors are being used with the Functional API. Returns: Bool, whether at least one Tensor needs to be wrapped. """ input_tensors = tf.nest.flatten(tensors) if call_context().in_call and not ignore_call_context: return False if all( getattr(tensor, '_keras_history', None) is not None for tensor in input_tensors): # KerasHistory already set. return False return uses_keras_history(tensors) def is_in_keras_graph(): """Returns if currently executing inside of a Keras graph.""" return call_context().in_keras_graph def is_in_eager_or_tf_function(): """Returns if in eager mode or inside of a tf.function.""" return tf.executing_eagerly() or is_in_tf_function() def is_in_tf_function(): """Returns if inside of a tf.function.""" # Check if running in V1 graph mode. if not tf.compat.v1.executing_eagerly_outside_functions(): return False if not tf.inside_function(): return False # Check if inside Keras FuncGraph. if is_in_keras_graph(): return False # Check for a v1 `wrap_function` FuncGraph. graph = tf.compat.v1.get_default_graph() if (getattr(graph, 'name', False) and graph.name.startswith('wrapped_function')): return False return True def uses_keras_history(tensors): """Check if at least one Tensor originates from a `keras.Input`. This is `True` if at least one Tensor has its origin in a `keras.Input`. Any Tensor that originates from a `keras.Input` will have a dependency Tensor with a `_keras_history` attribute attached. Tensors that have already been checked to not originate from a `keras.Input` are marked as `_keras_history_checked`. Args: tensors: An arbitrary nested structure of Tensors. 
Returns: Bool, whether at least one Tensor originates from a `keras.Input`. """ checked_tensors = set() tensors_to_check = tf.nest.flatten(tensors) while tensors_to_check: new_tensors_to_check = [] for tensor in tensors_to_check: if id(tensor) in checked_tensors: continue checked_tensors.add(id(tensor)) if getattr(tensor, '_keras_history_checked', None) is not None: continue if getattr(tensor, '_keras_history', None) is not None: return True try: new_tensors_to_check.extend(tensor.op.inputs) except AttributeError: # In case `tensor` is a Variable created in an Eager context. pass tensors_to_check = new_tensors_to_check # Mark that these Tensors have been checked once for `_keras_history`, # and should not be checked again for performance reasons. mark_checked(tensors) return False def mark_checked(tensors): """Marks that these Tensors should not be tracked. This prevents Layers from attempting to create TensorFlowOpLayers for these Tensors. Args: tensors: An arbitrary structure of Tensors. """ def _mark_checked(tensor): tensor._keras_history_checked = True # pylint: disable=protected-access tf.nest.map_structure(_mark_checked, tensors) def call_context(): """Returns currently active `CallContext`.""" call_ctx = getattr(_call_context, 'call_context', None) if call_ctx is None: call_ctx = CallContext() _call_context.call_context = call_ctx return call_ctx # Inject the call_context function to keras_deps to remove the dependency # from TFLite to Keras. tf.__internal__.register_call_context_function(call_context) class CallContext: """Keeps track of properties currently inside a Layer/Model's `call`. Attributes: in_call: Whether currently inside the `call` of a Layer. layer: The `Layer` whose `call` is currently active. inputs: The inputs to the currently active `Layer`. build_graph: Whether currently inside a Graph or FuncGraph. training: Whether currently executing in training or inference mode. saving: Whether currently saving to SavedModel. 
frozen: Whether currently executing inside a `Layer` with `trainable` set to `False`. in_keras_graph: Whether executing inside the Keras Graph. """ def __init__(self): # Handle `in_call` separately as it is the most-read attr and reading it is # on the hot path. self.in_call = False self._state = { 'layer': None, 'inputs': None, 'build_graph': False, 'training': None, 'saving': None } # TODO(b/150169018): This logic can be replaced after the Functional API # refactor. self._in_keras_graph = False def enter(self, layer, inputs, build_graph, training, saving=None): """Push a Layer and its inputs and state onto the current call context. Args: layer: The `Layer` whose `call` is currently active. inputs: The inputs to the currently active `Layer`. build_graph: Whether currently inside a Graph or FuncGraph. training: Whether currently executing in training or inference mode. saving: Whether currently saving to SavedModel. Returns: Context manager. """ state = { 'layer': layer, 'inputs': inputs, 'build_graph': build_graph, 'training': training, 'saving': saving } return CallContextManager(self, state) @property def layer(self): return self._state['layer'] @property def inputs(self): return self._state['inputs'] @property def build_graph(self): return self._state['build_graph'] @property def training(self): return self._state['training'] @property def saving(self): return self._state['saving'] @property def frozen(self): layer = self._state['layer'] if not layer: return False return not layer.trainable @property def in_keras_graph(self): # Returns True even if in a subgraph of the Keras graph, such as those # created by control flow ops. 
if tf.executing_eagerly(): return False return (self._in_keras_graph or getattr(backend.get_graph(), 'name', None) == 'keras_graph') class CallContextManager: """Context manager for `CallContext`.""" def __init__(self, call_ctx, state): self._call_ctx = call_ctx self._state = state self._build_graph = state['build_graph'] def __enter__(self): call_ctx = self._call_ctx self._prev_in_call = call_ctx.in_call self._prev_state = call_ctx._state call_ctx.in_call = True call_ctx._state = self._state # TODO(b/150169018): This logic can be removed after the Functional API # refactor. if self._build_graph: self._prev_in_keras_graph = call_ctx._in_keras_graph call_ctx._in_keras_graph = ( call_ctx._in_keras_graph or getattr(backend.get_graph(), 'name', None) == 'keras_graph') def __exit__(self, *exc_info): call_ctx = self._call_ctx call_ctx.in_call = self._prev_in_call call_ctx._state = self._prev_state if self._build_graph: call_ctx._in_keras_graph = self._prev_in_keras_graph def training_arg_passed_to_call(argspec, args, kwargs): """Returns whether a user passed the `training` argument in `__call__`.""" # `argspec.args` starts with ['self', 'inputs'] full_args = dict(zip(argspec.args[2:], args)) full_args.update(kwargs) return 'training' in full_args and full_args['training'] is not None def is_subclassed(layer): """Returns True if the object is a subclassed layer or subclassed model.""" return (layer.__module__.find('keras.engine') == -1 and layer.__module__.find('keras.layers') == -1) def from_saved_model(layer): """Returns whether the layer is loaded from a SavedModel.""" return layer.__module__.find('keras.saving.saved_model') != -1 def check_graph_consistency(tensor=None, method='add_loss', force_raise=False): """Checks that tensors passed to `add_*` method match the Keras graph. When one of the `add_*` method is called inside a V2 conditional branch, the underlying tensor gets created in a FuncGraph managed by control_flow_v2. 
We need to raise clear error messages in such cases. Args: tensor: Tensor to check, or `False` if it is known that an error should be raised. method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}. force_raise: If an error should be raised regardless of `tensor`. Raises: RuntimeError: In case of an out-of-graph tensor. """ if (force_raise or (tf.compat.v1.executing_eagerly_outside_functions() and hasattr(tensor, 'graph') and tensor.graph.is_control_flow_graph)): if method == 'activity_regularizer': bad_example = """ class TestModel(tf.keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2') def call(self, x, training=None): if training: return self.dense(x) else: return self.dense(x) """ correct_example = """ class TestModel(tf.keras.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2') def call(self, x, training=None): return self.dense(x) """ raise RuntimeError( 'You are using a layer with `activity_regularizer` in a control flow ' 'branch, e.g.:\n{bad_example}\nThis is currently not supported. ' 'Please move your call to the layer with `activity_regularizer` out ' 'of the control flow branch, e.g.:\n{correct_example}\n' 'You can also resolve this by marking your outer model/layer dynamic' ' (eager-only) by passing `dynamic=True` to the layer constructor. ' 'Any kind of control flow is supported with dynamic layers. 
' 'Note that using `dynamic=True` requires you to implement static ' 'shape inference in the `compute_output_shape(input_shape)` ' 'method.'.format( bad_example=bad_example, correct_example=correct_example)) if method == 'add_metric': bad_example = """ def call(self, inputs, training=None): if training: metric = compute_metric(inputs) self.add_metric(metric, name='my_metric', aggregation='mean') return inputs """ correct_example = """ def call(self, inputs, training=None): if training: metric = compute_metric(inputs) else: metric = 0. self.add_metric(metric, name='my_metric', aggregation='mean') return inputs """ elif method == 'add_loss': bad_example = """ def call(self, inputs, training=None): if training: loss = compute_loss(inputs) self.add_loss(loss) return inputs """ correct_example = """ def call(self, inputs, training=None): if training: loss = compute_loss(inputs) else: loss = 0. self.add_loss(loss) return inputs """ else: bad_example = """ def call(self, inputs, training=None): if training: self.add_update(self.w.assign_add(1)) return inputs """ correct_example = """ def call(self, inputs, training=None): if training: increment = 1 else: increment = 0 self.add_update(self.w.assign_add(increment)) return inputs """ raise RuntimeError( 'You are using the method `{method}` in a control flow branch ' 'in your layer, e.g.:\n{bad_example}\n' 'This is not currently supported. ' 'Please move your call to {method} out of the control flow branch, ' 'e.g.:\n{correct_example}\n' 'You can also resolve this by marking your layer ' 'as dynamic (eager-only) by passing ' '`dynamic=True` to the layer constructor. ' 'Any kind of control flow is supported with dynamic layers. 
' 'Note that using `dynamic=True` requires you ' 'to implement static shape inference ' 'in the `compute_output_shape(input_shape)` method.'.format( method=method, bad_example=bad_example, correct_example=correct_example)) def mark_as_return(outputs, acd): """Marks `outputs` as the return values for automatic control deps.""" def _mark_as_return(tensor): """Marks `tensor` as the return value for automatic control deps.""" if not tf.is_tensor(tensor): return tensor # pylint: disable=protected-access return_tensor = acd.mark_as_return(tensor) if getattr(tensor, '_keras_mask', None) is not None: return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask) else: return_tensor._keras_mask = None # Handle TensorFlow Probability attached metadata. # TODO(b/132076537): Remove this once TFP uses `CompositeTensor`. if getattr(tensor, '_tfp_distribution', None) is not None: return_tensor._tfp_distribution = tensor._tfp_distribution return return_tensor # pylint: enable=protected-access return tf.nest.map_structure(_mark_as_return, outputs) V2_DTYPE_BEHAVIOR = None @keras_export(v1=['keras.layers.enable_v2_dtype_behavior']) def enable_v2_dtype_behavior(): """Enable the V2 dtype behavior for Keras layers. By default, the V2 dtype behavior is enabled in TensorFlow 2, so this function is only useful if `tf.compat.v1.disable_v2_behavior` has been called. Since mixed precision requires V2 dtype behavior to be enabled, this function allows you to use mixed precision in Keras layers if `disable_v2_behavior` has been called. When enabled, the dtype of Keras layers defaults to floatx (which is typically float32) instead of None. In addition, layers will automatically cast floating-point inputs to the layer's dtype. 
>>> x = tf.ones((4, 4, 4, 4), dtype='float64') >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) >>> print(layer.dtype) # float32 since V2 dtype behavior is enabled float32 >>> y = layer(x) # Layer casts inputs since V2 dtype behavior is enabled >>> print(y.dtype.name) float32 A layer author can opt-out their layer from the automatic input casting by passing `autocast=False` to the base Layer's constructor. This disables the autocasting part of the V2 behavior for that layer, but not the defaulting to floatx part of the V2 behavior. When a global `tf.keras.mixed_precision.Policy` is set, a Keras layer's dtype will default to the global policy instead of floatx. Layers will automatically cast inputs to the policy's compute_dtype. """ global V2_DTYPE_BEHAVIOR V2_DTYPE_BEHAVIOR = True @keras_export(v1=['keras.layers.disable_v2_dtype_behavior']) def disable_v2_dtype_behavior(): """Disables the V2 dtype behavior for Keras layers. See `tf.compat.v1.keras.layers.enable_v2_dtype_behavior`. """ global V2_DTYPE_BEHAVIOR V2_DTYPE_BEHAVIOR = False def v2_dtype_behavior_enabled(): """Returns True if the V2 dtype behavior is enabled.""" if V2_DTYPE_BEHAVIOR is None: return tf.__internal__.tf2.enabled() return V2_DTYPE_BEHAVIOR class TrackableWeightHandler: """Keras wrapper for handling tracking.Trackable object saving and restoring. This class handles Trackables in both V1 and V2 modes, ensuring that they can be saved and restored with the correct data and without adding additional ops on every save. Attributes: trackable: The trackable to wrap. num_tensors: The number of tensors that this trackable requires for saving. """ def __init__(self, trackable): if not isinstance(trackable, tf.__internal__.tracking.Trackable): raise ValueError(f'{trackable} is not a Trackable object.') self._trackable = trackable self._distribute_strategy = tf.distribute.get_strategy() # TODO(b/141682913): Figure out why this is private and fix it. 
saveables = trackable._gather_saveables_for_checkpoint().values() # pylint: disable=protected-access # 'Saveables' won't exist when we're passed a legacy TF1 table like # a StaticHashTable. if not saveables: self._num_tensors = 0 self._setter = lambda weights: None self._getter = lambda: [] elif len(saveables) == 1: saveable = list(saveables)[0] if tf.compat.v1.executing_eagerly_outside_functions(): # If we're in eager mode, we need to defer calling the Trackable's # saveable() callable until data export time. # However, it is safe to call the saveable as many times as we want, so # we will call it now to figure out how many tensors this Trackable will # produce. self._saveable = saveable self._num_tensors = len(self._saveable().specs) self._setter = lambda weights: self._saveable().restore(weights, None) self._getter = lambda: [spec.tensor for spec in self._saveable().specs] else: # If we're in Graph mode, we need to evaluate the Saveable only once and # cache the resulting restore graph. Failing to do this will result in # new assignment ops being added to the graph each time set_weights() is # called. self._placeholder_tensors = [] self._saveable = saveable() self._num_tensors = len(self._saveable.specs) for spec in self._saveable.specs: tensor = spec.tensor self._placeholder_tensors.append( tf.compat.v1.placeholder(tensor.dtype, tensor.shape)) self._assign_op = self._saveable.restore(self._placeholder_tensors, None) self._setter = self._set_weights_v1 self._getter = lambda: [spec.tensor for spec in self._saveable.specs] else: raise ValueError( 'Only Trackables with one Saveable are supported. 
The Trackable ' f'{trackable} has {len(saveables)} Saveables.') @property def num_tensors(self): return self._num_tensors def set_weights(self, weights): if len(weights) != self._num_tensors: raise ValueError( f'Weight handler for trackable {self._trackable} received ' 'an incorrect number of weights: ' f'expected {self._num_tensors} weights, got {len(weights)} weights.') self._setter(weights) def get_tensors(self): return self._getter() def _set_weights_v1(self, weights): feed_dict = {} for idx, tensor in enumerate(weights): feed_dict[self._placeholder_tensors[idx]] = tensor backend.get_session().run(self._assign_op, feed_dict) def no_ragged_support(inputs, layer_name): input_list = tf.nest.flatten(inputs) if any(isinstance(x, tf.RaggedTensor) for x in input_list): raise ValueError( f'Layer {layer_name} does not support RaggedTensors as input. ' f'Inputs received: {inputs}. You can try converting your ' 'input to a dense (uniform) tensor.') def is_split_variable(v): """Returns True if `v` is either a PartionedVariable or a ShardedVariable.""" return hasattr(v, '_variable_list') or hasattr(v, '_variables') def has_weights(obj): obj_type = type(obj) return (hasattr(obj_type, 'trainable_weights') and hasattr(obj_type, 'non_trainable_weights') and not isinstance(obj, type)) # TODO(kathywu): This is a temporary hack. When a network of layers is revived # from SavedModel, only the top-level layer will have losses. This causes issues # in eager mode because the child layers may have graph losses # (thus model.losses returns a mix of Eager and graph tensors). To fix this, # whenever eager losses are added to one layer, add eager losses to all # child layers. This causes `.losses` to only return eager losses. REVIVED_LOSS_PLACEHOLDER = ( 'This layer\'s losses have been added to the parent layer.')
32,431
35.687783
105
py
keras
keras-master/keras/engine/compile_utils.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for `Model.compile`.""" import tensorflow.compat.v2 as tf import copy from keras import losses as losses_mod from keras import metrics as metrics_mod from keras.utils import generic_utils from keras.utils import losses_utils from keras.utils import tf_utils class Container: """Base Container class.""" def __init__(self, output_names=None): self._output_names = output_names def build(self, y_pred): if self._output_names is None: # In Subclass API, output names like 'output_1' are used for # `Metric` names. self._output_names = create_pseudo_output_names(y_pred) def _conform_to_outputs(self, outputs, struct): """Convenience method to conform `struct` to `outputs` structure. Mappings performed: (1) Map a dict to a list of outputs, using the output names. (2) Fill missing keys in a dict w/ `None`s. (3) Map a single item to all outputs. Args: outputs: Model predictions. struct: Arbitrary nested structure (e.g. of labels, sample_weights, losses, or metrics). Returns: Mapping of `struct` to `outputs` structure. """ struct = map_to_output_names(outputs, self._output_names, struct) struct = map_missing_dict_keys(outputs, struct) # Allow passing one object that applies to all outputs. 
if not tf.nest.is_nested(struct) and tf.nest.is_nested(outputs): struct = tf.nest.map_structure(lambda _: struct, outputs) return struct def _maybe_broadcast_to_outputs(self, outputs, objects): """Determines if losses / metrics should be applied to all outputs. NOTE: This method should only be called for Metrics / Losses, not for y_true / sample_weight. Args: outputs: Model predictions. objects: Arbitrary nested structure (e.g. of losses or metrics) Returns: Arbitrary nested structure of objects, maybe copied to each output. Applies a Loss / Metric to all outputs. """ if not self._should_broadcast(objects): return objects # When there is more than one Model output, this is needed to keep # each Metric / Loss separate. When there is only one Model output, # the user-supplied object should be used. should_copy_objects = len(tf.nest.flatten(outputs)) > 1 def _broadcast_fn(): if should_copy_objects: return tf.nest.map_structure(self._copy_object, objects) return objects return tf.nest.map_structure(lambda _: _broadcast_fn(), outputs) def _should_broadcast(self, objects): raise NotImplementedError def _copy_object(self, obj): raise NotImplementedError class LossesContainer(Container): """A container class for losses passed to `Model.compile`.""" def __init__(self, losses, loss_weights=None, output_names=None): super(LossesContainer, self).__init__(output_names=output_names) # Keep user-supplied values untouched for recompiling and serialization. self._user_losses = losses self._user_loss_weights = loss_weights self._losses = losses self._loss_weights = loss_weights self._per_output_metrics = None # Per-output losses become metrics. self._loss_metric = metrics_mod.Mean(name='loss') # Total loss. 
self._built = False @property def metrics(self): """Per-output loss metrics.""" if not self._built: return [] per_output_metrics = [ metric_obj for metric_obj in tf.nest.flatten(self._per_output_metrics) if metric_obj is not None ] return [self._loss_metric] + per_output_metrics def build(self, y_pred): """One-time setup of loss objects.""" super(LossesContainer, self).build(y_pred) self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses) self._losses = self._conform_to_outputs(y_pred, self._losses) self._losses = tf.nest.map_structure(self._get_loss_object, self._losses) self._losses = tf.nest.flatten(self._losses) self._loss_weights = self._maybe_broadcast_to_outputs( y_pred, self._loss_weights) self._loss_weights = self._conform_to_outputs(y_pred, self._loss_weights) self._loss_weights = tf.nest.flatten(self._loss_weights) self._create_metrics() self._built = True @property def built(self): return self._built def _create_metrics(self): """Creates per-output loss metrics, but only for multi-output Models.""" if len(self._output_names) == 1: self._per_output_metrics = [None] else: self._per_output_metrics = [] for loss_obj, output_name in zip(self._losses, self._output_names): if loss_obj is None: self._per_output_metrics.append(None) else: self._per_output_metrics.append( metrics_mod.Mean(output_name + '_loss')) def __call__(self, y_true, y_pred, sample_weight=None, regularization_losses=None): """Computes the overall loss. Args: y_true: An arbitrary structure of Tensors representing the ground truth. y_pred: An arbitrary structure of Tensors representing a Model's outputs. sample_weight: An arbitrary structure of Tensors representing the per-sample loss weights. If one Tensor is passed, it is used for all losses. If multiple Tensors are passed, the structure should match `y_pred`. regularization_losses: Additional losses to be added to the total loss. 
Returns: Tuple of `(total_loss, per_output_loss_list)` """ y_true = self._conform_to_outputs(y_pred, y_true) sample_weight = self._conform_to_outputs(y_pred, sample_weight) if not self._built: self.build(y_pred) y_pred = tf.nest.flatten(y_pred) y_true = tf.nest.flatten(y_true) sample_weight = tf.nest.flatten(sample_weight) loss_values = [] # Used for gradient calculation. loss_metric_values = [] # Used for loss metric calculation. batch_dim = None zip_args = (y_true, y_pred, sample_weight, self._losses, self._loss_weights, self._per_output_metrics) for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args): if y_t is None or loss_obj is None: # Ok to have no loss for an output. continue y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw) sw = apply_mask(y_p, sw, get_mask(y_p)) loss_value = loss_obj(y_t, y_p, sample_weight=sw) loss_metric_value = loss_value # Correct for the `Mean` loss metrics counting each replica as a batch. if loss_obj.reduction == losses_utils.ReductionV2.SUM: loss_metric_value *= tf.distribute.get_strategy().num_replicas_in_sync if batch_dim is None: if tf_utils.is_ragged(y_t): batch_dim = y_t.nrows() else: batch_dim = tf.shape(y_t)[0] if metric_obj is not None: metric_obj.update_state(loss_metric_value, sample_weight=batch_dim) if loss_weight is not None: loss_value *= loss_weight loss_metric_value *= loss_weight if (loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or loss_obj.reduction == losses_utils.ReductionV2.AUTO): loss_value = losses_utils.scale_loss_for_distribution(loss_value) loss_values.append(loss_value) loss_metric_values.append(loss_metric_value) if regularization_losses: regularization_losses = losses_utils.cast_losses_to_common_dtype( regularization_losses) reg_loss = tf.add_n(regularization_losses) loss_metric_values.append(reg_loss) loss_values.append(losses_utils.scale_loss_for_distribution(reg_loss)) if loss_values: loss_metric_values = losses_utils.cast_losses_to_common_dtype( 
loss_metric_values) total_loss_metric_value = tf.add_n(loss_metric_values) self._loss_metric.update_state( total_loss_metric_value, sample_weight=batch_dim) loss_values = losses_utils.cast_losses_to_common_dtype(loss_values) total_loss = tf.add_n(loss_values) return total_loss else: # Ok for a model to have no compiled loss. return tf.zeros(shape=()) def reset_state(self): """Resets the state of loss metrics.""" if not self._built: return metrics = [self._loss_metric] + tf.nest.flatten(self._per_output_metrics) for metric_obj in metrics: if metric_obj is not None: metric_obj.reset_state() def _get_loss_object(self, loss): """Returns a `Loss` object. Converts the user-supplied loss to a `Loss` object. Also allows `SUM_OVER_BATCH_SIZE` reduction to be used for this loss. Args: loss: A string, function, or `Loss` object. Returns: A `Loss` object. """ if loss is None: return None # Ok to have no loss for an output. loss = losses_mod.get(loss) if not isinstance(loss, losses_mod.Loss): loss_name = get_custom_object_name(loss) if loss_name is None: raise ValueError( f'Loss should be a callable, received: {loss}') loss = losses_mod.LossFunctionWrapper(loss, name=loss_name) loss._allow_sum_over_batch_size = True # pylint: disable=protected-access return loss def _should_broadcast(self, obj): return not tf.nest.is_nested(obj) def _copy_object(self, obj): return obj # Losses don't need to be copied. class MetricsContainer(Container): """A container class for metrics passed to `Model.compile`.""" def __init__(self, metrics=None, weighted_metrics=None, output_names=None, from_serialized=False): """Initializes a container for metrics. Arguments: metrics: see the `metrics` argument from `tf.keras.Model.compile`. weighted_metrics: see the `weighted_metrics` argument from `tf.keras.Model.compile`. output_names: A list of strings of names of outputs for the model. from_serialized: Whether the model being compiled is from a serialized model. 
Used to avoid redundantly applying pre-processing renaming steps. """ super(MetricsContainer, self).__init__(output_names=output_names) # Keep user-supplied values untouched for recompiling and serialization. self._user_metrics = metrics self._user_weighted_metrics = weighted_metrics self._metrics = metrics self._weighted_metrics = weighted_metrics self._built = False self._from_serialized = from_serialized @property def metrics(self): """All metrics in this container.""" if not self._built: return [] return self._metrics_in_order @property def unweighted_metrics(self): """Metrics in this container that should not be passed `sample_weight`.""" if not self._built: return None return tf.nest.flatten(self._metrics) @property def weighted_metrics(self): """Metrics in this container that should be passed `sample_weight`.""" if not self._built: return None return tf.nest.flatten(self._weighted_metrics) def build(self, y_pred, y_true): """One-time setup of metric objects.""" super(MetricsContainer, self).build(y_pred) self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics) self._metrics = self._conform_to_outputs(y_pred, self._metrics) self._weighted_metrics = self._maybe_broadcast_to_outputs( y_pred, self._weighted_metrics) self._weighted_metrics = self._conform_to_outputs(y_pred, self._weighted_metrics) # Standardize on tuple since `tf.data` turns lists into `Tensor`s. y_pred = tf.__internal__.nest.list_to_tuple(y_pred) y_true = tf.__internal__.nest.list_to_tuple(y_true) self._metrics = tf.__internal__.nest.list_to_tuple(self._metrics) self._weighted_metrics = tf.__internal__.nest.list_to_tuple(self._weighted_metrics) # Convert to `Metric` objects, potentially disambiguating based on output # properties. 
self._metrics = tf.__internal__.nest.map_structure_up_to(y_pred, self._get_metric_objects, self._metrics, y_true, y_pred) self._weighted_metrics = tf.__internal__.nest.map_structure_up_to(y_pred, self._get_metric_objects, self._weighted_metrics, y_true, y_pred) self._metrics = tf.__internal__.nest.flatten_up_to(y_pred, self._metrics, check_types=False) self._weighted_metrics = tf.__internal__.nest.flatten_up_to( y_pred, self._weighted_metrics, check_types=False) # Assumes metrics, weighted_metrics have been flattened up to outputs. # # If we are loading a model that has been already serialized, we do not # want to re-apply any pre-processing metric renaming steps. if not self._from_serialized: self._set_metric_names() self._create_ordered_metrics() self._built = True @property def built(self): return self._built def _set_metric_names(self): """Sets unique metric names.""" # For multi-output models, prepend the output name to the metric name. # For weighted metrics, prepend "weighted_" if the name would be non-unique. # pylint: disable=protected-access metric_names = set() is_multi_output = len(self._output_names) > 1 zip_args = (self._output_names, self._metrics, self._weighted_metrics) for output_name, output_metrics, weighted_output_metrics in zip(*zip_args): for m in output_metrics: if m is None: continue if is_multi_output: m._name = output_name + '_' + m._name if m._name in metric_names: raise ValueError( f'Found two metrics with the same name: {m._name}.' 'All the metrics added to the model need to have unique names.') metric_names.add(m._name) for wm in weighted_output_metrics: if wm is None: continue if is_multi_output: if output_name + '_' + wm._name in metric_names: wm._name = output_name + '_weighted_' + wm._name else: wm._name = output_name + '_' + wm._name elif wm._name in metric_names: wm._name = 'weighted_' + wm._name if wm._name in metric_names: raise ValueError( f'Found two weighted metrics with the same name: {wm._name}.' 
'All the metrics added to the model need to have unique names.') metric_names.add(wm._name) # pylint: enable=protected-access def _create_ordered_metrics(self): """Cache the flat order needed when returning metrics, for backwards compat.""" self._metrics_in_order = [] for output_metrics, output_weighted_metrics in zip(self._metrics, self._weighted_metrics): for m in tf.nest.flatten(output_metrics): if m is not None: self._metrics_in_order.append(m) for wm in tf.nest.flatten(output_weighted_metrics): if wm is not None: self._metrics_in_order.append(wm) def update_state(self, y_true, y_pred, sample_weight=None): """Updates the state of per-output metrics.""" y_true = self._conform_to_outputs(y_pred, y_true) sample_weight = self._conform_to_outputs(y_pred, sample_weight) if not self._built: self.build(y_pred, y_true) y_pred = tf.nest.flatten(y_pred) y_true = tf.nest.flatten(y_true) if y_true is not None else [] sample_weight = tf.nest.flatten(sample_weight) zip_args = (y_true, y_pred, sample_weight, self._metrics, self._weighted_metrics) for y_t, y_p, sw, metric_objs, weighted_metric_objs in zip(*zip_args): # Ok to have no metrics for an output. if (y_t is None or (all(m is None for m in metric_objs) and all(wm is None for wm in weighted_metric_objs))): continue y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw) mask = get_mask(y_p) sw = apply_mask(y_p, sw, mask) for metric_obj in metric_objs: if metric_obj is None: continue metric_obj.update_state(y_t, y_p, sample_weight=mask) for weighted_metric_obj in weighted_metric_objs: if weighted_metric_obj is None: continue weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw) def reset_state(self): """Resets the state of all `Metric`s in this container.""" if self._built: metrics = self._metrics_in_order else: # If the user supplied `Metric` objects directly, we should # reset those. This could also contain `str`s or `function`s # though. 
metrics = tf.nest.flatten(self._user_metrics) + tf.nest.flatten( self._user_weighted_metrics) for metric_obj in metrics: if isinstance(metric_obj, metrics_mod.Metric): metric_obj.reset_state() def _get_metric_objects(self, metrics, y_t, y_p): """Convert user-supplied metrics to `Metric` objects.""" metrics = tf.nest.flatten(metrics) return [self._get_metric_object(m, y_t, y_p) for m in metrics] def _get_metric_object(self, metric, y_t, y_p): """Converts user-supplied metric to a `Metric` object. Args: metric: A string, function, or `Metric` object. y_t: Sample of label. y_p: Sample of output. Returns: A `Metric` object. """ if metric is None: return None # Ok to have no metric for an output. # Convenience feature for selecting b/t binary, categorical, # and sparse categorical. if str(metric).lower() not in ['accuracy', 'acc', 'crossentropy', 'ce']: metric_obj = metrics_mod.get(metric) else: y_t_rank = len(y_t.shape.as_list()) y_p_rank = len(y_p.shape.as_list()) y_t_last_dim = y_t.shape.as_list()[-1] y_p_last_dim = y_p.shape.as_list()[-1] is_binary = y_p_last_dim == 1 is_sparse_categorical = ( y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1) if str(metric).lower() in ['accuracy', 'acc']: if is_binary: metric_obj = metrics_mod.binary_accuracy elif is_sparse_categorical: metric_obj = metrics_mod.sparse_categorical_accuracy else: metric_obj = metrics_mod.categorical_accuracy else: if is_binary: metric_obj = metrics_mod.binary_crossentropy elif is_sparse_categorical: metric_obj = metrics_mod.sparse_categorical_crossentropy else: metric_obj = metrics_mod.categorical_crossentropy if isinstance(metric_obj, losses_mod.Loss): metric_obj._allow_sum_over_batch_size = True # pylint: disable=protected-access if not isinstance(metric_obj, metrics_mod.Metric): if isinstance(metric, str): metric_name = metric else: metric_name = get_custom_object_name(metric) if metric_name is None: raise ValueError( f'Metric should be a callable, received: {metric}') metric_obj = 
metrics_mod.MeanMetricWrapper(metric_obj, name=metric_name) return metric_obj def _should_broadcast(self, obj): # e.g. 'mse'. if not tf.nest.is_nested(obj): return True # e.g. ['mse'] or ['mse', 'mae']. return (isinstance(obj, (list, tuple)) and not any(tf.nest.is_nested(o) for o in obj)) def _copy_object(self, obj): if isinstance(obj, metrics_mod.Metric): return obj.__class__.from_config(obj.get_config()) return obj # Can be a function or `None`. def create_pseudo_output_names(outputs): """Create pseudo output names for a subclassed Model.""" return _create_pseudo_names(outputs, prefix='output_') def create_pseudo_input_names(inputs): """Create pseudo input names for a subclassed Model.""" return _create_pseudo_names(inputs, prefix='input_') def _create_pseudo_names(tensors, prefix): """Creates pseudo {input | output} names for subclassed Models. Warning: this function should only be used to define default names for `Metics` and `SavedModel`. No other use cases should rely on a `Model`'s input or output names. Example with dict: `{'a': [x1, x2], 'b': x3}` becomes: `['a_1', 'a_2', 'b']` Example with list: `[x, y]` becomes: `['output_1', 'output_2']` Args: tensors: `Model`'s outputs or inputs. prefix: 'output_' for outputs, 'input_' for inputs. Returns: Flattened list of pseudo names. """ def one_index(ele): # Start with "output_1" instead of "output_0". if isinstance(ele, int): return ele + 1 return ele flat_paths = list(tf.__internal__.nest.yield_flat_paths(tensors)) flat_paths = tf.nest.map_structure(one_index, flat_paths) names = [] for path in flat_paths: if not path: name = prefix + '1' # Single output. else: name = '_'.join(str(p) for p in path) if isinstance(path[0], int): name = prefix + name names.append(name) return names def map_to_output_names(y_pred, output_names, struct): """Maps a dict to a list using `output_names` as keys. This is a convenience feature only. 
When a `Model`'s outputs are a list, you can specify per-output losses and metrics as a dict, where the keys are the output names. If you specify per-output losses and metrics via the same structure as the `Model`'s outputs (recommended), no mapping is performed. For the Functional API, the output names are the names of the last layer of each output. For the Subclass API, the output names are determined by `create_pseudo_output_names` (For example: `['output_1', 'output_2']` for a list of outputs). This mapping preserves backwards compatibility for `compile` and `fit`. Args: y_pred: Sample outputs of the Model, to determine if this convenience feature should be applied (`struct` is returned unmodified if `y_pred` isn't a flat list). output_names: List. The names of the outputs of the Model. struct: The structure to map. Returns: `struct` mapped to a list in same order as `output_names`. """ single_output = not tf.nest.is_nested(y_pred) outputs_are_flat_list = (not single_output and isinstance(y_pred, (list, tuple)) and not any(tf.nest.is_nested(y_p) for y_p in y_pred)) if (single_output or outputs_are_flat_list) and isinstance(struct, dict): output_names = output_names or create_pseudo_output_names(y_pred) struct = copy.copy(struct) new_struct = [struct.pop(name, None) for name in output_names] if struct: raise ValueError( 'Found unexpected losses or metrics that do not correspond ' f'to any Model output: {struct.keys()}. ' f'Valid mode output names: {output_names}. 
' f'Received struct is: {struct}.') if len(new_struct) == 1: return new_struct[0] return new_struct else: return struct def map_missing_dict_keys(y_pred, struct): """Replaces missing dict keys in `struct` with `None` placeholders.""" if not isinstance(y_pred, dict) or not isinstance(struct, dict): return struct for k in y_pred.keys(): if k not in struct: struct[k] = None return struct def match_dtype_and_rank(y_t, y_p, sw): """Match dtype and rank of predictions.""" if y_t.shape.rank == 1 and y_p.shape.rank == 2: y_t = tf.expand_dims(y_t, axis=-1) if sw is not None: if sw.shape.rank == 1 and y_p.shape.rank == 2: sw = tf.expand_dims(sw, axis=-1) # Dtype. # This is required mainly for custom loss functions which do not take care # casting dtypes. if ((y_t.dtype.is_floating and y_p.dtype.is_floating) or (y_t.dtype.is_integer and y_p.dtype.is_integer)): y_t = tf.cast(y_t, y_p.dtype) if sw is not None: sw = tf.cast(sw, y_p.dtype) return y_t, y_p, sw def get_mask(y_p): """Returns Keras mask from tensor.""" return getattr(y_p, '_keras_mask', None) def apply_mask(y_p, sw, mask): """Applies any mask on predictions to sample weights.""" if mask is not None: mask = tf.cast(mask, y_p.dtype) if sw is not None: mask, _, sw = ( losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=sw)) sw *= mask else: sw = mask return sw def get_custom_object_name(obj): """Returns the name to use for a custom loss or metric callable. Args: obj: Custom loss of metric callable Returns: Name to use, or `None` if the object was not recognized. """ if hasattr(obj, 'name'): # Accept `Loss` instance as `Metric`. return obj.name elif hasattr(obj, '__name__'): # Function. return obj.__name__ elif hasattr(obj, '__class__'): # Class instance. return generic_utils.to_snake_case(obj.__class__.__name__) else: # Unrecognized object. return None
25,665
34.207133
96
py
keras
keras-master/keras/engine/feature_columns_integration_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests specific to Feature Columns integration.""" import tensorflow.compat.v2 as tf import numpy as np import keras from keras import keras_parameterized from keras import metrics as metrics_module from keras import testing_utils from keras.feature_column import dense_features as df from keras.utils import np_utils class TestDNNModel(keras.models.Model): def __init__(self, feature_columns, units, name=None, **kwargs): super(TestDNNModel, self).__init__(name=name, **kwargs) self._input_layer = df.DenseFeatures(feature_columns, name='input_layer') self._dense_layer = keras.layers.Dense(units, name='dense_layer') def call(self, features): net = self._input_layer(features) net = self._dense_layer(net) return net class FeatureColumnsIntegrationTest(keras_parameterized.TestCase): """Most Sequential model API tests are covered in `training_test.py`. 
""" @keras_parameterized.run_all_keras_modes def test_sequential_model(self): columns = [tf.feature_column.numeric_column('a')] model = keras.models.Sequential([ df.DenseFeatures(columns), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(20, activation='softmax') ]) model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly()) x = {'a': np.random.random((10, 1))} y = np.random.randint(20, size=(10, 1)) y = np_utils.to_categorical(y, num_classes=20) model.fit(x, y, epochs=1, batch_size=5) model.fit(x, y, epochs=1, batch_size=5) model.evaluate(x, y, batch_size=5) model.predict(x, batch_size=5) @keras_parameterized.run_all_keras_modes def test_sequential_model_with_ds_input(self): columns = [tf.feature_column.numeric_column('a')] model = keras.models.Sequential([ df.DenseFeatures(columns), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(20, activation='softmax') ]) model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly()) y = np.random.randint(20, size=(100, 1)) y = np_utils.to_categorical(y, num_classes=20) x = {'a': np.random.random((100, 1))} ds1 = tf.data.Dataset.from_tensor_slices(x) ds2 = tf.data.Dataset.from_tensor_slices(y) ds = tf.data.Dataset.zip((ds1, ds2)).batch(5) model.fit(ds, steps_per_epoch=1) model.fit(ds, steps_per_epoch=1) model.evaluate(ds, steps=1) model.predict(ds, steps=1) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_sequential_model_with_crossed_column(self): feature_columns = [] age_buckets = tf.feature_column.bucketized_column( tf.feature_column.numeric_column('age'), boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) feature_columns.append(age_buckets) # indicator cols thal = tf.feature_column.categorical_column_with_vocabulary_list( 'thal', ['fixed', 'normal', 'reversible']) crossed_feature = 
tf.feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000) crossed_feature = tf.feature_column.indicator_column(crossed_feature) feature_columns.append(crossed_feature) feature_layer = df.DenseFeatures(feature_columns) model = keras.models.Sequential([ feature_layer, keras.layers.Dense(128, activation='relu'), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) age_data = np.random.randint(10, 100, size=100) thal_data = np.random.choice(['fixed', 'normal', 'reversible'], size=100) inp_x = {'age': age_data, 'thal': thal_data} inp_y = np.random.randint(0, 1, size=100) ds = tf.data.Dataset.from_tensor_slices((inp_x, inp_y)).batch(5) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'],) model.fit(ds, epochs=1) model.fit(ds, epochs=1) model.evaluate(ds) model.predict(ds) @keras_parameterized.run_all_keras_modes def test_subclassed_model_with_feature_columns(self): col_a = tf.feature_column.numeric_column('a') col_b = tf.feature_column.numeric_column('b') dnn_model = TestDNNModel([col_a, col_b], 20) dnn_model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly()) x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))} y = np.random.randint(20, size=(10, 1)) y = np_utils.to_categorical(y, num_classes=20) dnn_model.fit(x=x, y=y, epochs=1, batch_size=5) dnn_model.fit(x=x, y=y, epochs=1, batch_size=5) dnn_model.evaluate(x=x, y=y, batch_size=5) dnn_model.predict(x=x, batch_size=5) @keras_parameterized.run_all_keras_modes def test_subclassed_model_with_feature_columns_with_ds_input(self): col_a = tf.feature_column.numeric_column('a') col_b = tf.feature_column.numeric_column('b') dnn_model = TestDNNModel([col_a, col_b], 20) dnn_model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly()) y = np.random.randint(20, size=(100, 1)) 
y = np_utils.to_categorical(y, num_classes=20) x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))} ds1 = tf.data.Dataset.from_tensor_slices(x) ds2 = tf.data.Dataset.from_tensor_slices(y) ds = tf.data.Dataset.zip((ds1, ds2)).batch(5) dnn_model.fit(ds, steps_per_epoch=1) dnn_model.fit(ds, steps_per_epoch=1) dnn_model.evaluate(ds, steps=1) dnn_model.predict(ds, steps=1) # TODO(kaftan) seems to throw an error when enabled. @keras_parameterized.run_all_keras_modes def DISABLED_test_function_model_feature_layer_input(self): col_a = tf.feature_column.numeric_column('a') col_b = tf.feature_column.numeric_column('b') feature_layer = df.DenseFeatures([col_a, col_b], name='fc') dense = keras.layers.Dense(4) # This seems problematic.... We probably need something for DenseFeatures # the way Input is for InputLayer. output = dense(feature_layer) model = keras.models.Model([feature_layer], [output]) optimizer = 'rmsprop' loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=[metrics_module.CategoricalAccuracy(), 'mae'], loss_weights=loss_weights) data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20)) model.fit(*data, epochs=1) # TODO(kaftan) seems to throw an error when enabled. @keras_parameterized.run_all_keras_modes def DISABLED_test_function_model_multiple_feature_layer_inputs(self): col_a = tf.feature_column.numeric_column('a') col_b = tf.feature_column.numeric_column('b') col_c = tf.feature_column.numeric_column('c') fc1 = df.DenseFeatures([col_a, col_b], name='fc1') fc2 = df.DenseFeatures([col_b, col_c], name='fc2') dense = keras.layers.Dense(4) # This seems problematic.... We probably need something for DenseFeatures # the way Input is for InputLayer. 
output = dense(fc1) + dense(fc2) model = keras.models.Model([fc1, fc2], [output]) optimizer = 'rmsprop' loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=[metrics_module.CategoricalAccuracy(), 'mae'], loss_weights=loss_weights) data_list = ([{ 'a': np.arange(10), 'b': np.arange(10) }, { 'b': np.arange(10), 'c': np.arange(10) }], np.arange(10, 100)) model.fit(*data_list, epochs=1) data_bloated_list = ([{ 'a': np.arange(10), 'b': np.arange(10), 'c': np.arange(10) }, { 'a': np.arange(10), 'b': np.arange(10), 'c': np.arange(10) }], np.arange(10, 100)) model.fit(*data_bloated_list, epochs=1) data_dict = ({ 'fc1': { 'a': np.arange(10), 'b': np.arange(10) }, 'fc2': { 'b': np.arange(10), 'c': np.arange(10) } }, np.arange(10, 100)) model.fit(*data_dict, epochs=1) data_bloated_dict = ({ 'fc1': { 'a': np.arange(10), 'b': np.arange(10), 'c': np.arange(10) }, 'fc2': { 'a': np.arange(10), 'b': np.arange(10), 'c': np.arange(10) } }, np.arange(10, 100)) model.fit(*data_bloated_dict, epochs=1) @keras_parameterized.run_all_keras_modes def test_string_input(self): x = {'age': np.random.random((1024, 1)), 'cabin': np.array(['a'] * 1024)} y = np.random.randint(2, size=(1024, 1)) ds1 = tf.data.Dataset.from_tensor_slices(x) ds2 = tf.data.Dataset.from_tensor_slices(y) dataset = tf.data.Dataset.zip((ds1, ds2)).batch(4) categorical_cols = [tf.feature_column.categorical_column_with_hash_bucket('cabin', 10)] feature_cols = ([tf.feature_column.numeric_column('age')] + [tf.feature_column.indicator_column(cc) for cc in categorical_cols]) layers = [df.DenseFeatures(feature_cols), keras.layers.Dense(128), keras.layers.Dense(1)] model = keras.models.Sequential(layers) model.compile(optimizer='sgd', loss=keras.losses.BinaryCrossentropy()) model.fit(dataset) if __name__ == '__main__': tf.test.main()
10,385
33.62
91
py
keras
keras-master/keras/engine/training_gpu_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from keras import backend from keras import combinations from keras import testing_utils from keras.engine import input_layer from keras.engine import training from keras.layers.convolutional import Conv2D class TrainingGPUTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_model_with_crossentropy_losses_channels_first(self): """Tests use of all crossentropy losses with `channels_first`. Tests `sparse_categorical_crossentropy`, `categorical_crossentropy`, and `binary_crossentropy`. Verifies that evaluate gives the same result with either `channels_first` or `channels_last` image_data_format. 
""" def prepare_simple_model(input_tensor, loss_name, target): axis = 1 if backend.image_data_format() == 'channels_first' else -1 loss = None num_channels = None activation = None if loss_name == 'sparse_categorical_crossentropy': loss = lambda y_true, y_pred: backend.sparse_categorical_crossentropy( # pylint: disable=g-long-lambda y_true, y_pred, axis=axis) num_channels = int(np.amax(target) + 1) activation = 'softmax' elif loss_name == 'categorical_crossentropy': loss = lambda y_true, y_pred: backend.categorical_crossentropy( # pylint: disable=g-long-lambda y_true, y_pred, axis=axis) num_channels = target.shape[axis] activation = 'softmax' elif loss_name == 'binary_crossentropy': loss = lambda y_true, y_pred: backend.binary_crossentropy( # pylint: disable=g-long-lambda, unnecessary-lambda y_true, y_pred) num_channels = target.shape[axis] activation = 'sigmoid' predictions = Conv2D(num_channels, 1, activation=activation, kernel_initializer='ones', bias_initializer='ones')(input_tensor) simple_model = training.Model(inputs=input_tensor, outputs=predictions) simple_model.compile(optimizer='rmsprop', loss=loss) return simple_model if tf.test.is_gpu_available(cuda_only=True): with testing_utils.use_gpu(): losses_to_test = ['sparse_categorical_crossentropy', 'categorical_crossentropy', 'binary_crossentropy'] data_channels_first = np.array([[[[8., 7.1, 0.], [4.5, 2.6, 0.55], [0.9, 4.2, 11.2]]]], dtype=np.float32) # Labels for testing 4-class sparse_categorical_crossentropy, 4-class # categorical_crossentropy, and 2-class binary_crossentropy: labels_channels_first = [np.array([[[[0, 1, 3], [2, 1, 0], [2, 2, 1]]]], dtype=np.float32), # pylint: disable=line-too-long np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 0]], [[1, 0, 0], [0, 0, 1], [0, 1, 0]], [[0, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 0, 0], [1, 0, 0]]]], dtype=np.float32), # pylint: disable=line-too-long np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 1], [1, 0, 1], [1, 1, 0]]]], dtype=np.float32)] # 
pylint: disable=line-too-long # Compute one loss for each loss function in the list `losses_to_test`: loss_channels_last = [0., 0., 0.] loss_channels_first = [0., 0., 0.] old_data_format = backend.image_data_format() # Evaluate a simple network with channels last, with all three loss # functions: backend.set_image_data_format('channels_last') data = np.moveaxis(data_channels_first, 1, -1) for index, loss_function in enumerate(losses_to_test): labels = np.moveaxis(labels_channels_first[index], 1, -1) inputs = input_layer.Input(shape=(3, 3, 1)) model = prepare_simple_model(inputs, loss_function, labels) loss_channels_last[index] = model.evaluate(x=data, y=labels, batch_size=1, verbose=0) # Evaluate the same network with channels first, with all three loss # functions: backend.set_image_data_format('channels_first') data = data_channels_first for index, loss_function in enumerate(losses_to_test): labels = labels_channels_first[index] inputs = input_layer.Input(shape=(1, 3, 3)) model = prepare_simple_model(inputs, loss_function, labels) loss_channels_first[index] = model.evaluate(x=data, y=labels, batch_size=1, verbose=0) backend.set_image_data_format(old_data_format) np.testing.assert_allclose( loss_channels_first, loss_channels_last, rtol=1e-06, err_msg='{}{}'.format('Computed different losses for ', 'channels_first and channels_last')) if __name__ == '__main__': tf.test.main()
5,980
46.468254
132
py
keras
keras-master/keras/engine/training_utils_v1_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training utility functions.""" import tensorflow.compat.v2 as tf import functools import multiprocessing.pool import time from absl.testing import parameterized import numpy as np from keras import backend from keras import keras_parameterized from keras import testing_utils from keras.engine import keras_tensor from keras.engine import training_utils_v1 from tensorflow.python.platform import tf_logging as logging class ModelInputsTest(tf.test.TestCase): def test_single_thing(self): a = np.ones(10) model_inputs = training_utils_v1.ModelInputs(a) self.assertEqual(['input_1'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tf.is_tensor(vals)) vals = model_inputs.get_symbolic_inputs(return_single_as_list=True) self.assertEqual(1, len(vals)) self.assertTrue(tf.is_tensor(vals[0])) self.assertEqual(backend.floatx(), vals[0].dtype) def test_single_thing_eager(self): if not tf.executing_eagerly(): self.skipTest('Run in eager mode only.') a = np.ones(10, dtype=np.int32) model_inputs = training_utils_v1.ModelInputs(a) self.assertEqual(['input_1'], model_inputs.get_input_names()) val = model_inputs.get_symbolic_inputs() self.assertIsInstance(val, keras_tensor.KerasTensor) vals = model_inputs.get_symbolic_inputs(return_single_as_list=True) 
self.assertEqual(1, len(vals)) self.assertIsInstance(vals[0], keras_tensor.KerasTensor) self.assertEqual(tf.int32, vals[0].dtype) def test_list(self): a = [np.ones(10), np.ones(20)] model_inputs = training_utils_v1.ModelInputs(a) self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tf.is_tensor(vals[0])) self.assertTrue(tf.is_tensor(vals[1])) def test_list_eager(self): if not tf.executing_eagerly(): self.skipTest('Run in eager mode only.') a = [np.ones(10), np.ones(20)] model_inputs = training_utils_v1.ModelInputs(a) self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertIsInstance(vals[0], keras_tensor.KerasTensor) self.assertIsInstance(vals[1], keras_tensor.KerasTensor) def test_dict(self): a = {'b': np.ones(10), 'a': np.ones(20)} model_inputs = training_utils_v1.ModelInputs(a) self.assertEqual(['a', 'b'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertTrue(tf.is_tensor(vals['a'])) self.assertTrue(tf.is_tensor(vals['b'])) def test_dict_eager(self): if not tf.executing_eagerly(): self.skipTest('Run in eager mode only.') a = {'b': np.ones(10), 'a': np.ones(20)} model_inputs = training_utils_v1.ModelInputs(a) self.assertEqual(['a', 'b'], model_inputs.get_input_names()) vals = model_inputs.get_symbolic_inputs() self.assertIsInstance(vals['a'], keras_tensor.KerasTensor) self.assertIsInstance(vals['b'], keras_tensor.KerasTensor) class DatasetUtilsTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( # pylint: disable=g-long-lambda ('Batch', lambda: tf.data.Dataset.range(5).batch(2)), ('Cache', lambda: tf.data.Dataset.range(5).cache()), ('Concatenate', lambda: tf.data.Dataset.range(5).concatenate( tf.data.Dataset.range(5))), ('FlatMap', lambda: tf.data.Dataset.range(5).flat_map( lambda _: tf.data.Dataset.from_tensors(0))), ('FlatMap_Shuffle', lambda: 
tf.data.Dataset.range(5).flat_map( lambda _: tf.data.Dataset.from_tensors(0).shuffle(1)), True), ('Filter', lambda: tf.data.Dataset.range(5).filter(lambda _: True)), ('FixedLengthRecordDatasetV2', lambda: tf.data.FixedLengthRecordDataset([], 42)), ('FromTensors', lambda: tf.data.Dataset.from_tensors(0)), ('FromTensorSlices', lambda: tf.data.Dataset.from_tensor_slices([0, 0, 0])), ('Interleave', lambda: tf.data.Dataset.range(5).interleave( lambda _: tf.data.Dataset.from_tensors(0), cycle_length=1)), ('Interleave_Shuffle', lambda: tf.data.Dataset.range(5).interleave( lambda _: tf.data.Dataset.from_tensors(0).shuffle(1), cycle_length=1), True), ('Map', lambda: tf.data.Dataset.range(5).map(lambda x: x)), ('Options', lambda: tf.data.Dataset.range(5).with_options(tf.data.Options()) ), ('PaddedBatch', lambda: tf.data.Dataset.range(5).padded_batch(2, [])), ('ParallelInterleave', lambda: tf.data.Dataset.range(5).interleave( lambda _: tf.data.Dataset.from_tensors(0), cycle_length=1, num_parallel_calls=1)), ('ParallelMap', lambda: tf.data.Dataset.range(5).map( lambda x: x, num_parallel_calls=1)), ('Prefetch', lambda: tf.data.Dataset.range(5).prefetch(1)), ('Range', lambda: tf.data.Dataset.range(0)), ('Repeat', lambda: tf.data.Dataset.range(0).repeat(0)), ('Shuffle', lambda: tf.data.Dataset.range(5).shuffle(1), True), ('Skip', lambda: tf.data.Dataset.range(5).skip(2)), ('Take', lambda: tf.data.Dataset.range(5).take(2)), ('TextLineDataset', lambda: tf.data.TextLineDataset([])), ('TFRecordDataset', lambda: tf.data.TFRecordDataset([])), ('Window', lambda: tf.data.Dataset.range(5).window(2)), ('Zip', lambda: tf.data.Dataset.zip(tf.data.Dataset.range(5))), # pylint: enable=g-long-lambda ) def test_verify_dataset_shuffled(self, dataset_fn, expect_shuffled=False): dataset = dataset_fn() if not expect_shuffled: with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: shuffled = training_utils_v1.verify_dataset_shuffled(dataset) self.assertRegex( 
str(mock_log.call_args), 'input dataset `x` is not shuffled.') self.assertFalse(shuffled) else: self.assertTrue(training_utils_v1.verify_dataset_shuffled(dataset)) class StandardizeWeightsTest(keras_parameterized.TestCase): def test_sample_weights(self): y = np.array([0, 1, 0, 0, 2]) sample_weights = np.array([0.5, 1., 1., 0., 2.]) weights = training_utils_v1.standardize_weights(y, sample_weights) self.assertAllClose(weights, sample_weights) def test_class_weights(self): y = np.array([0, 1, 0, 0, 2]) class_weights = {0: 0.5, 1: 1., 2: 1.5} weights = training_utils_v1.standardize_weights( y, class_weight=class_weights) self.assertAllClose(weights, np.array([0.5, 1., 0.5, 0.5, 1.5])) def test_sample_weights_and_class_weights(self): y = np.array([0, 1, 0, 0, 2]) sample_weights = np.array([0.5, 1., 1., 0., 2.]) class_weights = {0: 0.5, 1: 1., 2: 1.5} weights = training_utils_v1.standardize_weights(y, sample_weights, class_weights) expected = sample_weights * np.array([0.5, 1., 0.5, 0.5, 1.5]) self.assertAllClose(weights, expected) def test_dataset_with_class_weight(self): model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3) model.compile('rmsprop', 'mse') inputs = np.zeros((10, 3), np.float32) targets = np.zeros((10, 4), np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) class_weight_np = np.array([0.25, 0.25, 0.25, 0.25]) class_weight = dict(enumerate(class_weight_np)) model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=1, class_weight=class_weight) class MonitoredPool(multiprocessing.pool.ThreadPool): def __init__(self, *args, **kwargs): self._apply_counter = 0 self._func_wrapper = None super(MonitoredPool, self).__init__(*args, **kwargs) def apply_async(self, func, *args, **kwargs): self._apply_counter += 1 if self._func_wrapper: func = self._func_wrapper(func) # pylint: disable=not-callable return super(MonitoredPool, self).apply_async(func, *args, **kwargs) def 
add_sleep(f): @functools.wraps(f) def wrapped(*args, **kwargs): time.sleep(1.) return f(*args, **kwargs) return wrapped def cause_error(f): @functools.wraps(f) def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument # Induce a TypeError during assignment. return f(None, None, None, is_finished) return wrapped _TEST_DATA = np.array(( (3, 1, 3, 1, 2, 0, 3, 3, 1, 2), (0, 1, 2, 1, 3, 0, 0, 1, 3, 0), (3, 2, 1, 1, 1, 1, 1, 3, 2, 3), (2, 2, 0, 1, 0, 3, 3, 2, 1, 1), (3, 0, 3, 3, 3, 2, 1, 0, 0, 1), (1, 0, 3, 3, 3, 2, 1, 2, 3, 1),)) class AggregationTest(keras_parameterized.TestCase): def setUp(self): super(AggregationTest, self).setUp() self._old_pool = training_utils_v1._COPY_POOL self._old_threshold = ( training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD) self._old_timeout = training_utils_v1.SliceAggregator._MAX_COPY_SECONDS training_utils_v1._COPY_POOL = MonitoredPool( training_utils_v1._COPY_THREADS) def tearDown(self): super(AggregationTest, self).tearDown() training_utils_v1._COPY_POOL = self._old_pool training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = ( self._old_threshold) training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = self._old_timeout def _run_with_steps(self): aggregator = training_utils_v1.OutputsAggregator(use_steps=True) for i, batch in enumerate(np.array_split(_TEST_DATA, 4)): if i == 0: aggregator.create(batch) aggregator.aggregate(batch) assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils_v1.ConcatAggregator) aggregator.finalize() return aggregator.results def _run_without_steps(self): aggregator = training_utils_v1.OutputsAggregator( use_steps=False, num_samples=6) batch_start = 0 for i, batch in enumerate(np.array_split(_TEST_DATA, 4)): if i == 0: aggregator.create(batch) batch_end = batch_start + batch.shape[0] aggregator.aggregate(batch, batch_start, batch_end) batch_start = batch_end assert len(aggregator.results) == 1 assert 
isinstance(aggregator.results[0], training_utils_v1.SliceAggregator) aggregator.finalize() return aggregator.results def test_with_steps(self): self.assertAllEqual(self._run_with_steps(), _TEST_DATA) def test_without_steps(self): self.assertAllEqual(self._run_without_steps(), _TEST_DATA) def test_nested_aggregation(self): aggregator = training_utils_v1.OutputsAggregator( use_steps=False, num_samples=6) batches = np.array_split(_TEST_DATA, 4) batch_start = 0 for i, batch in enumerate(zip(batches, batches)): if i == 0: aggregator.create(batch) batch_end = batch_start + batch[0].shape[0] aggregator.aggregate(batch, batch_start, batch_end) batch_start = batch_end assert len(aggregator.results) == 2 aggregator.finalize() self.assertAllEqual(aggregator.results, (_TEST_DATA, _TEST_DATA)) def test_concat_single_batch(self): aggregator = training_utils_v1.OutputsAggregator(use_steps=True) data = _TEST_DATA.copy() aggregator.create(data) assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils_v1.ConcatAggregator) aggregator.aggregate(data) aggregator.finalize() assert aggregator.results is data # No copy. def test_slice_single_batch(self): aggregator = training_utils_v1.OutputsAggregator( use_steps=False, num_samples=6) data = _TEST_DATA.copy() aggregator.create(data) assert len(aggregator.results) == 1 assert isinstance(aggregator.results[0], training_utils_v1.SliceAggregator) aggregator.aggregate(data, 0, 6) aggregator.finalize() assert aggregator.results is data # No copy. def test_async_copy(self): training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15 self.assertAllEqual(self._run_without_steps(), _TEST_DATA) # Two of the four batches will have 20 elements and two will have 10. 
self.assertEqual(training_utils_v1._COPY_POOL._apply_counter, 2) def test_async_copy_timeout(self): training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15 training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = 0.1 training_utils_v1._COPY_POOL._func_wrapper = add_sleep with self.assertRaisesRegex(ValueError, 'Timed out waiting for copy'): self._run_without_steps() def test_async_copy_reraise(self): training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15 training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = 1. training_utils_v1._COPY_POOL._func_wrapper = cause_error with self.assertRaisesRegex(TypeError, 'NoneType'): self._run_without_steps() class CompositeTensorTestUtils(keras_parameterized.TestCase): def test_is_composite(self): # Validate that all composite tensor and value types return true. self.assertTrue( training_utils_v1.is_composite_or_composite_value( tf.SparseTensor([[0, 0]], [1], [1, 1]))) self.assertTrue( training_utils_v1.is_composite_or_composite_value( tf.compat.v1.SparseTensorValue([[0, 0]], [1], [1, 1]))) self.assertTrue( training_utils_v1.is_composite_or_composite_value( tf.RaggedTensor.from_row_splits( np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)))) self.assertTrue( training_utils_v1.is_composite_or_composite_value( tf.compat.v1.ragged.RaggedTensorValue( np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)))) # Test that numpy arrays and tensors return false. 
self.assertFalse( training_utils_v1.is_composite_or_composite_value(np.ndarray([0, 1]))) self.assertFalse( training_utils_v1.is_composite_or_composite_value( tf.convert_to_tensor([3, 1]))) def test_sparse_concatenation(self): tensor_1 = tf.SparseTensor([[0, 0]], [1], [1, 1]) tensor_2 = tf.SparseTensor([[0, 0]], [2], [1, 1]) concatenated_tensor = training_utils_v1._append_composite_tensor( tensor_1, tensor_2) evaluated_tensor = self.evaluate(concatenated_tensor) self.assertAllEqual(evaluated_tensor.indices, [[0, 0], [1, 0]]) self.assertAllEqual(evaluated_tensor.values, [1, 2]) self.assertAllEqual(evaluated_tensor.dense_shape, [2, 1]) def test_sparse_value_concatenation(self): tensor_1 = tf.compat.v1.SparseTensorValue([[0, 0]], [1], [1, 1]) tensor_2 = tf.compat.v1.SparseTensorValue([[0, 0]], [2], [1, 1]) concatenated_tensor = training_utils_v1._append_composite_tensor( tensor_1, tensor_2) self.assertAllEqual(concatenated_tensor.indices, [[0, 0], [1, 0]]) self.assertAllEqual(concatenated_tensor.values, [1, 2]) self.assertAllEqual(concatenated_tensor.dense_shape, [2, 1]) def test_ragged_concatenation(self): tensor_1 = tf.RaggedTensor.from_row_splits( np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)) tensor_2 = tf.RaggedTensor.from_row_splits( np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64)) concatenated_tensor = training_utils_v1._append_composite_tensor( tensor_1, tensor_2) evaluated_tensor = self.evaluate(concatenated_tensor) self.assertAllEqual(evaluated_tensor.values, [0, 1, 2, 3, 4, 5]) self.assertAllEqual(evaluated_tensor.row_splits, [0, 1, 3, 5, 6]) def test_ragged_value_concatenation(self): tensor_1 = tf.compat.v1.ragged.RaggedTensorValue( np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)) tensor_2 = tf.compat.v1.ragged.RaggedTensorValue( np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64)) concatenated_tensor = training_utils_v1._append_composite_tensor( tensor_1, tensor_2) self.assertAllEqual(concatenated_tensor.values, [0, 1, 2, 
3, 4, 5]) self.assertAllEqual(concatenated_tensor.row_splits, [0, 1, 3, 5, 6]) if __name__ == '__main__': tf.test.main()
16,910
38.511682
101
py
keras
keras-master/keras/engine/input_layer.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Input layer code (`Input` and `InputLayer`).""" import tensorflow.compat.v2 as tf from keras import backend from keras.distribute import distributed_training_utils from keras.engine import base_layer from keras.engine import keras_tensor from keras.engine import node as node_module from keras.saving.saved_model import layer_serialization from keras.utils import tf_utils from keras.utils import traceback_utils from tensorflow.python.util.tf_export import keras_export def _assert_other_arg_none(arg_name, arg): if arg is not None: raise ValueError('When `type_spec` is not None, all other args ' 'except `name` must be None, ' 'but %s is not None.' % arg_name) @keras_export('keras.layers.InputLayer') class InputLayer(base_layer.Layer): """Layer to be used as an entry point into a Network (a graph of layers). It can either wrap an existing tensor (pass an `input_tensor` argument) or create a placeholder tensor (pass arguments `input_shape`, and optionally, `dtype`). It is generally recommend to use the Keras Functional model via `Input`, (which creates an `InputLayer`) without directly using `InputLayer`. When using `InputLayer` with the Keras Sequential model, it can be skipped by moving the `input_shape` parameter to the first layer after the `InputLayer`. 
This class can create placeholders for `tf.Tensors`, `tf.SparseTensors`, and `tf.RaggedTensors` by choosing `sparse=True` or `ragged=True`. Note that `sparse` and `ragged` can't be configured to `True` at the same time. Usage: ```python # With explicit InputLayer. model = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=(4,)), tf.keras.layers.Dense(8)]) model.compile(tf.optimizers.RMSprop(0.001), loss='mse') model.fit(np.zeros((10, 4)), np.ones((10, 8))) # Without InputLayer and let the first layer to have the input_shape. # Keras will add a input for the model behind the scene. model = tf.keras.Sequential([ tf.keras.layers.Dense(8, input_shape=(4,))]) model.compile(tf.optimizers.RMSprop(0.001), loss='mse') model.fit(np.zeros((10, 4)), np.ones((10, 8))) ``` Args: input_shape: Shape tuple (not including the batch axis), or `TensorShape` instance (not including the batch axis). batch_size: Optional input batch size (integer or `None`). dtype: Optional datatype of the input. When not provided, the Keras default `float` type will be used. input_tensor: Optional tensor to use as layer input. If set, the layer will use the `tf.TypeSpec` of this tensor rather than creating a new placeholder tensor. sparse: Boolean, whether the placeholder created is meant to be sparse. Default to `False`. ragged: Boolean, whether the placeholder created is meant to be ragged. In this case, values of `None` in the `shape` argument represent ragged dimensions. For more information about `tf.RaggedTensor`, see [this guide](https://www.tensorflow.org/guide/ragged_tensor). Default to `False`. type_spec: A `tf.TypeSpec` object to create Input from. This `tf.TypeSpec` represents the entire batch. When provided, all other args except name must be `None`. name: Optional name of the layer (string). 
""" @traceback_utils.filter_traceback def __init__(self, input_shape=None, batch_size=None, dtype=None, input_tensor=None, sparse=None, name=None, ragged=None, type_spec=None, **kwargs): self._init_input_shape = input_shape self._init_batch_size = batch_size self._init_dtype = dtype self._init_sparse = sparse self._init_ragged = ragged self._init_type_spec = type_spec strategy = tf.distribute.get_strategy() if strategy and batch_size is not None and \ distributed_training_utils.global_batch_size_supported(strategy): if batch_size % strategy.num_replicas_in_sync != 0: raise ValueError('The `batch_size` argument ({}) must be divisible by ' 'the number of replicas ({})'.format( batch_size, strategy.num_replicas_in_sync)) batch_size = batch_size // strategy.num_replicas_in_sync if 'batch_input_shape' in kwargs: batch_input_shape = kwargs.pop('batch_input_shape') if input_shape and batch_input_shape: raise ValueError('Only provide the input_shape OR ' 'batch_input_shape argument to ' 'InputLayer, not both at the same time.') # Set the input shape and batch size from the batch_input_shape. # Note that batch_input_shape can be None (unknown rank) or [] (scalar), # in which case the batch size must be None. if batch_input_shape: batch_size = batch_input_shape[0] input_shape = batch_input_shape[1:] if kwargs: raise ValueError('Unrecognized keyword arguments:', kwargs.keys()) if sparse and ragged: raise ValueError( 'Cannot set both sparse and ragged to True in a Keras input.') if not name: prefix = 'input' name = prefix + '_' + str(backend.get_uid(prefix)) if not dtype: if input_tensor is None: dtype = backend.floatx() else: dtype = backend.dtype(input_tensor) elif input_tensor is not None and input_tensor.dtype != dtype: raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. 
%s' % (input_tensor.dtype, dtype)) super(InputLayer, self).__init__(dtype=dtype, name=name) self.built = True self.sparse = True if sparse else False self.ragged = True if ragged else False self.batch_size = batch_size self.supports_masking = True if isinstance(input_shape, tf.TensorShape): input_shape = tuple(input_shape.as_list()) elif isinstance(input_shape, int): input_shape = (input_shape,) if type_spec is not None: args_that_must_be_none = [ ('(input_)shape', self._init_input_shape), ('batch_size', self._init_batch_size), ('dtype', self._init_dtype), ('input_tensor', input_tensor), ('sparse', self._init_sparse), ('ragged', self._init_ragged), ] for arg_name, arg in args_that_must_be_none: _assert_other_arg_none(arg_name, arg) if not tf.compat.v1.executing_eagerly_outside_functions(): raise ValueError('Creating Keras inputs from a type_spec is only ' 'supported when eager execution is enabled.') input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec) if isinstance(input_tensor, keras_tensor.SparseKerasTensor): self.sparse = True if isinstance(input_tensor, keras_tensor.RaggedKerasTensor): self.ragged = True self.is_placeholder = True try: self._batch_input_shape = tuple(input_tensor.shape.as_list()) except ValueError: # If the shape cannot be represented as a tuple (e.g. 
unknown rank) self._batch_input_shape = None elif input_tensor is None: if input_shape is not None: batch_input_shape = (batch_size,) + tuple(input_shape) else: batch_input_shape = None graph = backend.get_graph() with graph.as_default(): input_tensor = backend.placeholder( shape=batch_input_shape, dtype=dtype, name=self.name, sparse=sparse, ragged=ragged) self.is_placeholder = True self._batch_input_shape = batch_input_shape else: if tf.compat.v1.executing_eagerly_outside_functions(): if not isinstance(input_tensor, keras_tensor.KerasTensor): input_tensor = keras_tensor.keras_tensor_from_tensor(input_tensor) else: if not tf_utils.is_symbolic_tensor(input_tensor): raise ValueError('You should not pass an EagerTensor to `Input`. ' 'For example, instead of creating an ' 'InputLayer, you should instantiate your model and ' 'directly call it on your input.') self.is_placeholder = False try: self._batch_input_shape = tuple(input_tensor.shape.as_list()) except ValueError: # If the shape cannot be represented as a tuple (e.g. unknown rank) self._batch_input_shape = None # Create an input node. 
input_tensor._keras_mask = None node_module.Node(layer=self, outputs=input_tensor) # Store type spec if isinstance(input_tensor, keras_tensor.KerasTensor) or ( tf_utils.is_extension_type(input_tensor)): self._type_spec = input_tensor._type_spec # pylint: disable=protected-access else: self._type_spec = tf.TensorSpec( shape=input_tensor.shape, dtype=input_tensor.dtype, name=self.name) def get_config(self): if self._init_type_spec is not None: config = { 'name': self.name, 'type_spec': self._init_type_spec } else: config = { 'batch_input_shape': self._batch_input_shape, 'dtype': self.dtype, 'sparse': self.sparse, 'ragged': self.ragged, 'name': self.name, } return config @property def _trackable_saved_model_saver(self): return layer_serialization.InputLayerSavedModelSaver(self) @keras_export('keras.Input', 'keras.layers.Input') @traceback_utils.filter_traceback def Input( # pylint: disable=invalid-name shape=None, batch_size=None, name=None, dtype=None, sparse=None, tensor=None, ragged=None, type_spec=None, **kwargs): """`Input()` is used to instantiate a Keras tensor. A Keras tensor is a symbolic tensor-like object, which we augment with certain attributes that allow us to build a Keras model just by knowing the inputs and outputs of the model. For instance, if `a`, `b` and `c` are Keras tensors, it becomes possible to do: `model = Model(input=[a, b], output=c)` Args: shape: A shape tuple (integers), not including the batch size. For instance, `shape=(32,)` indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. batch_size: optional static batch size (integer). name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided. dtype: The data type expected by the input, as a string (`float32`, `float64`, `int32`...) 
sparse: A boolean specifying whether the placeholder to be created is sparse. Only one of 'ragged' and 'sparse' can be True. Note that, if `sparse` is False, sparse tensors can still be passed into the input - they will be densified with a default value of 0. tensor: Optional existing tensor to wrap into the `Input` layer. If set, the layer will use the `tf.TypeSpec` of this tensor rather than creating a new placeholder tensor. ragged: A boolean specifying whether the placeholder to be created is ragged. Only one of 'ragged' and 'sparse' can be True. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see [this guide](https://www.tensorflow.org/guide/ragged_tensors). type_spec: A `tf.TypeSpec` object to create the input placeholder from. When provided, all other args except name must be None. **kwargs: deprecated arguments support. Supports `batch_shape` and `batch_input_shape`. Returns: A `tensor`. Example: ```python # this is a logistic regression in Keras x = Input(shape=(32,)) y = Dense(16, activation='softmax')(x) model = Model(x, y) ``` Note that even if eager execution is enabled, `Input` produces a symbolic tensor-like object (i.e. a placeholder). This symbolic tensor-like object can be used with lower-level TensorFlow ops that take tensors as inputs, as such: ```python x = Input(shape=(32,)) y = tf.square(x) # This op will be treated like a layer model = Model(x, y) ``` (This behavior does not work for higher-order TensorFlow APIs such as control flow and being directly watched by a `tf.GradientTape`). However, the resulting model will not track any variables that were used as inputs to TensorFlow ops. All variable usages must happen within Keras layers to make sure they will be tracked by the model's weights. 
The Keras Input can also create a placeholder from an arbitrary `tf.TypeSpec`, e.g: ```python x = Input(type_spec=tf.RaggedTensorSpec(shape=[None, None], dtype=tf.float32, ragged_rank=1)) y = x.values model = Model(x, y) ``` When passing an arbitrary `tf.TypeSpec`, it must represent the signature of an entire batch instead of just one example. Raises: ValueError: If both `sparse` and `ragged` are provided. ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are provided. ValueError: If `shape`, `tensor` and `type_spec` are None. ValueError: If arguments besides `type_spec` are non-None while `type_spec` is passed. ValueError: if any unrecognized parameters are provided. """ if sparse and ragged: raise ValueError( 'Cannot set both sparse and ragged to True in a Keras input.') input_layer_config = {'name': name, 'dtype': dtype, 'sparse': sparse, 'ragged': ragged, 'input_tensor': tensor, 'type_spec': type_spec} batch_input_shape = kwargs.pop('batch_input_shape', kwargs.pop('batch_shape', None)) if shape is not None and batch_input_shape is not None: raise ValueError('Only provide the `shape` OR `batch_input_shape` argument ' 'to Input, not both at the same time.') if (batch_input_shape is None and shape is None and tensor is None and type_spec is None): raise ValueError('Please provide to Input a `shape`' ' or a `tensor` or a `type_spec` argument. Note that ' '`shape` does not include the batch ' 'dimension.') if kwargs: raise ValueError('Unrecognized keyword arguments:', kwargs.keys()) if batch_input_shape: shape = batch_input_shape[1:] input_layer_config.update({'batch_input_shape': batch_input_shape}) else: input_layer_config.update( {'batch_size': batch_size, 'input_shape': shape}) input_layer = InputLayer(**input_layer_config) # Return tensor including `_keras_history`. # Note that in this case train_output and test_output are the same pointer. 
outputs = input_layer._inbound_nodes[0].outputs if isinstance(outputs, list) and len(outputs) == 1: return outputs[0] else: return outputs
16,070
39.686076
83
py
keras
keras-master/keras/engine/training_eager_v1.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras training and evaluation routines for eager execution.""" import tensorflow.compat.v2 as tf # pylint: disable=protected-access import numpy as np from tensorflow.python.eager.backprop import GradientTape from keras import backend from keras.engine import training_utils from keras.engine import training_utils_v1 from keras.mixed_precision import loss_scale_optimizer from keras.utils import losses_utils from tensorflow.python.platform import tf_logging as logging def _eager_loss_fn(outputs, targets, loss_fn, output_name): with backend.name_scope(output_name + '_loss'): loss = loss_fn(targets, outputs) return loss def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None): """Calculates the metrics for each output of the given model. Args: model: The model on which metrics are being calculated. outputs: The outputs of the given model. targets: The predictions or targets of the given model. sample_weights: Optional list of sample weights for each output. masks: Optional list of masks for each output. Returns: Returns the metric results for each output of the model. """ outputs = tf.nest.flatten(outputs) targets = tf.nest.flatten(targets) # Invoke all(weighted and unweighted) metrics. 
metric_results = [] if targets: # Insert None values corresponding to the targets that need to be skipped # on the model. if len(model._targets) != len(targets): new_targets = [ None if t is None else targets.pop(0) for t in model._targets ] targets = new_targets metric_results = model._handle_metrics( outputs, targets=targets, sample_weights=sample_weights, masks=masks, return_weighted_and_unweighted_metrics=True, skip_target_masks=model._prepare_skip_target_masks()) # Add metric results from the `add_metric` metrics. metric_results.extend([ m.result() for m in model.metrics if m not in model._compile_metric_functions ]) return metric_results def _model_loss(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False): """Calculates the loss for a given model. Args: model: The model on which metrics are being calculated. inputs: Either a dictionary of inputs to the model or a list of input arrays. targets: List of target arrays. output_loss_metrics: List of metrics that are used to aggregated output loss values. sample_weights: Optional list of sample weight arrays. training: Whether the model should be run in inference or training mode. Returns: Returns the model output, total loss, loss value calculated using the specified loss function and masks for each output. The total loss includes regularization losses and applies masking and sample weighting to the loss value. """ # TODO(psv): Dedup code here with graph mode prepare_total_loss() fn. # Used to keep track of the total loss value (stateless). # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) + # loss_weight_2 * output_2_loss_fn(...) + # layer losses. total_loss = 0 kwargs = {} if model._expects_training_arg: kwargs['training'] = training if len(inputs) == 1 and not isinstance(inputs, dict): inputs = inputs[0] # Allow mixed `NumPy` and `EagerTensor` input here. 
if any( isinstance(input_t, (np.ndarray, float, int)) for input_t in tf.nest.flatten(inputs)): inputs = tf.nest.map_structure(tf.convert_to_tensor, inputs) outs = model(inputs, **kwargs) outs = tf.nest.flatten(outs) if targets: targets = training_utils_v1.cast_if_floating_dtype_and_mismatch( targets, outs) # TODO(sallymatson/psv): check if we should do same mismatch fix for weights if sample_weights: sample_weights = [ training_utils_v1.cast_if_floating_dtype( tf.convert_to_tensor(val)) if val is not None else None for val in sample_weights ] masks = [getattr(t, '_keras_mask', None) for t in outs] targets = tf.nest.flatten(targets) # Used to keep track of individual output losses. output_losses = [] with backend.name_scope('loss'): loss_fns = [ loss_fn for loss_fn in model.loss_functions if loss_fn is not None ] custom_losses = model.losses # Regularization losses if not loss_fns and not custom_losses: if training: raise ValueError('The model cannot be trained ' 'because it has no loss to optimize.') else: raise ValueError('The model cannot be evaluated ' 'because it has no loss to compute.') for i, loss_fn in enumerate(loss_fns): weights = sample_weights[i] if sample_weights else None mask = masks[i] with backend.name_scope(model.output_names[i] + '_loss'): if mask is not None: mask = tf.cast(mask, outs[i].dtype) # Update weights with mask. if weights is None: weights = mask else: # Update dimensions of weights to match with mask if possible. weights = tf.cast(weights, outs[i].dtype) mask, _, weights = ( losses_utils.squeeze_or_expand_dimensions( mask, sample_weight=weights)) weights *= mask if hasattr(loss_fn, 'reduction'): per_sample_losses = loss_fn.call(targets[i], outs[i]) weighted_losses = losses_utils.compute_weighted_loss( per_sample_losses, sample_weight=weights, reduction=losses_utils.ReductionV2.NONE) loss_reduction = loss_fn.reduction # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all # compile use cases. 
if loss_reduction == losses_utils.ReductionV2.AUTO: loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE # Compute the stateless loss value. output_loss = losses_utils.reduce_weighted_loss( weighted_losses, reduction=loss_reduction) else: # Compute the stateless loss value for a custom loss class. # Here we assume that the class takes care of loss reduction # because if this class returns a vector value we cannot # differentiate between use case where a custom optimizer # expects a vector loss value vs unreduced per-sample loss value. output_loss = loss_fn(targets[i], outs[i], sample_weight=weights) loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE # If the number of outputs is 1 then we don't append the loss metric # associated with each model output. When there are multiple outputs # associated with a model, each output's loss is calculated and returned # as part of the loss_metrics. if len(model.outputs) > 1: # Keep track of the stateful output loss result. output_losses.append(output_loss_metrics[i](output_loss)) # Scale output loss for distribution. For custom losses we assume # reduction was mean. if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: output_loss = losses_utils.scale_loss_for_distribution(output_loss) total_loss += model._loss_weights_list[i] * output_loss # Add regularization losses if custom_losses: total_loss += losses_utils.scale_loss_for_distribution( tf.add_n(custom_losses)) return outs, total_loss, output_losses, masks def _process_single_batch(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False): """Calculate the loss and gradient for one input batch. The model weights are updated if training is set to True. Args: model: Model whose loss has to be calculated. inputs: List of input arrays. targets: List of target arrays. output_loss_metrics: List of metrics that are used to aggregated output loss values. sample_weights: Optional list of sample weight arrays. 
training: The boolean represents if the weights of the model are updated. 'fit' methods will set this to True while 'evaluate' methods will set this to False. Returns: output of the model, total loss, the loss and the mask associated with each output. Raises: ValueError: If the model has no loss to optimize. """ with backend.eager_learning_phase_scope(1 if training else 0), \ training_utils.RespectCompiledTrainableState(model): with GradientTape() as tape: outs, total_loss, output_losses, masks = ( _model_loss( model, inputs, targets, output_loss_metrics=output_loss_metrics, sample_weights=sample_weights, training=training)) if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer): scaled_total_loss = model.optimizer.get_scaled_loss(total_loss) else: scaled_total_loss = total_loss if training: trainable_weights = model.trainable_weights if trainable_weights: # TODO(tanzheny) b/132690565: Provide mechanism for user to override # model.train_on_batch. if hasattr(model, '_backwards'): model._backwards(tape, scaled_total_loss) else: grads = tape.gradient(scaled_total_loss, trainable_weights) if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer): grads = model.optimizer.get_unscaled_gradients(grads) model.optimizer.apply_gradients(zip(grads, trainable_weights)) else: logging.warning('The list of trainable weights is empty. Make sure that' ' you are not setting model.trainable to False before ' 'compiling the model.') return outs, total_loss, output_losses, masks def train_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None): """Calculates the loss and gradient updates for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. 
Returns: Dict with three items: 'total_loss': list with a single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified. """ inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model) outs, total_loss, output_losses, masks = ( _process_single_batch( model, inputs, targets, sample_weights=sample_weights, training=True, output_loss_metrics=output_loss_metrics)) if not isinstance(outs, list): outs = [outs] metrics_results = _eager_metrics_fn( model, outs, targets, sample_weights=sample_weights, masks=masks) total_loss = tf.nest.flatten(total_loss) return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results} def test_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None): """Calculates the loss for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. Returns: Dict with three items: 'total_loss': single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified. 
""" inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model) with backend.eager_learning_phase_scope(0): outs, total_loss, output_losses, masks = ( _model_loss( model, inputs, targets, sample_weights=sample_weights, training=False, output_loss_metrics=output_loss_metrics)) if not isinstance(outs, list): outs = [outs] metrics_results = _eager_metrics_fn( model, outs, targets, sample_weights=sample_weights, masks=masks) total_loss = tf.nest.flatten(total_loss) return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}
13,934
37.178082
80
py
keras
keras-master/keras/engine/node.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access # pylint: disable=g-classes-have-attributes """Contains the `Node` class.""" import tensorflow.compat.v2 as tf import collections import copy import json import numpy as np from keras import backend from keras.engine import base_layer_utils from keras.saving.saved_model import json_utils from keras.utils import tf_utils _CONSTANT_VALUE = '_CONSTANT_VALUE' class Node: """A `Node` describes a layer `__call__()` event. A Functional model is a DAG with `Node` instances as nodes, and `KerasTensor` instances as edges. Nodes aren't `Layer` instances, because a single layer could be called multiple times, which would result in graph cycles. A `__call__()` event involves input tensors (and other input arguments), the layer that was called, and the resulting output tensors. A `Node` will include all this information. Since a single `Layer` could be called multiple times, the `Node` instances are stored on layers as a list. Each time a layer is called a node is added to `layer._inbound_nodes`. Each time the output of a layer is used by another layer, a node is added to `layer._outbound_nodes`. Every `KerasTensor` instance has a `KerasHistory` object attached, which tracks the `Node` that records the `__call__()` event that created the tensor. 
By recursively walking through `Node` instances via the `KerasHistory` metadata of `KerasTensor` instances, once can retrieve the entire DAG of a Functional model. Args: layer: The layer that was called in the `Layer.__call__()` event that this node represents. call_args: The positional arguments the layer was called with. call_kwargs: The keyword arguments the layer was called with. outputs: The output tensors of the `Layer.__call__()` """ def __init__(self, layer, call_args=None, call_kwargs=None, outputs=None): call_args = [] if call_args is None else call_args call_kwargs = {} if call_kwargs is None else call_kwargs outputs = [] if outputs is None else outputs self.layer = layer self.is_input = not call_args and not call_kwargs # These arguments are user-provided. Copy the structures here so that # future user modifications do not affect the node's metadata. # We copy using map_structure rather than python's shallow or deep copy, # because the args can be data structures (so shallow copy is # insufficient), but individual values might not support copy.copy # or be too expensive to deep copy. call_args = tf.nest.map_structure(lambda t: t, call_args) call_kwargs = tf.nest.map_structure(lambda t: t, call_kwargs) self.outputs = tf.nest.map_structure(lambda t: t, outputs) self.call_args = call_args self.call_kwargs = call_kwargs # Cached for performance. self._flat_arguments = tf.nest.flatten((self.call_args, self.call_kwargs)) # Used to avoid expensive `nest` operations in the most common case. 
self._single_positional_tensor_passed = (not self.call_kwargs and len( self.call_args) == 1 and tf.is_tensor(self.call_args[0])) if not tf.compat.v1.executing_eagerly_outside_functions(): # Create TensorFlowOpLayers if needed (in TF1) for obj in self._flat_arguments: if (isinstance(obj, tf.Tensor) and base_layer_utils.needs_keras_history( obj, ignore_call_context=True)): base_layer_utils.create_keras_history(obj) self._keras_inputs = [] self._keras_inputs_ids_and_indices = [] for i, ele in enumerate(self._flat_arguments): if is_keras_tensor(ele): self._keras_inputs.append(ele) kt_id = str(id(ele)) kt_index = i self._keras_inputs_ids_and_indices.append((kt_id, kt_index)) # Wire up Node to Layers. self.layer._inbound_nodes.append(self) for kt in self.keras_inputs: inbound_layer = kt._keras_history.layer if inbound_layer is not None: # `None` for `Input` tensors. inbound_layer._outbound_nodes.append(self) # Set metadata on outputs. node_index = len(self.layer._inbound_nodes) - 1 for i, tensor in enumerate(tf.nest.flatten(outputs)): tensor._keras_history = KerasHistory( layer=layer, node_index=node_index, tensor_index=i) # Cached for performance. self.flat_input_ids = [str(id(t)) for t in self._keras_inputs] self.flat_output_ids = [str(id(t)) for t in tf.nest.flatten(self.outputs)] @property def keras_inputs(self): """Tensors input to this node that can be traced back to a `keras.Input`.""" return self._keras_inputs @property def parent_nodes(self): """Returns all the `Node`s whose output this node immediately depends on.""" node_deps = [] for kt in self.keras_inputs: layer = kt._keras_history.layer node_index = kt._keras_history.node_index if layer is not None: # `None` for `Input` tensors. node_deps.append(layer._inbound_nodes[node_index]) return node_deps def iterate_inbound(self): """Yields tuples representing the data inbound from other nodes. Yields: tuples like: (inbound_layer, node_index, tensor_index, tensor). 
""" for kt in self.keras_inputs: keras_history = kt._keras_history layer = keras_history.layer node_index = keras_history.node_index tensor_index = keras_history.tensor_index yield layer, node_index, tensor_index, kt def map_arguments(self, tensor_dict): """Maps Keras Tensors to computed Tensors using `tensor_dict`.""" if self._single_positional_tensor_passed: # Performance optimization for most common case. kt_id, _ = self._keras_inputs_ids_and_indices[0] return (tensor_dict[kt_id].pop(),), {} else: flat_arguments = copy.copy(self._flat_arguments) for kt_id, kt_index in self._keras_inputs_ids_and_indices: flat_arguments[kt_index] = tensor_dict[kt_id].pop() args, kwargs = tf.nest.pack_sequence_as((self.call_args, self.call_kwargs), flat_arguments) return args, kwargs def serialize(self, make_node_key, node_conversion_map): """Serializes `Node` for Functional API's `get_config`.""" # Serialization still special-cases first argument. args, kwargs = self.call_args, self.call_kwargs inputs, args, kwargs = self.layer._split_out_first_arg(args, kwargs) # Treat everything other than first argument as a kwarg. arguments = dict(zip(self.layer._call_fn_args[1:], args)) arguments.update(kwargs) kwargs = arguments def _serialize_keras_tensor(t): """Serializes a single Tensor passed to `call`.""" if hasattr(t, '_keras_history'): kh = t._keras_history node_index = kh.node_index node_key = make_node_key(kh.layer.name, node_index) new_node_index = node_conversion_map.get(node_key, 0) return [kh.layer.name, new_node_index, kh.tensor_index] if isinstance(t, np.ndarray): return t.tolist() if isinstance(t, tf.Tensor): return backend.get_value(t).tolist() return t kwargs = tf.nest.map_structure(_serialize_keras_tensor, kwargs) try: json.dumps(kwargs, default=json_utils.get_json_type) except TypeError: kwarg_types = tf.nest.map_structure(type, kwargs) raise TypeError('Layer ' + self.layer.name + ' was passed non-JSON-serializable arguments. 
' + 'Arguments had types: ' + str(kwarg_types) + '. They cannot be serialized out ' 'when saving the model.') # `kwargs` is added to each Tensor in the first arg. This should be # changed in a future version of the serialization format. def serialize_first_arg_tensor(t): if is_keras_tensor(t): kh = t._keras_history node_index = kh.node_index node_key = make_node_key(kh.layer.name, node_index) new_node_index = node_conversion_map.get(node_key, 0) data = [kh.layer.name, new_node_index, kh.tensor_index, kwargs] else: # If an element in the first call argument did not originate as a # keras tensor and is a constant value, we save it using the format # ['_CONSTANT_VALUE', -1, serializaed_tensor_or_python_constant] # (potentially including serialized kwargs in an optional 4th argument data = [_CONSTANT_VALUE, -1, _serialize_keras_tensor(t), kwargs] return tf_utils.ListWrapper(data) data = tf.nest.map_structure(serialize_first_arg_tensor, inputs) if (not tf.nest.is_nested(data) and not self.layer._preserve_input_structure_in_config): data = [data] data = tf_utils.convert_inner_node_data(data) return data ############################################################# # Properties for Backwards compatibility. # These only check the first input argument # As nodes are internal, they may be removed in the future. ############################################################# @property def input_tensors(self): if self.is_input: return [self.outputs] # Used in `Layer.input`. return self.call_args[0] @property def output_tensors(self): if self.is_input: return [self.outputs] # Used in `Layer.input`. 
return self.outputs @property def input_shapes(self): input_shapes = tf.nest.map_structure(backend.int_shape, self.input_tensors) if len(input_shapes) == 1 and not self.is_input: return input_shapes[0] return input_shapes @property def output_shapes(self): return tf.nest.map_structure(backend.int_shape, self.output_tensors) @property def outbound_layer(self): return self.layer @property def inbound_layers(self): if self.is_input: return [] inbound_layers = tf.nest.map_structure(lambda t: t._keras_history.layer, self.call_args[0]) return inbound_layers class KerasHistory( collections.namedtuple('KerasHistory', ['layer', 'node_index', 'tensor_index'])): """Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an `InputLayer`. This allows Keras to track how each Tensor was produced, and this information is later retraced by the `keras.engine.Network` class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Layer is called. The corresponding node that represents the call event that produced the Tensor can be found at `layer._inbound_nodes[node_index]`. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via `nest.flatten`. """ # Added to maintain memory and performance characteristics of `namedtuple` # while subclassing. __slots__ = () def is_keras_tensor(obj): return hasattr(obj, '_keras_history')
12,126
38.760656
81
py
keras
keras-master/keras/engine/partial_batch_padding_handler.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility object to handler partial batches for TPUStrategy.""" import tensorflow.compat.v2 as tf # pylint: disable=protected-access import numpy as np from keras import backend class PartialBatchPaddingHandler: """A container that holds info about partial batches for `predict()`.""" def __init__(self, output_shape): self.padded_batch_size = 0 self.padding_mask = tf.zeros(0) self.output_shape = output_shape def get_real_batch_size(self, dataset_batch): """Returns the number of elements in a potentially partial batch.""" if isinstance(dataset_batch, (tuple, list)): dataset_batch = dataset_batch[0] assert tf.nest.flatten(dataset_batch) def _find_any_tensor(batch_features): tensors = [ x for x in tf.nest.flatten(batch_features) if tf.is_tensor(x) ] if not tensors: raise ValueError('Cannot find any Tensor in features dict.') return tensors[0] return backend.cast(backend.shape(_find_any_tensor(dataset_batch))[0], dtype='int64') def update_mask(self, padding_mask, dataset_batch): """Calculate and cache the amount of padding required for a batch.""" original_batch_size = self.get_real_batch_size(dataset_batch) missing_count = self.padded_batch_size - original_batch_size mask = backend.concatenate([tf.ones(original_batch_size), tf.zeros(missing_count)], axis=0) return 
backend.concatenate([padding_mask, mask], axis=0) def pad_batch(self, *dataset_batch_elements): """Pads out the batch dimension of a tensor to the complete batch size.""" def _pad(batch): """Helper function to pad nested data within each batch elements.""" padded_dict_batch = {} if isinstance(batch, dict): for key, value in batch.items(): padded_dict_batch[key] = _pad(value) return padded_dict_batch rank = len(batch.shape) assert rank > 0 missing_count = (self.padded_batch_size - self.get_real_batch_size(batch)) padding = backend.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) return tf.pad(batch, padding, 'constant') if len(dataset_batch_elements) == 1: return _pad(dataset_batch_elements[0]) batch_elements = [] for batch_element in dataset_batch_elements: batch_elements.append(_pad(batch_element)) return tuple(batch_elements) def apply_mask(self, prediction_result): """Removes prediction output that corresponds to padded input.""" padding_mask = backend.get_value(self.padding_mask) assert len(padding_mask.shape) == 1 if len(self.output_shape) == 1: prediction = np.take(prediction_result, np.nonzero( padding_mask[:len(prediction_result)]), axis=0) if prediction.shape[0] == 1: prediction = np.squeeze(prediction, axis=0) return prediction else: predictions = [] for i in range(len(self.output_shape)): prediction = prediction_result[i] prediction = np.take(prediction, np.nonzero( padding_mask[:len(prediction)]), axis=0) predictions.append(np.squeeze(prediction)) return predictions
3,981
36.566038
80
py
keras
keras-master/keras/engine/training_arrays_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for model.fit calls with a Dataset object passed as validation_data.""" import tensorflow.compat.v2 as tf import io import sys from absl.testing import parameterized import numpy as np import keras from tensorflow.python.framework import test_util from keras import keras_parameterized from keras import testing_utils from keras.layers import core @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class ValidationDatasetNoLimitTest(keras_parameterized.TestCase): def create_dataset(self, num_samples, batch_size): input_data = np.random.rand(num_samples, 1) expected_data = input_data * 3 dataset = tf.data.Dataset.from_tensor_slices((input_data, expected_data)) return dataset.shuffle(10 * batch_size).batch(batch_size) def test_validation_dataset_with_no_step_arg(self): # Create a model that learns y=Mx. 
layers = [core.Dense(1)] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) model.compile(loss="mse", optimizer="adam", metrics=["mean_absolute_error"]) train_dataset = self.create_dataset(num_samples=200, batch_size=10) eval_dataset = self.create_dataset(num_samples=50, batch_size=25) history = model.fit(x=train_dataset, validation_data=eval_dataset, epochs=2) evaluation = model.evaluate(x=eval_dataset) # If the fit call used the entire dataset, then the final val MAE error # from the fit history should be equal to the final element in the output # of evaluating the model on the same eval dataset. self.assertAlmostEqual(history.history["val_mean_absolute_error"][-1], evaluation[-1], places=5) class PrintTrainingInfoTest(keras_parameterized.TestCase, parameterized.TestCase): @test_util.run_v1_only("Only relevant in graph mode.") def test_print_info_with_datasets(self): """Print training info should work with val datasets (b/133391839).""" model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(1,))]) model.compile(loss="mse", optimizer="sgd") dataset = tf.data.Dataset.from_tensors( ([1.], [1.])).repeat(100).batch(10) val_dataset = tf.data.Dataset.from_tensors( ([1.], [1.])).repeat(50).batch(10) mock_stdout = io.StringIO() with tf.compat.v1.test.mock.patch.object(sys, "stdout", mock_stdout): model.fit(dataset, epochs=2, validation_data=val_dataset) self.assertIn( "Train on 10 steps, validate on 5 steps", mock_stdout.getvalue()) @parameterized.named_parameters( ("with_validation", True), ("without_validation", False)) @test_util.run_v1_only("Only relevant in graph mode.") def test_print_info_with_numpy(self, do_validation): """Print training info should work with val datasets (b/133391839).""" model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(2,))]) model.compile(loss="mse", optimizer="sgd") dataset = np.arange(200).reshape(100, 2) if do_validation: val_data = (np.arange(100).reshape(50, 2), np.arange(50).reshape(50, 
1)) else: val_data = None mock_stdout = io.StringIO() with tf.compat.v1.test.mock.patch.object(sys, "stdout", mock_stdout): model.fit(dataset, batch_size=10, epochs=2, validation_data=val_data) self.assertIn("Train on 100 samples", mock_stdout.getvalue()) if do_validation: self.assertIn(", validate on 50 samples", mock_stdout.getvalue()) @keras_parameterized.run_all_keras_modes def test_dict_float64_input(self): class MyModel(keras.Model): def __init__(self): super(MyModel, self).__init__(self) self.dense1 = keras.layers.Dense(10, activation="relu") self.dense2 = keras.layers.Dense(10, activation="relu") self.concat = keras.layers.Concatenate() self.dense3 = keras.layers.Dense(1, activation="sigmoid") def call(self, inputs): d1 = self.dense1(inputs["one"]) d2 = self.dense2(inputs["two"]) concat = self.concat([d1, d2]) return self.dense3(concat) model = MyModel() model.compile( loss="mae", optimizer="adam", run_eagerly=testing_utils.should_run_eagerly()) model.fit( x={ "one": np.random.rand(100, 10, 1), "two": np.random.rand(100, 10, 1) }, y=np.random.rand(100, 10, 1)) def test_dict_validation_input(self): """Test case for GitHub issue 30122.""" train_input_0 = np.random.rand(1000, 1) train_input_1 = np.random.rand(1000, 1) train_labels = np.random.rand(1000, 1) val_input_0 = np.random.rand(1000, 1) val_input_1 = np.random.rand(1000, 1) val_labels = np.random.rand(1000, 1) input_0 = keras.Input(shape=(None,), name="input_0") input_1 = keras.Input(shape=(None,), name="input_1") class my_model(keras.Model): def __init__(self): super(my_model, self).__init__(self) self.hidden_layer_0 = keras.layers.Dense(100, activation="relu") self.hidden_layer_1 = keras.layers.Dense(100, activation="relu") self.concat = keras.layers.Concatenate() self.out_layer = keras.layers.Dense(1, activation="sigmoid") def call(self, inputs=[input_0, input_1]): activation_0 = self.hidden_layer_0(inputs["input_0"]) activation_1 = self.hidden_layer_1(inputs["input_1"]) concat = 
self.concat([activation_0, activation_1]) return self.out_layer(concat) model = my_model() model.compile(loss="mae", optimizer="adam") model.fit( x={ "input_0": train_input_0, "input_1": train_input_1 }, y=train_labels, validation_data=({ "input_0": val_input_0, "input_1": val_input_1 }, val_labels)) if __name__ == "__main__": tf.test.main()
6,629
34.837838
80
py
keras
keras-master/keras/engine/training_generator_v1.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Part of the Keras training engine related to Python generators of array data. """ import tensorflow.compat.v2 as tf # pylint: disable=protected-access import functools import math import numpy as np from keras import backend from keras import callbacks as cbks from keras.engine import training_utils from keras.engine import training_utils_v1 from keras.utils import data_utils from keras.utils import generic_utils from keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as logging def model_iteration(model, data, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=False, initial_epoch=0, mode=ModeKeys.TRAIN, batch_size=None, steps_name='steps', **kwargs): """Loop function for arrays of data with modes TRAIN/TEST/PREDICT. Args: model: Keras Model instance. data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or `(x, y, sample_weights)`) or a generator or `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset. steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. 
epochs: Number of times to iterate over the data. verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (eg, in a production environment). callbacks: List of callbacks to be called during training. validation_data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or `(x, y, sample_weights)`) or a generator or `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset. validation_steps: Total number of steps (batches of samples) before declaring validation finished. validation_freq: Only relevant if validation data is provided. Integer or `collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. class_weight: Dictionary mapping class indices to a weight for the class. max_queue_size: Integer. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of each epoch. Only used with instances of `Sequence` (`keras.utils.Sequence`). 
Has no effect when `steps_per_epoch` is not `None`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run). mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. batch_size: Integer batch size or None if unknown. Will only be used if `data` is in NumPy/Tensor format. steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. **kwargs: Additional arguments for backwards compatibility. `steps` is accepted as an alias for `steps_per_epoch`. Returns: - In TRAIN mode: `History` object. - In TEST mode: Evaluation metrics. - In PREDICT mode: Outputs of the Model called on inputs. Raises: ValueError: in case of invalid arguments. """ if 'steps' in kwargs: steps_per_epoch = kwargs['steps'] # Determine the number of steps per epoch and whether we should reset the # dataset at the end of each epoch. reset_dataset_after_each_epoch = False original_dataset = None is_dataset = isinstance(data, (tf.data.Dataset, tf.compat.v1.data.Dataset)) if is_dataset: original_dataset = data if steps_per_epoch is None: reset_dataset_after_each_epoch = True steps_per_epoch = training_utils_v1.infer_steps_for_dataset( model, data, steps_per_epoch, epochs=epochs, steps_name=steps_name) # Convert to a format that supports `next(generator)`. generator, steps_per_epoch = convert_to_generator_like( data, steps_per_epoch=steps_per_epoch, batch_size=batch_size, epochs=epochs - initial_epoch, shuffle=shuffle) do_validation = validation_data is not None is_sequence = isinstance(generator, data_utils.Sequence) _validate_arguments(is_sequence, is_dataset, use_multiprocessing, workers, steps_per_epoch, validation_data, validation_steps, mode, kwargs) batch_function = _make_execution_function( model, mode, class_weight=class_weight) # Create the queue for the generator. 
enqueuer = None if not is_dataset: generator, enqueuer = _make_enqueued_generator( generator, workers=workers, use_multiprocessing=use_multiprocessing, max_queue_size=max_queue_size, shuffle=shuffle) num_samples_or_steps, use_steps = _get_num_samples_or_steps( data, steps_per_epoch) count_mode = 'steps' if use_steps else 'samples' callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, epochs=epochs, steps_per_epoch=steps_per_epoch, batch_size=batch_size, samples=num_samples_or_steps, count_mode=count_mode, verbose=verbose, mode=mode) if mode == ModeKeys.PREDICT: aggregator = training_utils_v1.OutputsAggregator( True, steps=steps_per_epoch) else: aggregator = training_utils_v1.MetricsAggregator( True, steps=steps_per_epoch) should_set_learning_phase = tf.executing_eagerly() and model.run_eagerly if should_set_learning_phase: learning_phase_scope = backend.eager_learning_phase_scope( 1 if mode == ModeKeys.TRAIN else 0) learning_phase_scope.__enter__() callbacks.model.stop_training = False callbacks._call_begin_hook(mode) initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode) for epoch in range(initial_epoch, epochs): if callbacks.model.stop_training: break # Setup work for each epoch. model.reset_metrics() epoch_logs = {} if mode == ModeKeys.TRAIN: callbacks.on_epoch_begin(epoch, epoch_logs) if steps_per_epoch is None: # Loop over dataset until `OutOfRangeError` is raised. target_steps = np.inf else: # Loop over dataset for the specified number of steps. target_steps = steps_per_epoch step = 0 while step < target_steps: batch_data = _get_next_batch(generator) if batch_data is None: if is_dataset: # The dataset passed by the user ran out of batches. # Now we know the cardinality of the dataset. # If steps_per_epoch was specified, then running out of data is # unexpected, so we stop training and inform the user. 
if steps_per_epoch: callbacks.model.stop_training = True logging.warning( 'Your dataset ran out of data; interrupting training. ' 'Make sure that your dataset can generate at least ' '`%s * epochs` batches (in this case, %d batches). ' 'You may need to use the repeat() function when ' 'building your dataset.' % (steps_name, steps_per_epoch * epochs)) elif step > 0: steps_per_epoch = step aggregator.steps = steps_per_epoch else: # We ran out of batches while the user passed an iterator (legacy). callbacks.model.stop_training = True logging.warning( 'Your dataset iterator ran out of data; ' 'interrupting training. Make sure that your iterator ' 'can generate at least `%s * epochs` ' 'batches (in this case, %d batches). You may need to' 'use the repeat() function when building your ' 'dataset.' % (steps_name, steps_per_epoch * epochs)) break # `batch_size` used for validation data if validation # data is NumPy/EagerTensors. batch_size = int(tf.nest.flatten(batch_data)[0].shape[0]) # Callbacks batch begin. batch_logs = {'batch': step, 'size': batch_size} callbacks._call_batch_hook(mode, 'begin', step, batch_logs) is_deferred = not model._is_compiled batch_outs = batch_function(*batch_data) if not isinstance(batch_outs, list): batch_outs = [batch_outs] if step == 0: aggregator.create(batch_outs) if is_deferred: # Set callbacks params. We do this here when model is compiled only # in the first iteration of this loop (deferred build scenario). cbks.set_callback_parameters( callbacks, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=num_samples_or_steps, verbose=verbose, mode=mode) # Aggregate results. aggregator.aggregate(batch_outs) # Callbacks batch end. 
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode) callbacks._call_batch_hook(mode, 'end', step, batch_logs) step += 1 if callbacks.model.stop_training: break aggregator.finalize() results = aggregator.results epoch_logs = cbks.make_logs(model, epoch_logs, results, mode) if len(results) == 1: results = results[0] # Run the test loop every epoch during training. if (do_validation and training_utils_v1.should_run_validation(validation_freq, epoch) and not callbacks.model.stop_training): val_results = model_iteration( model, validation_data, steps_per_epoch=validation_steps, batch_size=batch_size, class_weight=class_weight, workers=workers, use_multiprocessing=use_multiprocessing, max_queue_size=max_queue_size, callbacks=callbacks, verbose=verbose, mode=ModeKeys.TEST, steps_name='validation_steps') if not isinstance(val_results, list): val_results = [val_results] epoch_logs = cbks.make_logs( model, epoch_logs, val_results, mode, prefix='val_') if mode == ModeKeys.TRAIN: # Epochs only apply to `fit`. callbacks.on_epoch_end(epoch, epoch_logs) # Recreate dataset iterator for the next epoch. if reset_dataset_after_each_epoch and epoch < epochs - 1: generator = tf.compat.v1.data.make_one_shot_iterator(original_dataset) model._successful_loop_finish = True callbacks._call_end_hook(mode) if enqueuer is not None: enqueuer.stop() if should_set_learning_phase: learning_phase_scope.__exit__(None, None, None) if mode == ModeKeys.TRAIN: return model.history return results # Maintain compatibility with the existing names. 
# Thin partial applications of `model_iteration` that keep the legacy
# `fit_generator` / `evaluate_generator` / `predict_generator` entry points
# working: each pre-binds the execution mode (and, for eval/predict, disables
# shuffling).
fit_generator = functools.partial(model_iteration, mode=ModeKeys.TRAIN)
evaluate_generator = functools.partial(
    model_iteration, mode=ModeKeys.TEST, shuffle=False)
predict_generator = functools.partial(
    model_iteration, mode=ModeKeys.PREDICT, shuffle=False)


def _get_next_batch(generator):
  """Retrieves the next batch of input data.

  Returns:
    A 1/2/3-tuple `(input,)`, `(input, target)` or
    `(input, target, sample_weights)`, or `None` when the generator (or the
    dataset iterator backing it) is exhausted.

  Raises:
    ValueError: If the generator yields a tuple of any other length.
  """
  try:
    generator_output = next(generator)
  except (StopIteration, tf.errors.OutOfRangeError):
    # Either a plain Python generator finished (StopIteration) or a dataset
    # iterator ran out of batches (OutOfRangeError); both mean "no more data"
    # and are signalled to the caller with None.
    return None
  if not isinstance(generator_output, tuple):
    # Always wrap in a tuple.
    generator_output = (generator_output,)
  if len(generator_output) not in [1, 2, 3]:
    raise ValueError(
        'Output of generator should be a tuple of 1 or 2 or 3 '
        'elements: (input,) or (input, target) or '
        '(input, target, sample_weights). Received {}'.format(generator_output))
  return generator_output


def _validate_arguments(is_sequence, is_dataset, use_multiprocessing, workers,
                        steps_per_epoch, validation_data, validation_steps,
                        mode, kwargs):
  """Raises errors if arguments are invalid.

  Args:
    is_sequence: Boolean, whether data is a `keras.utils.data_utils.Sequence`
      instance.
    is_dataset: Boolean, whether data is a dataset instance.
    use_multiprocessing: Boolean. If `True`, use process-based threading. If
      unspecified, `use_multiprocessing` will default to `False`. Note that
      because this implementation relies on multiprocessing, you should not
      pass non-picklable arguments to the generator as they can't be passed
      easily to children processes.
    workers: Integer. Maximum number of processes to spin up when using
      process-based threading. If unspecified, `workers` will default to 1. If
      0, will execute the generator on the main thread.
    steps_per_epoch: Total number of steps (batches of samples) before
      declaring one epoch finished and starting the next epoch. Ignored with
      the default value of `None`.
    validation_data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or
      `(x, y)` or `(x, y, sample_weights)`) or a generator or
      `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset.
    validation_steps: Total number of steps (batches of samples) before
      declaring validation finished.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
    kwargs: Additional arguments for backwards compatibility.

  Raises:
    ValueError: If `steps_per_epoch` or `validation_steps` are not passed for
      data types that require them, or if unrecognized keyword arguments are
      passed.
  """
  if not is_sequence and use_multiprocessing and workers > 1:
    # A plain generator cannot be safely shared across processes, so every
    # worker may yield the same elements; `Sequence` is index-based and does
    # not have this problem — hence the warning rather than a hard error.
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
                    ' class.'))

  if steps_per_epoch is None and not is_dataset:
    # Datasets can infer their own cardinality; anything else needs an
    # explicit step count. The argument name differs between fit and
    # evaluate/predict, so tailor the error message.
    arg_name = 'steps_per_epoch' if mode == ModeKeys.TRAIN else 'steps'
    raise ValueError('Please specify the number of steps via the '
                     '`{}` argument.'.format(arg_name))

  val_gen = (
      data_utils.is_generator_or_sequence(validation_data) or
      isinstance(validation_data, tf.data.Iterator))
  if (val_gen and not isinstance(validation_data, data_utils.Sequence) and
      not validation_steps):
    # A `Sequence` knows its own length; generators and iterators do not, so
    # the user must say how many validation batches to draw.
    raise ValueError('Please specify the `validation_steps` argument.')

  if any(k != 'steps' for k in kwargs):
    # `steps` is the only accepted legacy alias (for `steps_per_epoch`).
    raise ValueError('Invalid arguments passed: {}'.format(
        [k for k in kwargs if k != 'steps']))


def convert_to_generator_like(data,
                              batch_size=None,
                              steps_per_epoch=None,
                              epochs=1,
                              shuffle=False):
  """Make a generator out of NumPy or EagerTensor inputs.

  Args:
    data: Either a generator or `keras.utils.data_utils.Sequence` object or
      `Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or
      EagerTensors. If a tuple, the elements represent `(x, y, sample_weights)`
      and may be `None` or `[None]`.
    batch_size: Used when creating a generator out of tuples of NumPy arrays or
      EagerTensors.
    steps_per_epoch: Steps of the generator to run each epoch. If `None` the
      number of steps will be read from the data (for
      `keras.utils.data_utils.Sequence` types).
    epochs: Total number of epochs to run.
    shuffle: Whether the data should be shuffled.

  Returns:
    - Generator, `keras.utils.data_utils.Sequence`, or `Iterator`.

  Raises:
    - ValueError: If `batch_size` is not provided for NumPy or EagerTensor
      inputs.
  """
  if isinstance(data, tuple):
    # Scrub `Nones` that might have been passed for `targets`, `sample_weights`.
    data = tuple(
        ele for ele in data if not all(e is None for e in tf.nest.flatten(ele)))

  if data_utils.is_generator_or_sequence(data) or isinstance(
      data, tf.data.Iterator):
    if isinstance(data, data_utils.Sequence):
      if steps_per_epoch is None:
        # A `Sequence` knows its own length; use it as the step count.
        steps_per_epoch = len(data)
    return data, steps_per_epoch
  if isinstance(data, tf.data.Dataset):
    # Datasets are adapted via a TF1-style one-shot iterator so the caller
    # can drive them with `next(...)` like any other generator.
    return tf.compat.v1.data.make_one_shot_iterator(data), steps_per_epoch

  # Create generator from NumPy or EagerTensor Input.
  num_samples = int(tf.nest.flatten(data)[0].shape[0])
  if batch_size is None:
    raise ValueError(
        'When passing input data as arrays, do not specify '
        '`steps_per_epoch`/`steps` argument. Please use '
        '`batch_size` instead.')
  steps_per_epoch = int(math.ceil(num_samples / batch_size))

  def _gen(data):
    """Makes a generator out of a structure of NumPy/EagerTensors."""
    index_array = np.arange(num_samples)
    for _ in range(epochs):
      if shuffle:
        np.random.shuffle(index_array)
      batches = generic_utils.make_batches(num_samples, batch_size)
      for (batch_start, batch_end) in batches:
        batch_ids = index_array[batch_start:batch_end]
        # `contiguous` is only safe when indices were not shuffled.
        flat_batch_data = training_utils.slice_arrays(
            tf.nest.flatten(data), batch_ids, contiguous=(not shuffle))
        yield tf.nest.pack_sequence_as(data, flat_batch_data)

  return _gen(data), steps_per_epoch


def _make_enqueued_generator(generator,
                             workers=1,
                             use_multiprocessing=False,
                             max_queue_size=10,
                             shuffle=False):
  """Create a buffered queue of next elements of the generator.

  Returns:
    A `(output_generator, enqueuer)` pair. `enqueuer` is `None` when
    `workers == 0` (no background prefetching); otherwise the caller is
    responsible for calling `enqueuer.stop()` when done.
  """
  is_sequence = isinstance(generator, data_utils.Sequence)
  enqueuer = None
  if workers > 0:
    # Background workers fill a queue so the training loop never blocks on
    # data production. `Sequence` inputs are index-based so ordering (and
    # optional shuffling) can be preserved; arbitrary generators cannot be
    # ordered.
    if is_sequence:
      enqueuer = data_utils.OrderedEnqueuer(
          generator, use_multiprocessing=use_multiprocessing, shuffle=shuffle)
    else:
      enqueuer = data_utils.GeneratorEnqueuer(
          generator, use_multiprocessing=use_multiprocessing)
    enqueuer.start(workers=workers, max_queue_size=max_queue_size)
    output_generator = enqueuer.get()
  else:
    # workers == 0: run the generator on the main thread.
    if is_sequence:
      output_generator = data_utils.iter_sequence_infinite(generator)
    else:
      output_generator = generator
  return output_generator, enqueuer


def _make_execution_function(model, mode, class_weight=None):
  """Makes function to run one step of model execution."""
  if mode == ModeKeys.TRAIN:
    f = functools.partial(model.train_on_batch, class_weight=class_weight)
  elif mode == ModeKeys.TEST:
    f = model.test_on_batch
  else:
    # Match signature of other modes to allow
    # 1, 2, or 3-tuples from generator
    def predict_on_batch(x, y=None, sample_weights=None):  # pylint: disable=unused-argument
      return model.predict_on_batch(x)

    f = predict_on_batch

  # Maintain stateful metrics across batch-level calls.
  if mode != ModeKeys.PREDICT:
    f = functools.partial(f, reset_metrics=False)
  return f


def _get_num_samples_or_steps(data, steps_per_epoch):
  """Returns number of samples or steps, and whether to use steps count mode."""
  flat_inputs = tf.nest.flatten(data)
  if hasattr(flat_inputs[0], 'shape'):
    # Array-like input: count individual samples from the batch dimension.
    return int(flat_inputs[0].shape[0]), False
  # Otherwise progress is tracked in steps (batches).
  return steps_per_epoch, True


class GeneratorOrSequenceTrainingLoop(training_utils_v1.TrainingLoop):
  """Generator-like.

  Input is Python generator, or Sequence object.

  The difference between this class and `GeneratorLikeTrainingFunction` is that
  this class only handles inputs that with x, y and sample_weight fused into
  one param.
  """

  def fit(self,
          model,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_freq=1,
          max_queue_size=10,
          workers=1,
          use_multiprocessing=False):
    model._validate_or_infer_batch_size(batch_size, steps_per_epoch, x)
    # Generators fuse x/y/sample_weight into one stream, so passing them
    # separately (or asking for a validation split) is an error.
    training_utils_v1.check_generator_arguments(
        y, sample_weight, validation_split=validation_split)
    return fit_generator(
        model,
        x,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        class_weight=class_weight,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_name='steps_per_epoch')

  def evaluate(self,
               model,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               max_queue_size=10,
               workers=1,
               use_multiprocessing=False):
    model._validate_or_infer_batch_size(batch_size, steps, x)
    training_utils_v1.check_generator_arguments(y, sample_weight)
    return evaluate_generator(
        model,
        x,
        steps=steps,
        verbose=verbose,
        callbacks=callbacks,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing)

  def predict(self,
              model,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              max_queue_size=10,
              workers=1,
              use_multiprocessing=False):
    model._validate_or_infer_batch_size(batch_size, steps, x)
    return predict_generator(
        model,
        x,
        steps=steps,
        verbose=verbose,
        callbacks=callbacks,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing)


class EagerDatasetOrIteratorTrainingLoop(training_utils_v1.TrainingLoop):
  """A non-distributed Dataset or iterator in eager execution."""

  def fit(self,
          model,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_freq=1,
          **kwargs):
    model._validate_or_infer_batch_size(batch_size, steps_per_epoch, x)
    # Make sure that y, sample_weights, validation_split are not passed.
    training_utils_v1.validate_dataset_input(x, y, sample_weight,
                                             validation_split)
    if (isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset)) and
        shuffle):
      # Warn when the user asked for shuffling but the dataset itself does
      # not appear shuffled — a Dataset cannot be re-shuffled here.
      training_utils_v1.verify_dataset_shuffled(x)

    return fit_generator(
        model,
        x,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        class_weight=class_weight,
        workers=0,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_name='steps_per_epoch')

  def evaluate(self,
               model,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               **kwargs):
    model._validate_or_infer_batch_size(batch_size, steps, x)
    # Make sure that y, sample_weights, validation_split are not passed.
    training_utils_v1.validate_dataset_input(x, y, sample_weight)
    return evaluate_generator(
        model, x, steps=steps, verbose=verbose, workers=0, callbacks=callbacks)

  def predict(self,
              model,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              **kwargs):
    model._validate_or_infer_batch_size(batch_size, steps, x)
    return predict_generator(
        model, x, steps=steps, verbose=verbose, workers=0, callbacks=callbacks)


class GeneratorLikeTrainingLoop(training_utils_v1.TrainingLoop):
  """TrainingLoop that handle inputs like python generator.

  This is the default handler for most of the input data types, includes
  symbolic tensors or Numpy array-like, Datasets and iterators in graph mode
  (since they generate symbolic tensors). This Function is used to handle model
  with `run_eagerly` = True.
  """

  def fit(self,
          model,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_freq=1,
          **kwargs):
    batch_size = model._validate_or_infer_batch_size(batch_size,
                                                     steps_per_epoch, x)
    x, y, sample_weights = model._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        batch_size=batch_size,
        check_steps=True,
        steps_name='steps_per_epoch',
        steps=steps_per_epoch,
        validation_split=validation_split,
        shuffle=shuffle)

    if validation_data:
      validation_data = model._prepare_validation_data(validation_data,
                                                       batch_size,
                                                       validation_steps)
    elif validation_split and 0. < validation_split < 1.:
      # Carve the validation set out of the (already standardized) training
      # arrays when no explicit validation data was given.
      (x, y, sample_weights, val_x, val_y, val_sample_weights) = (
          training_utils_v1.split_training_and_validation_data(
              x, y, sample_weights, validation_split))
      validation_data = (val_x, val_y, val_sample_weights)
    else:
      if validation_steps:
        raise ValueError('`validation_steps` should not be specified if '
                         '`validation_data` is None.')
    return fit_generator(
        model, (x, y, sample_weights),
        steps_per_epoch=steps_per_epoch,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        workers=0,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_name='steps_per_epoch')

  def evaluate(self,
               model,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               **kwargs):
    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
    x, y, sample_weights = model._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        batch_size=batch_size,
        check_steps=True,
        steps_name='steps',
        steps=steps)
    return evaluate_generator(
        model, (x, y, sample_weights),
        steps=steps,
        batch_size=batch_size,
        verbose=verbose,
        workers=0,
        callbacks=callbacks)

  def predict(self,
              model,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              **kwargs):
    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
    x, _, _ = model._standardize_user_data(
        x, check_steps=True, steps_name='steps', steps=steps)
    return predict_generator(
        model,
        x,
        steps=steps,
        batch_size=batch_size,
        verbose=verbose,
        workers=0,
        callbacks=callbacks)
30,465
35.928485
92
py
keras
keras-master/keras/engine/functional.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """A `Network` is way to compose layers: the topological form of a `Model`.""" import tensorflow.compat.v2 as tf import collections import copy import itertools import warnings from keras import backend from keras.engine import base_layer from keras.engine import base_layer_utils from keras.engine import functional_utils from keras.engine import input_layer as input_layer_module from keras.engine import input_spec from keras.engine import node as node_module from keras.engine import training as training_lib from keras.engine import training_utils from keras.saving.saved_model import network_serialization from keras.utils import generic_utils from keras.utils import tf_inspect from keras.utils import tf_utils from tensorflow.python.platform import tf_logging as logging from tensorflow.tools.docs import doc_controls # pylint: disable=g-classes-have-attributes class Functional(training_lib.Model): """A `Functional` model is a `Model` defined as a directed graph of layers. Three types of `Model` exist: subclassed `Model`, `Functional` model, and `Sequential` (a special case of `Functional`). 
In general, more Keras features are supported with `Functional` than with subclassed `Model`s, specifically: - Model cloning (`keras.models.clone`) - Serialization (`model.get_config()/from_config`, `model.to_json()` - Whole-model saving (`model.save()`) A `Functional` model can be instantiated by passing two arguments to `__init__`. The first argument is the `keras.Input` Tensors that represent the inputs to the model. The second argument specifies the output tensors that represent the outputs of this model. Both arguments can be a nested structure of tensors. Example: ``` inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))} t = keras.layers.Dense(1, activation='relu')(inputs['x1']) outputs = keras.layers.Add()([t, inputs['x2']) model = keras.Model(inputs, outputs) ``` A `Functional` model constructed using the Functional API can also include raw TensorFlow functions, with the exception of functions that create Variables or assign ops. Example: ``` inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(1)(inputs) outputs = tf.nn.relu(x) model = keras.Model(inputs, outputs) ``` Args: inputs: List of input tensors (must be created via `tf.keras.Input()`). outputs: List of output tensors. name: String, optional. Name of the model. trainable: Boolean, optional. If the model's variables should be trainable. """ # See tf.Module for the usage of this property. # The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to # flatten the key since it is trying to convert Trackable/Layer to a string. 
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain( ('_layer_call_argspecs', '_compiled_trainable_state', '_output_mask_cache', '_output_tensor_cache', '_output_shape_cache'), training_lib.Model._TF_MODULE_IGNORED_PROPERTIES )) @tf.__internal__.tracking.no_automatic_dependency_tracking def __init__(self, inputs, outputs, name=None, trainable=True, **kwargs): # This is used by the Model class, since we have some logic to swap the # class in the __new__ method, which will lead to __init__ get invoked # twice. Using the skip_init to skip one of the invocation of __init__ to # avoid any side effects skip_init = kwargs.pop('skip_init', False) if skip_init: return generic_utils.validate_kwargs(kwargs, {}) super(Functional, self).__init__(name=name, trainable=trainable) # Check if the inputs contain any intermediate `KerasTensor` (not created # by tf.keras.Input()). In this case we need to clone the `Node` and # `KerasTensor` objects to mimic rebuilding a new model from new inputs. # This feature is only enabled in TF2 not in v1 graph mode. if tf.compat.v1.executing_eagerly_outside_functions(): if not all([functional_utils.is_input_keras_tensor(t) for t in tf.nest.flatten(inputs)]): inputs, outputs = functional_utils.clone_graph_nodes(inputs, outputs) self._init_graph_network(inputs, outputs) @tf.__internal__.tracking.no_automatic_dependency_tracking def _init_graph_network(self, inputs, outputs): base_layer.keras_api_gauge.get_cell('Functional').set(True) # This method is needed for Sequential to reinitialize graph network when # layer is added or removed. self._is_graph_network = True # Normalize and set self.inputs, self.outputs. 
if isinstance(inputs, list) and len(tf.nest.flatten(inputs)) == 1: inputs = inputs[0] if isinstance(outputs, list) and len(tf.nest.flatten(outputs)) == 1: outputs = outputs[0] self._nested_inputs = inputs self._nested_outputs = outputs self.inputs = tf.nest.flatten(inputs) self.outputs = tf.nest.flatten(outputs) # Models constructed with a single Tensor or list of Tensors can # be called with a dict, where the keys of the dict are the names # of the `Input` objects. Extra keys are ignored with warning. if not tf.nest.is_nested(self._nested_inputs): self._enable_dict_to_input_mapping = True elif (isinstance(self._nested_inputs, (list, tuple)) and not any(tf.nest.is_nested(t) for t in self._nested_inputs)): self._enable_dict_to_input_mapping = True elif (isinstance(self._nested_inputs, dict) and not any(tf.nest.is_nested(t) for t in self._nested_inputs.values())): self._enable_dict_to_input_mapping = True else: self._enable_dict_to_input_mapping = False if not tf.compat.v1.executing_eagerly_outside_functions(): if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs): base_layer_utils.create_keras_history(self._nested_outputs) self._validate_graph_inputs_and_outputs() # A Network does not create weights of its own, thus it is already # built. self.built = True self._build_input_shape = tf.nest.map_structure(lambda x: x.shape, inputs) self._compute_output_and_mask_jointly = True # `_expects_training_arg` is True since the `training` argument is always # present in the signature of the `call` method of a graph network. self._expects_training_arg = True self._expects_mask_arg = True # A graph network does not autocast inputs, as its layers will cast them # instead. self._autocast = False self._input_layers = [] self._output_layers = [] self._input_coordinates = [] self._output_coordinates = [] # This is for performance optimization when calling the Network on new # inputs. 
Every time the Network is called on a set on input tensors, # we compute the output tensors, output masks and output shapes in one pass, # then cache them here. When any of these outputs is queried later, we # retrieve it from there instead of recomputing it. self._output_mask_cache = {} self._output_tensor_cache = {} self._output_shape_cache = {} # Build self._output_layers: for x in self.outputs: layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access self._output_layers.append(layer) self._output_coordinates.append((layer, node_index, tensor_index)) # Build self._input_layers: for x in self.inputs: layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access # It's supposed to be an input layer, so only one node # and one tensor output. assert node_index == 0 assert tensor_index == 0 self._input_layers.append(layer) self._input_coordinates.append((layer, node_index, tensor_index)) # Keep track of the network's nodes and layers. nodes, nodes_by_depth, layers, _ = _map_graph_network( self.inputs, self.outputs) self._network_nodes = nodes self._nodes_by_depth = nodes_by_depth self._self_tracked_trackables = layers self._layer_call_argspecs = {} for layer in self._self_tracked_trackables: self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call) # Build self.input_names and self.output_names. self._set_output_names() self.input_names = [] self._feed_input_names = [] self._feed_inputs = [] self._feed_input_shapes = [] for layer in self._input_layers: self.input_names.append(layer.name) if layer.is_placeholder: self._feed_input_names.append(layer.name) # Use batch_input_shape here because non-eager composite tensors may not # have a shape attribute that's meaningful (sparse, for instance, has # a tensor that's non-constant and needs to be fed). This means that # input layers that create placeholders will need to have the # batch_input_shape attr to allow for input shape validation. 
self._feed_input_shapes.append(layer._batch_input_shape) self._feed_inputs.append(layer.input) self._compute_tensor_usage_count() self._set_save_spec(self._nested_inputs) tf_utils.assert_no_legacy_layers(self.layers) @property def input(self): """Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found. """ return self._nested_inputs @property def input_shape(self): """Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode. """ return tf.nest.map_structure(backend.int_shape, self.input) @property def input_spec(self): if hasattr(self, '_manual_input_spec'): return self._manual_input_spec if (isinstance(self._nested_inputs, (dict, list, tuple)) and len(self._nested_inputs) != len(self.inputs)): # Case where we have a nested structure. # In such a case we can't safely run any checks. return None if isinstance(self._nested_inputs, dict): # Case where `_nested_inputs` is a plain dict of Inputs. names = sorted(self._nested_inputs.keys()) return [input_spec.InputSpec( shape=shape_with_no_batch_size(self._nested_inputs[name]), allow_last_axis_squeeze=True, name=name) for name in names] else: # Single input, or list / tuple of inputs. # The data may be passed as a dict keyed by input name. 
return [input_spec.InputSpec( shape=shape_with_no_batch_size(x), allow_last_axis_squeeze=True, name=x._keras_history.layer.name) for x in self.inputs] @input_spec.setter def input_spec(self, value): self._manual_input_spec = value @property def output(self): """Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode. """ return self._nested_outputs @property def output_shape(self): """Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode. """ return tf.nest.map_structure(backend.int_shape, self.output) def _set_output_names(self): """Assigns unique names to the Network's outputs. Output layers with multiple output tensors would otherwise lead to duplicate names in self.output_names. """ uniquified = [] output_names = set() prefix_count = {} for layer in self._output_layers: proposal = layer.name while proposal in output_names: existing_count = prefix_count.get(layer.name, 1) proposal = '{}_{}'.format(layer.name, existing_count) prefix_count[layer.name] = existing_count + 1 output_names.add(proposal) uniquified.append(proposal) self.output_names = uniquified @property def _layer_checkpoint_dependencies(self): """Dictionary of layer dependencies to be included in the checkpoint.""" weight_layer_index = 0 dependencies = collections.OrderedDict() for layer_index, layer in enumerate(self.layers): try: if layer.weights: # Keep a separate index for layers which have weights. 
This allows # users to insert Layers without weights anywhere in the network # without breaking checkpoints. dependencies['layer_with_weights-%d' % weight_layer_index] = layer weight_layer_index += 1 except ValueError: # The layer might have weights, but may not be built yet. We just treat # it as layer without weight. pass # Even if it doesn't have weights, we should still track everything in # case it has/will have Trackable dependencies. dependencies['layer-%d' % layer_index] = layer return dependencies @property def _checkpoint_dependencies(self): dependencies = [ tf.__internal__.tracking.TrackableReference(name=name, ref=layer) for name, layer in self._layer_checkpoint_dependencies.items()] dependencies.extend(super(Functional, self)._checkpoint_dependencies) return dependencies def _lookup_dependency(self, name): layer_dependencies = self._layer_checkpoint_dependencies if name in layer_dependencies: return layer_dependencies[name] return super(Functional, self)._lookup_dependency(name) def _handle_deferred_layer_dependencies(self, layers): """Handles layer checkpoint dependencies that are added after init.""" layer_checkpoint_dependencies = self._layer_checkpoint_dependencies layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()} for layer in layers: if layer in layer_to_name: self._handle_deferred_dependencies(name=layer_to_name[layer], trackable=layer) @property def _should_compute_mask(self): return True def compute_mask(self, inputs, mask): # TODO(omalleyt): b/123540974 This function is not really safe to call # by itself because it will duplicate any updates and losses in graph # mode by `call`ing the Layers again. output_tensors = self._run_internal_graph(inputs, mask=mask) return tf.nest.map_structure(lambda t: getattr(t, '_keras_mask', None), output_tensors) @doc_controls.do_not_doc_inheritable def call(self, inputs, training=None, mask=None): """Calls the model on new inputs. 
  def compute_output_shape(self, input_shape):
    """Computes output shapes given input shapes, using the cached graph.

    Walks the node graph by depth, calling each layer's
    `compute_output_shape`, and caches the result keyed on the input shapes.
    """
    # Convert any shapes in tuple format to TensorShapes.
    input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

    if (len(tf.nest.flatten(input_shape)) !=
        len(tf.nest.flatten(self._input_layers))):
      raise ValueError(f'Invalid `input_shape` argument {input_shape}: '
                       f'the model expects {len(self._input_layers)} '
                       'input tensors.')

    # Use the tuple of TensorShape as the cache key, since tuple is hashable
    # and can be used as hash key.
    try:
      cache_key = tuple(tf_utils.convert_shapes(input_shape, to_tuples=True))
      if cache_key in self._output_shape_cache:
        # Cache hit. Return shapes as TensorShapes.
        return self._output_shape_cache[cache_key]
    except ValueError:
      # In case there are unknown TensorShape, eg for sparse tensor input,
      # We skip the caching since the shape is unknown.
      pass

    layers_to_output_shapes = {}
    for layer, shape in zip(self._input_layers, tf.nest.flatten(input_shape)):
      # It's an input layer: then `compute_output_shape` is identity,
      # and there is only one node and one tensor.
      shape_key = layer.name + '_0_0'
      layers_to_output_shapes[shape_key] = shape

    depth_keys = list(self._nodes_by_depth.keys())
    depth_keys.sort(reverse=True)
    # Iterate over nodes, by depth level.
    # NOTE(review): if the graph has a single depth level (inputs only),
    # `output_shapes` below is never assigned before the final `return` —
    # presumably unreachable for valid functional models; confirm.
    if len(depth_keys) > 1:
      for depth in depth_keys:
        nodes = self._nodes_by_depth[depth]
        for node in nodes:
          layer = node.layer
          if layer in self._input_layers:
            # We've already covered the input layers
            # a few lines above.
            continue
          # Get the input shapes for the first argument of the node
          layer_input_shapes = []
          layer_inputs = node.call_args[0]
          for layer_input in tf.nest.flatten(layer_inputs):
            kh = layer_input._keras_history
            input_layer_key = kh.layer.name + '_%s_%s' % (kh.node_index,
                                                          kh.tensor_index)
            layer_input_shapes.append(layers_to_output_shapes[input_layer_key])
          layer_input_shapes = tf.nest.pack_sequence_as(layer_inputs,
                                                        layer_input_shapes)
          # Layers expect shapes to be tuples for `compute_output_shape`.
          layer_input_shapes = tf_utils.convert_shapes(
              layer_input_shapes, to_tuples=True)
          layer_output_shapes = layer.compute_output_shape(layer_input_shapes)
          # Convert back to TensorShapes.
          layer_output_shapes = tf_utils.convert_shapes(
              layer_output_shapes, to_tuples=False)

          node_index = layer._inbound_nodes.index(node)  # pylint: disable=protected-access
          for j, shape in enumerate(tf.nest.flatten(layer_output_shapes)):
            shape_key = layer.name + '_%s_%s' % (node_index, j)
            layers_to_output_shapes[shape_key] = shape

      # Read final output shapes from layers_to_output_shapes.
      output_shapes = []
      for i in range(len(self._output_layers)):
        layer, node_index, tensor_index = self._output_coordinates[i]
        shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
        output_shapes.append(layers_to_output_shapes[shape_key])
      output_shapes = tf.nest.pack_sequence_as(self._nested_outputs,
                                               output_shapes)
      # Store in cache.
      self._output_shape_cache[cache_key] = output_shapes

    # Return shapes as TensorShapes.
    return output_shapes

  def _init_set_name(self, name, zero_based=True):
    """Sets `self._name`, generating a unique snake-case name when empty."""
    if not name:
      cls_name = self.__class__.__name__
      if self.__class__ == Functional:
        # Hide the functional class name from user, since it's not a public
        # visible class. Use "Model" instead.
        cls_name = 'Model'
      self._name = backend.unique_object_name(
          generic_utils.to_snake_case(cls_name),
          zero_based=zero_based)
    else:
      self._name = name
  def _run_internal_graph(self, inputs, training=None, mask=None):
    """Computes output tensors for new inputs.

    # Note:
        - Can be run on non-Keras tensors.

    Args:
        inputs: Tensor or nested structure of Tensors.
        training: Boolean learning phase.
        mask: (Optional) Tensor or nested structure of Tensors.

    Returns:
        output_tensors
    """
    inputs = self._flatten_to_reference_inputs(inputs)
    if mask is None:
      masks = [None] * len(inputs)
    else:
      masks = self._flatten_to_reference_inputs(mask)
    for input_t, mask in zip(inputs, masks):
      input_t._keras_mask = mask

    # Dictionary mapping reference tensors to computed tensors.
    tensor_dict = {}
    # Each tensor is stored once per expected usage so `.pop()` below can
    # release references as soon as consumers have read them.
    tensor_usage_count = self._tensor_usage_count
    for x, y in zip(self.inputs, inputs):
      y = self._conform_to_reference_input(y, ref_input=x)
      x_id = str(id(x))
      tensor_dict[x_id] = [y] * tensor_usage_count[x_id]

    nodes_by_depth = self._nodes_by_depth
    depth_keys = list(nodes_by_depth.keys())
    depth_keys.sort(reverse=True)

    for depth in depth_keys:
      nodes = nodes_by_depth[depth]
      for node in nodes:
        if node.is_input:
          continue  # Input tensors already exist.

        if any(t_id not in tensor_dict for t_id in node.flat_input_ids):
          continue  # Node is not computable, try skipping.

        args, kwargs = node.map_arguments(tensor_dict)
        outputs = node.layer(*args, **kwargs)

        # Update tensor_dict.
        for x_id, y in zip(node.flat_output_ids, tf.nest.flatten(outputs)):
          tensor_dict[x_id] = [y] * tensor_usage_count[x_id]

    output_tensors = []
    for x in self.outputs:
      x_id = str(id(x))
      assert x_id in tensor_dict, 'Could not compute output ' + str(x)
      output_tensors.append(tensor_dict[x_id].pop())

    return tf.nest.pack_sequence_as(self._nested_outputs, output_tensors)

  def _flatten_to_reference_inputs(self, tensors):
    """Maps `tensors` to their respective `keras.Input`."""
    if self._enable_dict_to_input_mapping and isinstance(tensors, dict):
      ref_inputs = self._nested_inputs
      if not tf.nest.is_nested(ref_inputs):
        ref_inputs = [self._nested_inputs]
      if isinstance(ref_inputs, dict):
        # In the case that the graph is constructed with dict input tensors,
        # We will use the original dict key to map with the keys in the input
        # data. Note that the model.inputs is using nest.flatten to process the
        # input tensors, which means the dict input tensors are ordered by
        # their keys.
        ref_input_names = sorted(ref_inputs.keys())
      else:
        ref_input_names = [inp._keras_history.layer.name for inp in ref_inputs]

      # Raise a warning if there is more input data than input tensors.
      if len(tensors) > len(ref_input_names):
        warnings.warn(
            'Input dict contained keys {} which did not match any model input. '
            'They will be ignored by the model.'.format(
                [n for n in tensors.keys() if n not in ref_input_names])
            )

      try:
        # Flatten in the order `Input`s were passed during Model construction.
        return [tensors[n] for n in ref_input_names]
      except KeyError:
        # TODO(b/151582614)
        return tf.nest.flatten(tensors)

    # Otherwise both self.inputs and tensors will already be in same order.
    return tf.nest.flatten(tensors)

  def _conform_to_reference_input(self, tensor, ref_input):
    """Set shape and dtype based on `keras.Input`s."""
    if isinstance(tensor, tf.Tensor):
      # Allow (None,) and (None, 1) Tensors to be passed interchangeably. Use
      # the shape specified by the `keras.Input`.
      t_shape = tensor.shape
      t_rank = t_shape.rank
      ref_shape = ref_input.shape
      ref_rank = ref_shape.rank
      keras_history = getattr(tensor, '_keras_history', None)
      if t_rank is not None and ref_rank is not None:
        # Should squeeze last dimension.
        # True if tensor is (BATCH, ..., 1) and reference is (BATCH, ...).
        if (t_rank == ref_rank + 1 and t_shape[-1] == 1):
          tensor = tf.squeeze(tensor, axis=-1)
        # Should expand last dimension.
        # True if tensor is (BATCH, ...) and reference is (BATCH, ..., 1).
        elif (t_rank == ref_rank - 1 and ref_shape[-1] == 1):
          tensor = tf.expand_dims(tensor, axis=-1)
      if keras_history is not None:  # Restore keras history.
        tensor._keras_history = keras_history

      # Add shape hints to Tensors that may have None shape dims but have
      # shapes defined by the `keras.Input` (not applicable in eager mode).
      if not tf.executing_eagerly():
        try:
          tensor.set_shape(tensor.shape.merge_with(ref_input.shape))
        except ValueError:
          logging.warning(
              'Model was constructed with shape {} for input {}, but it was '
              'called on an input with incompatible shape {}.'.format(
                  ref_input.shape, ref_input, tensor.shape))

      # Dtype casting.
      tensor = tf.cast(tensor, dtype=ref_input.dtype)
    elif tf_utils.is_extension_type(tensor):
      # Dtype casting (If the extension type has a non-variant dtype and
      # supports being cast)
      ref_input_dtype = getattr(ref_input, 'dtype', None)
      if ref_input_dtype is not None and ref_input_dtype != tf.variant:
        tensor = tf.cast(tensor, dtype=ref_input_dtype)

    return tensor
t_shape = tensor.shape t_rank = t_shape.rank ref_shape = ref_input.shape ref_rank = ref_shape.rank keras_history = getattr(tensor, '_keras_history', None) if t_rank is not None and ref_rank is not None: # Should squeeze last dimension. # True if tensor is (BATCH, ..., 1) and reference is (BATCH, ...). if (t_rank == ref_rank + 1 and t_shape[-1] == 1): tensor = tf.squeeze(tensor, axis=-1) # Should expand last_dimension. # True if tensor is (BATCH, ...) and reference is (BATCH, ..., 1). elif (t_rank == ref_rank - 1 and ref_shape[-1] == 1): tensor = tf.expand_dims(tensor, axis=-1) if keras_history is not None: # Restore keras history. tensor._keras_history = keras_history # Add shape hints to Tensors that may have None shape dims but have shapes # defined by the `keras.Input` (not applicable in eager mode). if not tf.executing_eagerly(): try: tensor.set_shape(tensor.shape.merge_with(ref_input.shape)) except ValueError: logging.warning( 'Model was constructed with shape {} for input {}, but it was ' 'called on an input with incompatible shape {}.'.format( ref_input.shape, ref_input, tensor.shape)) # Dtype casting. tensor = tf.cast(tensor, dtype=ref_input.dtype) elif tf_utils.is_extension_type(tensor): # Dtype casting (If the extension type has a non-variant dtype and # supports being cast) ref_input_dtype = getattr(ref_input, 'dtype', None) if ref_input_dtype is not None and ref_input_dtype != tf.variant: tensor = tf.cast(tensor, dtype=ref_input_dtype) return tensor def get_config(self): return copy.deepcopy(get_network_config(self)) @classmethod def from_config(cls, config, custom_objects=None): """Instantiates a Model from its config (output of `get_config()`). Args: config: Model config dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A model instance. Raises: ValueError: In case of improperly formatted config dict. 
""" with generic_utils.SharedObjectLoadingScope(): input_tensors, output_tensors, created_layers = reconstruct_from_config( config, custom_objects) model = cls(inputs=input_tensors, outputs=output_tensors, name=config.get('name')) connect_ancillary_layers(model, created_layers) return model def _validate_graph_inputs_and_outputs(self): """Validates the inputs and outputs of a Graph Network.""" # Check for redundancy in inputs. if len({id(i) for i in self.inputs}) != len(self.inputs): raise ValueError('The list of inputs passed to the model ' 'contains the same input multiple times. ' 'All inputs should only appear once.' f'Received inputs={self.inputs}') for x in self.inputs: # Check that x has appropriate `_keras_history` metadata. if not hasattr(x, '_keras_history'): cls_name = self.__class__.__name__ raise ValueError( f'Input tensors to a {cls_name} model ' 'must come from `tf.keras.Input`. ' f'Received inputs={x} (missing previous layer metadata).') # Check that x is an input tensor. # pylint: disable=protected-access layer = x._keras_history.layer if len(layer._inbound_nodes) > 1 or ( layer._inbound_nodes and not layer._inbound_nodes[0].is_input): cls_name = self.__class__.__name__ logging.warning(f'{cls_name} model inputs must come from ' '`tf.keras.Input` (thus holding past layer metadata). ' 'They cannot be the output of ' 'a previous non-Input layer. ' 'Here, a tensor specified as ' f'input to "{self.name}" was not an Input tensor, ' f'it was generated by layer "{layer.name}".\n' 'Note that input tensors are ' 'instantiated via `tensor = tf.keras.Input(shape)`.\n' f'The tensor that caused the issue was: {x}') # Check compatibility of batch sizes of Input Layers. input_batch_sizes = set([ training_utils.get_static_batch_size(x._keras_history.layer) for x in self.inputs]) input_batch_sizes.discard(None) if len(input_batch_sizes) > 1: logging.warning('Found incompatiable static batch sizes among the ' f'inputs. 
  def _insert_layers(self, layers, relevant_nodes=None):
    """Inserts Layers into the Network after Network creation.

    This is only valid for Keras Graph Networks. Layers added via this function
    will be included in the `call` computation and `get_config` of this
    Network. They will not be added to the Network's outputs.

    Args:
      layers: Arbitrary nested structure of Layers. Layers must be reachable
        from one or more of the `keras.Input` Tensors that correspond to this
        Network's inputs.
      relevant_nodes: Nodes from the Layers that should be considered part of
        this Network. If `None`, all Nodes will be considered part of this
        Network.

    Raises:
      ValueError: If the layers depend on `Input`s not found in this Model.
    """
    layers = tf.nest.flatten(layers)
    tf_utils.assert_no_legacy_layers(layers)
    node_to_depth = {}
    for depth, nodes in self._nodes_by_depth.items():
      node_to_depth.update({node: depth for node in nodes})
    # The nodes of these Layers that are relevant to this Network. If not
    # provided, assume all Nodes are relevant.
    if not relevant_nodes:
      relevant_nodes = tf.nest.flatten(
          [layer._inbound_nodes for layer in layers])
    network_nodes = set(relevant_nodes + list(node_to_depth.keys()))

    def _get_min_depth(node):
      """Gets the minimum depth at which node can be computed."""
      min_depth = 0
      for layer, node_id, _, _ in node.iterate_inbound():
        inbound_node = layer._inbound_nodes[node_id]
        if inbound_node in node_to_depth:
          min_depth = min(min_depth, node_to_depth[inbound_node])
        elif inbound_node not in network_nodes:
          continue
        else:
          # Previous relevant nodes haven't been processed yet.
          return None
      # New node is one shallower than its shallowest input.
      return min_depth - 1

    # Insert nodes into `_nodes_by_depth` and other node attrs.
    unprocessed_nodes = copy.copy(relevant_nodes)
    i = 0
    while unprocessed_nodes:
      i += 1
      # Do a sanity check. This can occur if `Input`s from outside this Model
      # are being relied on.
      if i > 10000:
        raise ValueError('Layers could not be added due to missing '
                         'dependencies.')

      node = unprocessed_nodes.pop(0)
      depth = _get_min_depth(node)
      if depth is None:  # Defer until inbound nodes are processed.
        unprocessed_nodes.append(node)
        continue
      node_key = _make_node_key(node.layer.name,
                                node.layer._inbound_nodes.index(node))
      if node_key not in self._network_nodes:
        node_to_depth[node] = depth
        self._network_nodes.add(node_key)
        self._nodes_by_depth[depth].append(node)

    # Insert layers and update other layer attrs.
    layer_set = set(self._self_tracked_trackables)
    deferred_layers = []
    for layer in layers:
      if layer not in layer_set:
        self._self_tracked_trackables.append(layer)
        deferred_layers.append(layer)
        self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
        layer_set.add(layer)
    self._handle_deferred_layer_dependencies(deferred_layers)

    self._compute_tensor_usage_count()

  def _compute_tensor_usage_count(self):
    """Compute the number of tensor usages for all layer output tensors.

    The computed tensor usage count is saved as `self._tensor_usage_count`.
    This is later used for saving memory in eager computation by releasing
    no-longer-needed tensors as early as possible.
    """
    tensor_usage_count = collections.Counter()
    available_tensors = set(str(id(tensor)) for tensor in self.inputs)

    depth_keys = list(self._nodes_by_depth.keys())
    depth_keys.sort(reverse=True)
    # Skip the deepest depth level, which holds the input nodes.
    depth_keys = depth_keys[1:]

    for depth in depth_keys:
      for node in self._nodes_by_depth[depth]:
        input_tensors = {
            str(id(tensor)) for tensor in tf.nest.flatten(node.keras_inputs)
        }
        if input_tensors.issubset(available_tensors):
          for tensor in tf.nest.flatten(node.keras_inputs):
            tensor_usage_count[str(id(tensor))] += 1

          for output_tensor in tf.nest.flatten(node.outputs):
            available_tensors.add(str(id(output_tensor)))

    for tensor in self.outputs:
      tensor_usage_count[str(id(tensor))] += 1

    self._tensor_usage_count = tensor_usage_count
  def _assert_weights_created(self):
    # Override the implementation in Model.
    # The Functional model should always have weight created already.
    return

  def _graph_network_add_loss(self, symbolic_loss):
    """Adds a symbolic loss by grafting an `AddLoss` layer into the graph."""
    new_nodes, new_layers = _map_subgraph_network(self.inputs, [symbolic_loss])
    # Losses must be keyed on inputs no matter what in order to be supported in
    # DistributionStrategy.
    add_loss_layer = base_layer.AddLoss(
        unconditional=False, dtype=symbolic_loss.dtype)
    add_loss_layer(symbolic_loss)
    new_nodes.extend(add_loss_layer.inbound_nodes)
    new_layers.append(add_loss_layer)
    self._insert_layers(new_layers, new_nodes)

  def _graph_network_add_metric(self, value, aggregation, name):
    """Adds a symbolic metric by grafting an `AddMetric` layer into the graph."""
    new_nodes, new_layers = _map_subgraph_network(self.inputs, [value])
    add_metric_layer = base_layer.AddMetric(
        aggregation, name, dtype=value.dtype)
    add_metric_layer(value)
    new_nodes.extend(add_metric_layer.inbound_nodes)
    new_layers.append(add_metric_layer)
    self._insert_layers(new_layers, new_nodes)

  @property
  def _trackable_saved_model_saver(self):
    return network_serialization.NetworkSavedModelSaver(self)

  def _get_save_spec(self, dynamic_batch=True, inputs_only=True):
    if getattr(self, '_has_explicit_input_shape', True):
      # Functional models and Sequential models that have an explicit input
      # shape should use the batch size set by the input layer.
      dynamic_batch = False
    return super(Functional, self)._get_save_spec(dynamic_batch, inputs_only)


def _make_node_key(layer_name, node_index):
  """Builds the string key identifying a node: '<layer>_ib-<node_index>'."""
  return layer_name + '_ib-' + str(node_index)


def _map_graph_network(inputs, outputs):
  """Validates a network's topology and gather its layers and nodes.

  Args:
    inputs: List of input tensors.
    outputs: List of outputs tensors.

  Returns:
    A tuple `(network_nodes, nodes_by_depth, layers, layers_by_depth)`.
    - network_nodes: set of node keys (see `_make_node_key`) in the network.
    - nodes_by_depth: dict mapping ints (depth) to lists of node instances.
    - layers: list of Layer instances.
    - layers_by_depth: dict mapping ints (depth) to lists of layer instances.

  Raises:
    ValueError: In case the network is not valid (e.g. disconnected graph).
  """
  # "depth" is number of layers between output Node and the Node.
  # Nodes are ordered from inputs -> outputs.
  nodes_in_decreasing_depth, layer_indices = _build_map(outputs)
  network_nodes = {
      _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))
      for node in nodes_in_decreasing_depth
  }

  nodes_depths = {}  # dict {node: depth value}
  layers_depths = {}  # dict {layer: depth value}

  for node in reversed(nodes_in_decreasing_depth):
    # If the depth is not set, the node has no outbound nodes (depth 0).
    depth = nodes_depths.setdefault(node, 0)

    # Update the depth of the corresponding layer
    previous_depth = layers_depths.get(node.layer, 0)
    # If we've seen this layer before at a higher depth,
    # we should use that depth instead of the node depth.
    # This is necessary for shared layers that have inputs at different
    # depth levels in the graph.
    depth = max(depth, previous_depth)
    layers_depths[node.layer] = depth
    nodes_depths[node] = depth

    # Update the depth of inbound nodes.
    # The "depth" of a node is the max of the depths
    # of all nodes it is connected to + 1.
    for node_dep in node.parent_nodes:
      previous_depth = nodes_depths.get(node_dep, 0)
      nodes_depths[node_dep] = max(depth + 1, previous_depth)

  # Handle inputs that are not connected to outputs.
  # We do not error out here because the inputs may be used to compute losses
  # and metrics.
  for input_t in inputs:
    input_layer = input_t._keras_history[0]
    if input_layer not in layers_depths:
      layers_depths[input_layer] = 0
      layer_indices[input_layer] = -1
      nodes_depths[input_layer._inbound_nodes[0]] = 0
      network_nodes.add(_make_node_key(input_layer.name, 0))

  # Build a dict {depth: list of nodes with this depth}
  nodes_by_depth = collections.defaultdict(list)
  for node, depth in nodes_depths.items():
    nodes_by_depth[depth].append(node)

  # Build a dict {depth: list of layers with this depth}
  layers_by_depth = collections.defaultdict(list)
  for layer, depth in layers_depths.items():
    layers_by_depth[depth].append(layer)

  # Get sorted list of layer depths.
  depth_keys = list(layers_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Set self.layers ordered by depth.
  layers = []
  for depth in depth_keys:
    layers_for_depth = layers_by_depth[depth]
    # Network.layers needs to have a deterministic order:
    # here we order them by traversal order.
    layers_for_depth.sort(key=lambda x: layer_indices[x])
    layers.extend(layers_for_depth)

  # Get sorted list of node depths.
  depth_keys = list(nodes_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Check that all tensors required are computable.
  # computable_tensors: all tensors in the graph
  # that can be computed from the inputs provided.
  computable_tensors = set()
  for x in inputs:
    computable_tensors.add(id(x))

  layers_with_complete_input = []  # To provide a better error msg.
  for depth in depth_keys:
    for node in nodes_by_depth[depth]:
      layer = node.layer
      if layer and not node.is_input:
        for x in tf.nest.flatten(node.keras_inputs):
          if id(x) not in computable_tensors:
            raise ValueError(
                f'Graph disconnected: cannot obtain value for tensor {x} '
                f'at layer "{layer.name}". The following previous layers '
                f'were accessed without issue: {layers_with_complete_input}')
        for x in tf.nest.flatten(node.outputs):
          computable_tensors.add(id(x))
        layers_with_complete_input.append(layer.name)

  # Ensure name unicity, which will be crucial for serialization
  # (since serialized nodes refer to layers by their name).
  all_names = [layer.name for layer in layers]
  for name in all_names:
    if all_names.count(name) != 1:
      raise ValueError(
          f'The name "{name}" is used {all_names.count(name)} '
          'times in the model. All layer names should be unique.')
  return network_nodes, nodes_by_depth, layers, layers_by_depth


def _build_map(outputs):
  """This method topologically sorts nodes in order from inputs to outputs.

  It uses a depth-first search to topologically sort nodes that appear in the
  _keras_history connectivity metadata of `outputs`.

  Args:
    outputs: the output tensors whose _keras_history metadata should be walked.
      This may be an arbitrary nested structure.

  Returns:
    A tuple like (ordered_nodes, layer_to_first_traversal_index)
    ordered_nodes: list of nodes appearing in the keras history, topologically
      sorted from original inputs to the `outputs`.
      (If outputs have different sets of ancestors, the inputs to one output
      may appear after a different output).
    layer_to_first_traversal_index:
      A dict mapping layer to the traversal index in the DFS where it is
      seen. Note: if a layer is shared by several nodes, the dict will only
      store the index corresponding to the *first* time the layer seen.
  """
  finished_nodes = set()
  nodes_in_progress = set()
  nodes_in_decreasing_depth = []  # nodes from inputs -> outputs.
  layer_indices = {}  # layer -> in traversal order.
  for output in tf.nest.flatten(outputs):
    _build_map_helper(output, finished_nodes, nodes_in_progress,
                      nodes_in_decreasing_depth, layer_indices)
  return nodes_in_decreasing_depth, layer_indices
def _build_map_helper(tensor, finished_nodes, nodes_in_progress,
                      nodes_in_decreasing_depth, layer_indices):
  """Recursive helper for `_build_map` (DFS over `_keras_history` metadata)."""
  layer, node_index, _ = tensor._keras_history  # pylint: disable=protected-access
  node = layer._inbound_nodes[node_index]  # pylint: disable=protected-access

  # Don't repeat work for shared subgraphs
  if node in finished_nodes:
    return

  # Prevent cycles.
  if node in nodes_in_progress:
    raise ValueError(f'Tensor {tensor} from layer "{layer.name}" '
                     'is part of a cycle.')

  # Store the traversal order for layer sorting.
  if layer not in layer_indices:
    layer_indices[layer] = len(layer_indices)

  # Propagate to all previous tensors connected to this node.
  nodes_in_progress.add(node)
  if not node.is_input:
    for tensor in node.keras_inputs:
      _build_map_helper(tensor, finished_nodes, nodes_in_progress,
                        nodes_in_decreasing_depth, layer_indices)

  finished_nodes.add(node)
  nodes_in_progress.remove(node)
  nodes_in_decreasing_depth.append(node)


def _map_subgraph_network(inputs, outputs):
  """Returns the nodes and layers in the topology from `inputs` to `outputs`.

  Args:
    inputs: List of input tensors.
    outputs: List of output tensors.

  Returns:
    A tuple of (List[Node], List[Layer]).
  """
  if not tf.compat.v1.executing_eagerly_outside_functions():
    base_layer_utils.create_keras_history(outputs)
  # Keep only nodes and layers in the topology between inputs and outputs.
  _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)
  return tf.nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers


def _should_skip_first_node(layer):
  """Returns True if the first layer node should not be saved or loaded."""
  # Networks that are constructed with an Input layer/shape start with a
  # pre-existing node linking their input to output. This node is excluded
  # from the network config.
  if layer._self_tracked_trackables:
    return (isinstance(layer, Functional) and
            # Filter out Sequential models without an input shape.
            isinstance(layer._self_tracked_trackables[0],
                       input_layer_module.InputLayer))
  else:
    return isinstance(layer, Functional)


def connect_ancillary_layers(model, created_layers):
  """Adds layers that are not connected to the outputs to the model."""
  # Layers not connected to outputs, such as those added in `add_loss`.
  ancillary_layers = [
      layer for layer in created_layers.values() if layer not in model.layers
  ]
  if ancillary_layers:
    relevant_nodes = tf.nest.flatten([
        layer.inbound_nodes[1:]
        if _should_skip_first_node(layer) else layer.inbound_nodes
        for layer in created_layers.values()
    ])
    model._insert_layers(ancillary_layers, relevant_nodes)
  return model
def reconstruct_from_config(config, custom_objects=None, created_layers=None):
  """Reconstructs graph from config object.

  Args:
    config: Dictionary returned from Network.get_config()
    custom_objects: Optional dictionary mapping names (strings) to custom
      classes or functions to be considered during deserialization.
    created_layers: Optional dictionary mapping names to Layer objects. Any
      layer not in this dictionary will be created and added to the dict.
      This function will add new nodes to all layers (excluding InputLayers),
      instead of re-using pre-existing nodes in the layers.

  Returns:
    Tuple of (input tensors, output tensors, dictionary of created layers)
  """
  # Layer instances created during the graph reconstruction process.
  created_layers = created_layers or collections.OrderedDict()

  # Maps input data (tuple of inbound layer name, node index) from the config
  # to node indices in the newly generated model. The node indices may be
  # different if the layers have already been called previously.
  node_index_map = {}
  node_count_by_layer = {}

  # Dictionary mapping layer instances to
  # node data that specifies a layer call.
  # It acts as a queue that maintains any unprocessed
  # layer call until it becomes possible to process it
  # (i.e. until the input tensors to the call all exist).
  unprocessed_nodes = collections.defaultdict(list)

  def get_node_index(layer, config_node_index):
    """Returns node index in layer (might differ from config_node_index)."""
    if isinstance(layer, input_layer_module.InputLayer):
      return 0
    return node_index_map.get((layer.name, config_node_index), None)

  def _deserialize_keras_tensors(kwargs, layer_map):
    """Deserializes Keras Tensors passed to `call`."""

    def _deserialize_keras_tensor(t):
      """Deserializes a single Keras Tensor passed to `call`."""
      if isinstance(t, tf_utils.ListWrapper):
        t = t.as_list()
        layer_name = t[0]
        node_index = t[1]
        tensor_index = t[2]

        layer = layer_map[layer_name]
        new_node_index = get_node_index(layer, node_index)
        if new_node_index is None:
          # The inbound node may not have been processed yet,
          # (This can happen e.g. if it depends on a different set
          # of inputs than those that have been processed already).
          # raise an IndexError so that the current node puts itself
          # back on the unprocessed queue.
          # Caution: This may lead to infinite loops for malformed
          # network configurations! (or when there is a bug in
          # the network config loading code).
          raise IndexError
        node = layer._inbound_nodes[new_node_index]
        return tf.nest.flatten(node.outputs)[tensor_index]
      return t

    kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)
    return tf.nest.map_structure(_deserialize_keras_tensor, kwargs)

  def process_node(layer, node_data):
    """Deserialize a node.

    Args:
      layer: layer instance.
      node_data: Nested structure of `ListWrapper`.

    Returns:
      Whether the node was processed (i.e. the layer was called on the inputs
      specified by the node data)

    Raises:
      ValueError: In case of improperly formatted `node_data`.
    """
    input_tensors = []
    for input_data in tf.nest.flatten(node_data):
      input_data = input_data.as_list()
      inbound_layer_name = input_data[0]
      inbound_node_index = input_data[1]
      inbound_tensor_index = input_data[2]
      if len(input_data) == 3:
        kwargs = {}
      elif len(input_data) == 4:
        kwargs = input_data[3]
        try:
          kwargs = _deserialize_keras_tensors(kwargs, created_layers)
        except IndexError:
          # Happens if keras tensors in kwargs are still unprocessed
          return False
      else:
        raise ValueError('Improperly formatted model config.')

      if inbound_layer_name != node_module._CONSTANT_VALUE:
        inbound_layer = created_layers[inbound_layer_name]
        inbound_node_index = get_node_index(inbound_layer, inbound_node_index)

        if inbound_node_index is None:
          return False
        inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
        input_tensors.append(
            tf.nest.flatten(inbound_node.outputs)[inbound_tensor_index])
      else:
        # We received a constant w/ no Keras history attached
        input_tensors.append(inbound_tensor_index)
    input_tensors = tf.nest.pack_sequence_as(node_data, input_tensors)
    # Call layer on its inputs, thus creating the node
    # and building the layer if needed.
    if input_tensors is not None:
      if not layer._preserve_input_structure_in_config:
        input_tensors = (
            base_layer_utils.unnest_if_single_tensor(input_tensors))
      output_tensors = layer(input_tensors, **kwargs)

      # Update node index map.
      output_index = (tf.nest.flatten(output_tensors)[0].
                      _keras_history.node_index)
      node_index_map[(layer.name,
                      node_count_by_layer[layer])] = output_index
      node_count_by_layer[layer] += 1
    return True

  def process_layer(layer_data):
    """Deserializes a layer, then call it on appropriate inputs.

    Args:
      layer_data: layer config dict.

    Raises:
      ValueError: In case of improperly formatted `layer_data` dict.
    """
    layer_name = layer_data['name']

    if layer_name in created_layers:
      layer = created_layers[layer_name]
    else:
      # Instantiate layer.
      from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
      layer = deserialize_layer(layer_data, custom_objects=custom_objects)
      created_layers[layer_name] = layer
    node_count_by_layer[layer] = int(_should_skip_first_node(layer))

    # Gather layer inputs and convert to `ListWrapper` objects.
    inbound_nodes_data = layer_data['inbound_nodes']
    inbound_nodes_data = tf_utils.convert_inner_node_data(
        inbound_nodes_data, wrap=True)
    for node_data in inbound_nodes_data:
      # We don't process nodes (i.e. make layer calls)
      # on the fly because the inbound node may not yet exist,
      # in case of layer shared at different topological depths
      # (e.g. a model such as A(B(A(B(x)))))
      unprocessed_nodes[layer].append(node_data)

  # First, we create all layers and enqueue nodes to be processed
  for layer_data in config['layers']:
    process_layer(layer_data)
  # Then we process nodes in order of layer depth.
  # Nodes that cannot yet be processed (if the inbound node
  # does not yet exist) are re-enqueued, and the process
  # is repeated until all nodes are processed.
  while unprocessed_nodes:
    for layer_data in config['layers']:
      layer = created_layers[layer_data['name']]
      if layer in unprocessed_nodes:
        layer_nodes = unprocessed_nodes.pop(layer)
        while layer_nodes:
          node_data = layer_nodes[0]
          if process_node(layer, node_data):
            layer_nodes.pop(0)
          else:
            # If a node can't be processed, stop processing the nodes of
            # the current layer to maintain node ordering.
            unprocessed_nodes[layer] = layer_nodes
            break

  input_tensors = []
  output_tensors = []

  input_layers = tf_utils.convert_inner_node_data(
      config['input_layers'], wrap=True)
  for layer_data in tf.nest.flatten(input_layers):
    layer_name, node_index, tensor_index = layer_data.as_list()
    assert layer_name in created_layers
    layer = created_layers[layer_name]
    node_index = get_node_index(layer, node_index)
    layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
    input_tensors.append(
        tf.nest.flatten(layer_output_tensors)[tensor_index])

  output_layers = tf_utils.convert_inner_node_data(
      config['output_layers'], wrap=True)
  for layer_data in tf.nest.flatten(output_layers):
    layer_name, node_index, tensor_index = layer_data.as_list()
    assert layer_name in created_layers
    layer = created_layers[layer_name]
    node_index = get_node_index(layer, node_index)
    layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
    output_tensors.append(
        tf.nest.flatten(layer_output_tensors)[tensor_index])

  input_tensors = tf.nest.pack_sequence_as(input_layers, input_tensors)
  output_tensors = tf.nest.pack_sequence_as(output_layers, output_tensors)
  return input_tensors, output_tensors, created_layers


def get_network_config(network, serialize_layer_fn=None):
  """Builds the config, which consists of the node graph and serialized layers.

  Args:
    network: A Network object.
    serialize_layer_fn: Function used to serialize layers.

  Returns:
    Config dictionary.
""" serialize_layer_fn = ( serialize_layer_fn or generic_utils.serialize_keras_object) config = { 'name': network.name, } node_conversion_map = {} for layer in network.layers: kept_nodes = 1 if _should_skip_first_node(layer) else 0 for original_node_index, node in enumerate(layer._inbound_nodes): node_key = _make_node_key(layer.name, original_node_index) if node_key in network._network_nodes: node_conversion_map[node_key] = kept_nodes kept_nodes += 1 layer_configs = [] with generic_utils.SharedObjectSavingScope(): for layer in network.layers: # From the earliest layers on. filtered_inbound_nodes = [] for original_node_index, node in enumerate(layer._inbound_nodes): node_key = _make_node_key(layer.name, original_node_index) if node_key in network._network_nodes and not node.is_input: # The node is relevant to the model: # add to filtered_inbound_nodes. node_data = node.serialize(_make_node_key, node_conversion_map) filtered_inbound_nodes.append(node_data) layer_config = serialize_layer_fn(layer) layer_config['name'] = layer.name layer_config['inbound_nodes'] = filtered_inbound_nodes layer_configs.append(layer_config) config['layers'] = layer_configs # Gather info about inputs and outputs. model_inputs = [] for i in range(len(network._input_layers)): layer, node_index, tensor_index = network._input_coordinates[i] node_key = _make_node_key(layer.name, node_index) if node_key not in network._network_nodes: continue new_node_index = node_conversion_map[node_key] model_inputs.append( tf_utils.ListWrapper([layer.name, new_node_index, tensor_index])) model_inputs = tf.nest.pack_sequence_as(network._nested_inputs, model_inputs) # Preserve external Keras compat for Models with single input. 
if not tf.nest.is_nested(model_inputs): model_inputs = [model_inputs] model_inputs = tf_utils.convert_inner_node_data(model_inputs) config['input_layers'] = model_inputs model_outputs = [] for i in range(len(network._output_layers)): layer, node_index, tensor_index = network._output_coordinates[i] node_key = _make_node_key(layer.name, node_index) if node_key not in network._network_nodes: continue new_node_index = node_conversion_map[node_key] model_outputs.append( tf_utils.ListWrapper([layer.name, new_node_index, tensor_index])) model_outputs = tf.nest.pack_sequence_as(network._nested_outputs, model_outputs) # Preserve external Keras compat for Models with single output. if not tf.nest.is_nested(model_outputs): model_outputs = [model_outputs] model_outputs = tf_utils.convert_inner_node_data(model_outputs) config['output_layers'] = model_outputs return config def shape_with_no_batch_size(x): if x.shape.rank is None: return None shape = x.shape.as_list() if shape: shape[0] = None return shape class ModuleWrapper(base_layer.Layer): """Wrapper for `tf.Module`s to support the Functional and Sequential API.""" def __init__(self, module, method_name=None, **kwargs): """Initializes the wrapper Layer for this module. Args: module: The `tf.Module` instance to be wrapped. method_name: (Optional) str. The name of the method to use as the forward pass of the module. If not set, defaults to '__call__' if defined, or 'call'. **kwargs: Additional keywrod arguments. See `tf.keras.layers.Layer`. Raises: ValueError: If `method` is not defined on `module`. 
""" super(ModuleWrapper, self).__init__(**kwargs) if method_name is None: if hasattr(module, '__call__'): method_name = '__call__' elif hasattr(module, 'call'): method_name = 'call' if method_name is None or not hasattr(module, method_name): raise ValueError('{} is not defined on object {}'.format( method_name, module)) self._module = module self._method_name = method_name # Check if module.__call__ has a `training` arg or accepts `**kwargs`. method = getattr(module, method_name) method_arg_spec = tf_inspect.getfullargspec(method) self._expects_training_arg = ('training' in method_arg_spec.args or method_arg_spec.varkw is not None) self._expects_mask_arg = ('mask' in method_arg_spec.args or method_arg_spec.varkw is not None) def call(self, *args, **kwargs): if 'training' in kwargs and not self._expects_training_arg: kwargs.pop('training') if 'mask' in kwargs and not self._expects_mask_arg: kwargs.pop('mask') return getattr(self._module, self._method_name)(*args, **kwargs)
58,544
39.348036
102
py
keras
keras-master/keras/engine/training.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training-related part of the Keras engine.""" import tensorflow.compat.v2 as tf import copy import itertools import json import os import warnings import weakref from tensorflow.python.eager import context from keras import backend from keras import callbacks as callbacks_module from keras import optimizer_v1 from keras import optimizers from keras.engine import base_layer from keras.engine import base_layer_utils from keras.engine import compile_utils from keras.engine import data_adapter from keras.engine import training_utils from keras.mixed_precision import loss_scale_optimizer as lso from keras.mixed_precision import policy from keras.saving import hdf5_format from keras.saving import save from keras.saving import saving_utils from keras.saving import pickle_utils from keras.saving.saved_model import json_utils from keras.saving.saved_model import model_serialization from keras.utils import generic_utils from keras.utils import layer_utils from keras.utils import object_identity from keras.utils import tf_utils from keras.utils import traceback_utils from keras.utils import version_utils from keras.utils.io_utils import ask_to_proceed_with_overwrite from keras.utils.io_utils import path_to_string from keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import 
tf_logging as logging from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls # pylint: disable=g-import-not-at-top try: import h5py except ImportError: h5py = None # pylint: enable=g-import-not-at-top @keras_export('keras.Model', 'keras.models.Model') class Model(base_layer.Layer, version_utils.ModelVersionSelector): """`Model` groups layers into an object with training and inference features. Args: inputs: The input(s) of the model: a `keras.Input` object or list of `keras.Input` objects. outputs: The output(s) of the model. See Functional API example below. name: String, the name of the model. There are two ways to instantiate a `Model`: 1 - With the "Functional API", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` Note: Only dicts, lists, and tuples of input tensors are supported. Nested inputs are not supported (e.g. lists of list or dicts of dict). 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__()` and you should implement the model's forward pass in `call()`. 
```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super().__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call()`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super().__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` Once the model is created, you can config the model with losses and metrics with `model.compile()`, train the model with `model.fit()`, or use the model to do prediction with `model.predict()`. 
""" _TF_MODULE_IGNORED_PROPERTIES = frozenset( itertools.chain(('_train_counter', '_test_counter', '_predict_counter', '_steps_per_execution'), base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES)) # pylint: disable=protected-access _SCALAR_UPRANKING_ON = False def __new__(cls, *args, **kwargs): # Signature detection if is_functional_model_init_params(args, kwargs) and cls == Model: # Functional model from keras.engine import functional # pylint: disable=g-import-not-at-top return functional.Functional(skip_init=True, *args, **kwargs) else: return super(Model, cls).__new__(cls, *args, **kwargs) @tf.__internal__.tracking.no_automatic_dependency_tracking @traceback_utils.filter_traceback def __init__(self, *args, **kwargs): self._is_model_for_instrumentation = True base_layer.keras_api_gauge.get_cell('model').set(True) # Special case for Subclassed Functional Model, which we couldn't detect # when __new__ is called. We only realize it is a functional model when it # calls super.__init__ with input and output tensor. from keras.engine import functional # pylint: disable=g-import-not-at-top if (is_functional_model_init_params(args, kwargs) and not isinstance(self, functional.Functional)): # Filter the kwargs for multiple inheritance. supported_kwargs = ['inputs', 'outputs', 'name', 'trainable', 'skip_init'] model_kwargs = {k: kwargs[k] for k in kwargs if k in supported_kwargs} other_kwargs = {k: kwargs[k] for k in kwargs if k not in supported_kwargs} inject_functional_model_class(self.__class__) functional.Functional.__init__(self, *args, **model_kwargs) # In case there is any multiple inheritance here, we need to call the # __init__ for any class that appears after the Functional class. 
clz_to_init = [] found_functional_class = False for clz in self.__class__.__bases__: if issubclass(clz, functional.Functional): found_functional_class = True continue if found_functional_class: clz_to_init.append(clz) if clz_to_init: for clz in clz_to_init: clz.__init__(self, *args, **other_kwargs) elif other_kwargs: # In case there are unused kwargs, we should raise an error to user, in # case they have a typo in the param name. raise TypeError( 'The following keyword arguments passed to `Model` aren\'t ' 'supported: {}.'.format(other_kwargs)) return base_layer.keras_api_gauge.get_cell('Model subclass').set(True) # The following are implemented as property functions: # self.trainable_weights # self.non_trainable_weights # `inputs` / `outputs` will only appear in kwargs if either are misspelled. generic_utils.validate_kwargs(kwargs, { 'trainable', 'dtype', 'dynamic', 'name', 'autocast', 'inputs', 'outputs' }) super(Model, self).__init__(**kwargs) # By default, Model is a subclass model, which is not in graph network. self._is_graph_network = False self.inputs = None self.outputs = None self.input_names = None self.output_names = None # stop_training is used by callback to stop training when error happens self.stop_training = False self.history = None # These objects are used in the default `Model.compile`. They are not # guaranteed to be set after `Model.compile` is called, as users can # override compile with custom logic. self.compiled_loss = None self.compiled_metrics = None # This is True for Sequential networks and Functional networks. self._compute_output_and_mask_jointly = False # Don't reset compilation if already done. This may occur if calling # `__init__` (or `_init_graph_network`) on an already-compiled model # such as a Sequential model. Sequential models may need to rebuild # themselves after compilation. 
self._maybe_create_attribute('_is_compiled', False) self._maybe_create_attribute('optimizer', None) # Model must be created under scope of DistStrat it will be trained with. if tf.distribute.has_strategy(): self._distribution_strategy = tf.distribute.get_strategy() else: self._distribution_strategy = None self._cluster_coordinator = None # Defaults to value of `tf.config.experimental_functions_run_eagerly`. self._run_eagerly = None # Initialize cache attrs. self._reset_compile_cache() # Fault-tolerance handler. Set in `ModelCheckpoint`. self._training_state = None self._saved_model_inputs_spec = None self._saved_model_arg_spec = None self._trackable_saver = saver_with_op_caching(self) self._steps_per_execution = None self._init_batch_counters() self._base_model_initialized = True @tf.__internal__.tracking.no_automatic_dependency_tracking def _init_batch_counters(self): # Untracked Variables, used to keep track of mini-batches seen in `fit`, # `evaluate`, and `predict`. agg = tf.VariableAggregation.ONLY_FIRST_REPLICA self._train_counter = tf.Variable(0, dtype='int64', aggregation=agg) self._test_counter = tf.Variable(0, dtype='int64', aggregation=agg) self._predict_counter = tf.Variable( 0, dtype='int64', aggregation=agg) def __setattr__(self, name, value): if not getattr(self, '_self_setattr_tracking', True): super(Model, self).__setattr__(name, value) return if all( isinstance(v, (base_layer.Layer, tf.Variable)) or base_layer_utils.has_weights(v) for v in tf.nest.flatten(value)): try: self._base_model_initialized except AttributeError: raise RuntimeError( 'It looks like you are subclassing `Model` and you ' 'forgot to call `super().__init__()`.' 
' Always start with this line.') super(Model, self).__setattr__(name, value) def __reduce__(self): if self.built: return (pickle_utils.deserialize_model_from_bytecode, pickle_utils.serialize_model_as_bytecode(self)) else: # SavedModel (and hence serialize_model_as_bytecode) only support # built models, but if the model is not built, # it may be possible to serialize as a plain Python object, # as long as the constituent parts (layers, optimizers, losses, etc.) # can be serialized as plain Python objects. # Thus we call up the superclass hierarchy to get an implementation of # __reduce__ that can pickle this Model as a plain Python object. return super(Model, self).__reduce__() def __deepcopy__(self, memo): if self.built: new = pickle_utils.deserialize_model_from_bytecode( *pickle_utils.serialize_model_as_bytecode(self)) memo[id(self)] = new else: # See comment in __reduce__ for explanation deserializer, serialized, *rest = super(Model, self).__reduce__() new = deserializer(*serialized) memo[id(self)] = new if rest: state = copy.deepcopy(rest[0], memo=memo) new.__setstate__(state) return new def __copy__(self): return self.__deepcopy__({}) @generic_utils.default def build(self, input_shape): """Builds the model based on input shapes received. This is to be used for subclassed models, which do not know at instantiation time what their inputs look like. This method only exists for users who want to call `model.build()` in a standalone way (as a substitute for calling the model on real data to build it). It will never be called by the framework (and thus it will never throw unexpected errors in an unrelated workflow). Args: input_shape: Single tuple, `TensorShape` instance, or list/dict of shapes, where shapes are tuples, integers, or `TensorShape` instances. Raises: ValueError: 1. In case of invalid user-provided data (not of type tuple, list, `TensorShape`, or dict). 2. 
If the model requires call arguments that are agnostic to the input shapes (positional or keyword arg in call signature). 3. If not all layers were properly built. 4. If float type inputs are not supported within the layers. In each of these cases, the user should build their model by calling it on real tensor data. """ if self._is_graph_network: super(Model, self).build(input_shape) return if input_shape is None: raise ValueError('Input shape must be defined when calling `build` on a ' 'model subclass network.') valid_types = (tuple, list, tf.TensorShape, dict) if not isinstance(input_shape, valid_types): raise ValueError('Specified input shape is not one of the valid types. ' 'Please specify a batch input shape of type tuple or ' 'list of input shapes. User provided ' 'input type: {}.'.format(type(input_shape))) if input_shape and not self.inputs: # We create placeholders for the `None`s in the shape and build the model # in a Graph. Since tf.Variable is compatible with both eager execution # and graph building, the variables created after building the model in # a Graph are still valid when executing eagerly. if tf.executing_eagerly(): graph = tf.__internal__.FuncGraph('build_graph') else: graph = backend.get_graph() with graph.as_default(): if (isinstance(input_shape, list) and all(d is None or isinstance(d, int) for d in input_shape)): input_shape = tuple(input_shape) if isinstance(input_shape, list): x = [base_layer_utils.generate_placeholders_from_shape(shape) for shape in input_shape] elif isinstance(input_shape, dict): x = { k: base_layer_utils.generate_placeholders_from_shape(shape) for k, shape in input_shape.items() } else: x = base_layer_utils.generate_placeholders_from_shape(input_shape) kwargs = {} call_signature = self._call_full_argspec call_args = call_signature.args # Exclude `self`, `inputs`, and any argument with a default value. 
if len(call_args) > 2: if call_signature.defaults: call_args = call_args[2:-len(call_signature.defaults)] else: call_args = call_args[2:] for arg in call_args: if arg == 'training': # Case where `training` is a positional arg with no default. kwargs['training'] = False else: # Has invalid call signature with unknown positional arguments. raise ValueError( 'Currently, you cannot build your model if it has ' 'positional or keyword arguments that are not ' 'inputs to the model, but are required for its ' '`call()` method. Instead, in order to instantiate ' 'and build your model, `call()` your model on real ' 'tensor data with all expected call arguments. The argument ' 'for `call()` can be a single list/tuple that contains ' 'multiple inputs.') elif len(call_args) < 2: # Signature without `inputs`. raise ValueError( 'You can only call `build()` on a model if its `call()` ' 'method accepts an `inputs` argument.') try: self.call(x, **kwargs) except (tf.errors.InvalidArgumentError, TypeError) as e: raise ValueError('You cannot build your model by calling `build` ' 'if your layers do not support float type inputs. ' 'Instead, in order to instantiate and build your ' 'model, call your model on real tensor data (of ' 'the correct dtype).\n\nThe actual error from ' f'`call` is: {e}.') super(Model, self).build(input_shape) @doc_controls.doc_in_current_and_subclasses def call(self, inputs, training=None, mask=None): """Calls the model on new inputs. In this case `call()` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Note: This method should not be called directly. It is only meant to be overridden when subclassing `tf.keras.Model`. To call a model on an input, always use the `__call__()` method, i.e. `model(inputs)`, which relies on the underlying `call()` method. Args: inputs: Input tensor, or dict/list/tuple of input tensors. 
training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. mask: A mask or list of masks. A mask can be either a tensor or None (no mask). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs. """ raise NotImplementedError('When subclassing the `Model` class, you should ' 'implement a `call()` method.') @traceback_utils.filter_traceback def compile(self, optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs): """Configures the model for training. Example: ```python model.compile(optimizer=tf.keras.optimizer.Adam(learning_rate=1e-3), loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.FalseNegatives()]) ``` Args: optimizer: String (name of optimizer) or optimizer instance. See `tf.keras.optimizers`. loss: Loss function. Maybe be a string (name of loss function), or a `tf.keras.losses.Loss` instance. See `tf.keras.losses`. A loss function is any callable with the signature `loss = fn(y_true, y_pred)`, where `y_true` are the ground truth values, and `y_pred` are the model's predictions. `y_true` should have shape `(batch_size, d0, .. dN)` (except in the case of sparse loss functions such as sparse categorical crossentropy which expects integer arrays of shape `(batch_size, d0, .. dN-1)`). `y_pred` should have shape `(batch_size, d0, .. dN)`. The loss function should return a float tensor. If a custom `Loss` instance is used and reduction is set to `None`, return value has shape `(batch_size, d0, .. dN-1)` i.e. per-sample or per-timestep loss values; otherwise, it is a scalar. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. 
The loss value that will be minimized by the model will then be the sum of all individual losses, unless `loss_weights` is specified. metrics: List of metrics to be evaluated by the model during training and testing. Each of this can be a string (name of a built-in function), function or a `tf.keras.metrics.Metric` instance. See `tf.keras.metrics`. Typically you will use `metrics=['accuracy']`. A function is any callable with the signature `result = fn(y_true, y_pred)`. To specify different metrics for different outputs of a multi-output model, you could also pass a dictionary, such as `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`. You can also pass a list to specify a metric or a list of metrics for each output, such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or `metrics=['accuracy', ['accuracy', 'mse']]`. When you pass the strings 'accuracy' or 'acc', we convert this to one of `tf.keras.metrics.BinaryAccuracy`, `tf.keras.metrics.CategoricalAccuracy`, `tf.keras.metrics.SparseCategoricalAccuracy` based on the loss function used and the model output shape. We do a similar conversion for the strings 'crossentropy' and 'ce' as well. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a dict, it is expected to map output names (strings) to scalar coefficients. weighted_metrics: List of metrics to be evaluated and weighted by `sample_weight` or `class_weight` during training and testing. run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s logic will not be wrapped in a `tf.function`. Recommended to leave this as `None` unless your `Model` cannot be run inside a `tf.function`. 
`run_eagerly=True` is not supported when using `tf.distribute.experimental.ParameterServerStrategy`. steps_per_execution: Int. Defaults to 1. The number of batches to run during each `tf.function` call. Running multiple batches inside a single `tf.function` call can greatly improve performance on TPUs or small models with a large Python overhead. At most, one full epoch will be run each execution. If a number larger than the size of the epoch is passed, the execution will be truncated to the size of the epoch. Note that if `steps_per_execution` is set to `N`, `Callback.on_batch_begin` and `Callback.on_batch_end` methods will only be called every `N` batches (i.e. before/after each `tf.function` execution). **kwargs: Arguments supported for backwards compatibility only. """ base_layer.keras_api_gauge.get_cell('compile').set(True) with self.distribute_strategy.scope(): if 'experimental_steps_per_execution' in kwargs: logging.warning('The argument `steps_per_execution` is no longer ' 'experimental. Pass `steps_per_execution` instead of ' '`experimental_steps_per_execution`.') if not steps_per_execution: steps_per_execution = kwargs.pop('experimental_steps_per_execution') # When compiling from an already-serialized model, we do not want to # reapply some processing steps (e.g. metric renaming for multi-output # models, which have prefixes added for each corresponding output name). from_serialized = kwargs.pop('from_serialized', False) self._validate_compile(optimizer, metrics, **kwargs) self._run_eagerly = run_eagerly self.optimizer = self._get_optimizer(optimizer) self.compiled_loss = compile_utils.LossesContainer( loss, loss_weights, output_names=self.output_names) self.compiled_metrics = compile_utils.MetricsContainer( metrics, weighted_metrics, output_names=self.output_names, from_serialized=from_serialized) self._configure_steps_per_execution(steps_per_execution or 1) # Initializes attrs that are reset each time `compile` is called. 
    # Tail of `compile()` (begins above this chunk): clear cached tf.functions
    # and mark the model compiled.
    self._reset_compile_cache()
    self._is_compiled = True
    self.loss = loss or {}

  def _get_optimizer(self, optimizer):
    """Wraps `optimizer` in `LossScaleOptimizer` if necessary."""
    # The deprecated PolicyV1 has a loss_scale, which we use for backwards
    # compatibility to match TF 2.3 behavior. The new Policy does not have a
    # loss_scale, so we use dynamic loss scaling if the mixed_float16 policy is
    # used.
    if isinstance(self._dtype_policy, policy.PolicyV1):
      loss_scale = self._dtype_policy.loss_scale
    elif self._dtype_policy.name == 'mixed_float16':
      loss_scale = 'dynamic'
    else:
      loss_scale = None

    def _get_single_optimizer(opt):
      # Resolve string/config identifiers into an optimizer instance first.
      opt = optimizers.get(opt)
      # Wrap in a loss-scale optimizer only once; an already-wrapped optimizer
      # is passed through unchanged.
      if (loss_scale is not None and
          not isinstance(opt, lso.LossScaleOptimizer)):
        if loss_scale == 'dynamic':
          opt = lso.LossScaleOptimizer(opt)
        else:
          opt = lso.LossScaleOptimizerV1(opt, loss_scale)
      return opt

    # `optimizer` may be a single optimizer or an arbitrary nest of them
    # (e.g. per-output optimizers); wrap each leaf.
    return tf.nest.map_structure(_get_single_optimizer, optimizer)

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _reset_compile_cache(self):
    # Drop the cached step functions so they are rebuilt lazily with the new
    # compile configuration.
    self.train_function = None
    self.test_function = None
    self.predict_function = None
    # Used to cache the `tf.function`'ed `train_function` to be logged in
    # TensorBoard, since the original `train_function` is not necessarily
    # a `tf.function` (e.g., with ParameterServerStrategy, the `train_function`
    # is a scheduling of the actual training function to a remote worker).
    self.train_tf_function = None

    # Used to cache `trainable` attr of `Layer`s for `fit`.
    self._compiled_trainable_state = self._get_trainable_state()

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _configure_steps_per_execution(self, steps_per_execution):
    # ONLY_FIRST_REPLICA: every replica must agree on how many steps one
    # `tf.function` call executes.
    self._steps_per_execution = tf.Variable(
        steps_per_execution,
        dtype='int64',
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)

  @property
  def _should_compute_mask(self):
    return False

  @property
  def metrics(self):
    """Returns the model's metrics added using `compile()`, `add_metric()` APIs.

    Note: Metrics passed to `compile()` are available only after a
    `keras.Model` has been trained/evaluated on actual data.

    Examples:

    >>> inputs = tf.keras.layers.Input(shape=(3,))
    >>> outputs = tf.keras.layers.Dense(2)(inputs)
    >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
    >>> model.compile(optimizer="Adam", loss="mse", metrics=["mae"])
    >>> [m.name for m in model.metrics]
    []

    >>> x = np.random.random((2, 3))
    >>> y = np.random.randint(0, 2, (2, 2))
    >>> model.fit(x, y)
    >>> [m.name for m in model.metrics]
    ['loss', 'mae']

    >>> inputs = tf.keras.layers.Input(shape=(3,))
    >>> d = tf.keras.layers.Dense(2, name='out')
    >>> output_1 = d(inputs)
    >>> output_2 = d(inputs)
    >>> model = tf.keras.models.Model(
    ...    inputs=inputs, outputs=[output_1, output_2])
    >>> model.add_metric(
    ...    tf.reduce_sum(output_2), name='mean', aggregation='mean')
    >>> model.compile(optimizer="Adam", loss="mse", metrics=["mae", "acc"])
    >>> model.fit(x, (y, y))
    >>> [m.name for m in model.metrics]
    ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',
    'out_1_acc', 'mean']

    """
    metrics = []
    if self._is_compiled:
      # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects
      # so that attr names are not load-bearing.
      if self.compiled_loss is not None:
        metrics += self.compiled_loss.metrics
      if self.compiled_metrics is not None:
        metrics += self.compiled_metrics.metrics

    # Also include metrics added via `add_metric()` on this model or any
    # sublayer.
    for l in self._flatten_layers():
      metrics.extend(l._metrics)  # pylint: disable=protected-access
    return metrics

  @property
  def metrics_names(self):
    """Returns the model's display labels for all outputs.

    Note: `metrics_names` are available only after a `keras.Model` has been
    trained/evaluated on actual data.

    Examples:

    >>> inputs = tf.keras.layers.Input(shape=(3,))
    >>> outputs = tf.keras.layers.Dense(2)(inputs)
    >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
    >>> model.compile(optimizer="Adam", loss="mse", metrics=["mae"])
    >>> model.metrics_names
    []

    >>> x = np.random.random((2, 3))
    >>> y = np.random.randint(0, 2, (2, 2))
    >>> model.fit(x, y)
    >>> model.metrics_names
    ['loss', 'mae']

    >>> inputs = tf.keras.layers.Input(shape=(3,))
    >>> d = tf.keras.layers.Dense(2, name='out')
    >>> output_1 = d(inputs)
    >>> output_2 = d(inputs)
    >>> model = tf.keras.models.Model(
    ...    inputs=inputs, outputs=[output_1, output_2])
    >>> model.compile(optimizer="Adam", loss="mse", metrics=["mae", "acc"])
    >>> model.fit(x, (y, y))
    >>> model.metrics_names
    ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',
    'out_1_acc']

    """
    # This property includes all output names including `loss` and per-output
    # losses for backward compatibility.
    return [m.name for m in self.metrics]

  @property
  def distribute_strategy(self):
    """The `tf.distribute.Strategy` this model was created under."""
    return self._distribution_strategy or tf.distribute.get_strategy()

  @property
  def run_eagerly(self):
    """Settable attribute indicating whether the model should run eagerly.

    Running eagerly means that your model will be run step by step,
    like Python code. Your model might run slower, but it should become easier
    for you to debug it by stepping into individual layer calls.

    By default, we will attempt to compile your model to a static graph to
    deliver the best execution performance.

    Returns:
      Boolean, whether the model should run eagerly.
    """
    if self.dynamic and self._run_eagerly is False:  # pylint:disable=g-bool-id-comparison
      # TODO(fchollet): consider using py_func to enable this.
      raise ValueError('Your model contains layers that can only be '
                       'successfully run in eager execution (layers '
                       'constructed with `dynamic=True`). '
                       'You cannot set `run_eagerly=False`.')

    if self._cluster_coordinator and self._run_eagerly:
      raise ValueError('When using `Model` with `ParameterServerStrategy`, '
                       '`run_eagerly` is not supported.')

    # Run eagerly logic, by priority:
    # (1) Dynamic models must be run eagerly.
    # (2) Explicitly setting run_eagerly causes a Model to be run eagerly.
    # (3) Not explicitly setting run_eagerly defaults to TF's global setting.
    return (self.dynamic or self._run_eagerly or
            (tf.config.functions_run_eagerly() and self._run_eagerly is None))

  @run_eagerly.setter
  def run_eagerly(self, value):
    self._run_eagerly = value

  def train_step(self, data):
    """The logic for one training step.

    This method can be overridden to support custom training logic.
    For concrete examples of how to override this method see
    [Customizing what happens in fit](
    https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit).
    This method is called by `Model.make_train_function`.

    This method should contain the mathematical logic for one step of training.
    This typically includes the forward pass, loss calculation, backpropagation,
    and metric updates.

    Configuration details for *how* this logic is run (e.g. `tf.function` and
    `tf.distribute.Strategy` settings), should be left to
    `Model.make_train_function`, which can also be overridden.

    Args:
      data: A nested structure of `Tensor`s.

    Returns:
      A `dict` containing values that will be passed to
      `tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
      values of the `Model`'s metrics are returned. Example:
      `{'loss': 0.2, 'accuracy': 0.7}`.
    """
    x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
    # Run forward pass.
    with tf.GradientTape() as tape:
      y_pred = self(x, training=True)
      loss = self.compiled_loss(
          y, y_pred, sample_weight, regularization_losses=self.losses)
      # A compiled loss with no targets means the caller forgot to pass `y`.
      if self.loss and y is None:
        raise TypeError(
            f'Target data is missing. Your model has `loss`: {self.loss}, '
            'and therefore expects target data to be passed in `fit()`.')
    # Run backwards pass.
    self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
    self.compiled_metrics.update_state(y, y_pred, sample_weight)
    # Collect metrics to return
    return_metrics = {}
    for metric in self.metrics:
      result = metric.result()
      if isinstance(result, dict):
        return_metrics.update(result)
      else:
        return_metrics[metric.name] = result
    return return_metrics

  def make_train_function(self, force=False):
    """Creates a function that executes one step of training.

    This method can be overridden to support custom training logic.
    This method is called by `Model.fit` and `Model.train_on_batch`.

    Typically, this method directly controls `tf.function` and
    `tf.distribute.Strategy` settings, and delegates the actual training
    logic to `Model.train_step`.

    This function is cached the first time `Model.fit` or
    `Model.train_on_batch` is called. The cache is cleared whenever
    `Model.compile` is called. You can skip the cache and generate again the
    function with `force=True`.

    Args:
      force: Whether to regenerate the train function and skip the cached
        function if available.

    Returns:
      Function. The function created by this method should accept a
      `tf.data.Iterator`, and return a `dict` containing values that will
      be passed to `tf.keras.Callbacks.on_train_batch_end`, such as
      `{'loss': 0.2, 'accuracy': 0.7}`.
    """
    if self.train_function is not None and not force:
      return self.train_function

    def step_function(model, iterator):
      """Runs a single training step."""

      def run_step(data):
        outputs = model.train_step(data)
        # Ensure counter is updated only if `train_step` succeeds.
        with tf.control_dependencies(_minimum_control_deps(outputs)):
          model._train_counter.assign_add(1)  # pylint: disable=protected-access
        return outputs

      data = next(iterator)
      outputs = model.distribute_strategy.run(run_step, args=(data,))
      # Per-replica outputs are reduced to the first replica's values for
      # logging purposes.
      outputs = reduce_per_replica(
          outputs, self.distribute_strategy, reduction='first')
      write_scalar_summaries(outputs, step=model._train_counter)  # pylint: disable=protected-access
      return outputs

    if (self._steps_per_execution is None or
        self._steps_per_execution.numpy().item() == 1):

      def train_function(iterator):
        """Runs a training execution with one step."""
        return step_function(self, iterator)

    else:

      def train_function(iterator):
        """Runs a training execution with multiple steps."""
        for _ in tf.range(self._steps_per_execution):
          outputs = step_function(self, iterator)
        return outputs

    if not self.run_eagerly:
      train_function = tf.function(
          train_function, experimental_relax_shapes=True)
      # Keep a handle to the traced function for TensorBoard graph logging.
      self.train_tf_function = train_function

    self.train_function = train_function

    if self._cluster_coordinator:
      # With ParameterServerStrategy the step is scheduled onto remote
      # workers rather than run locally.
      self.train_function = lambda iterator: self._cluster_coordinator.schedule(  # pylint: disable=g-long-lambda
          train_function, args=(iterator,))

    return self.train_function

  @traceback_utils.filter_traceback
  def fit(self,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose='auto',
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_batch_size=None,
          validation_freq=1,
          max_queue_size=10,
          workers=1,
          use_multiprocessing=False):
    """Trains the model for a fixed number of epochs (iterations on a dataset).

    Args:
      x: Input data. It could be:
        - A Numpy array (or array-like), or a list of arrays
          (in case the model has multiple inputs).
        - A TensorFlow tensor, or a list of tensors
          (in case the model has multiple inputs).
        - A dict mapping input names to the corresponding array/tensors,
          if the model has named inputs.
        - A `tf.data` dataset. Should return a tuple
          of either `(inputs, targets)` or
          `(inputs, targets, sample_weights)`.
        - A generator or `keras.utils.Sequence` returning `(inputs, targets)`
          or `(inputs, targets, sample_weights)`.
        - A `tf.keras.utils.experimental.DatasetCreator`, which wraps a
          callable that takes a single argument of type
          `tf.distribute.InputContext`, and returns a `tf.data.Dataset`.
          `DatasetCreator` should be used when users prefer to specify the
          per-replica batching and sharding logic for the `Dataset`.
          See `tf.keras.utils.experimental.DatasetCreator` doc for more
          information.
        A more detailed description of unpacking behavior for iterator types
        (Dataset, generator, Sequence) is given below. If using
        `tf.distribute.experimental.ParameterServerStrategy`, only
        `DatasetCreator` type is supported for `x`.
      y: Target data. Like the input data `x`,
        it could be either Numpy array(s) or TensorFlow tensor(s).
        It should be consistent with `x` (you cannot have Numpy inputs and
        tensor targets, or inversely). If `x` is a dataset, generator,
        or `keras.utils.Sequence` instance, `y` should
        not be specified (since targets will be obtained from `x`).
      batch_size: Integer or `None`.
        Number of samples per gradient update.
        If unspecified, `batch_size` will default to 32.
        Do not specify the `batch_size` if your data is in the
        form of datasets, generators, or `keras.utils.Sequence` instances
        (since they generate batches).
      epochs: Integer. Number of epochs to train the model.
        An epoch is an iteration over the entire `x` and `y`
        data provided. Note that in conjunction with `initial_epoch`,
        `epochs` is to be understood as "final epoch".
        The model is not trained for a number of iterations
        given by `epochs`, but merely until the epoch
        of index `epochs` is reached.
      verbose: 'auto', 0, 1, or 2. Verbosity mode.
        0 = silent, 1 = progress bar, 2 = one line per epoch.
        'auto' defaults to 1 for most cases, but 2 when used with
        `ParameterServerStrategy`. Note that the progress bar is not
        particularly useful when logged to a file, so verbose=2 is
        recommended when not running interactively (eg, in a production
        environment).
      callbacks: List of `keras.callbacks.Callback` instances.
        List of callbacks to apply during training.
        See `tf.keras.callbacks`. Note `tf.keras.callbacks.ProgbarLogger`
        and `tf.keras.callbacks.History` callbacks are created automatically
        and need not be passed into `model.fit`.
        `tf.keras.callbacks.ProgbarLogger` is created or not based on
        `verbose` argument to `model.fit`.
        Callbacks with batch-level calls are currently unsupported with
        `tf.distribute.experimental.ParameterServerStrategy`, and users are
        advised to implement epoch-level calls instead with an appropriate
        `steps_per_epoch` value.
      validation_split: Float between 0 and 1.
        Fraction of the training data to be used as validation data.
        The model will set apart this fraction of the training data,
        will not train on it, and will evaluate
        the loss and any model metrics
        on this data at the end of each epoch.
        The validation data is selected from the last samples
        in the `x` and `y` data provided, before shuffling. This argument is
        not supported when `x` is a dataset, generator or
        `keras.utils.Sequence` instance.
        `validation_split` is not yet supported with
        `tf.distribute.experimental.ParameterServerStrategy`.
      validation_data: Data on which to evaluate
        the loss and any model metrics at the end of each epoch.
        The model will not be trained on this data. Thus, note the fact
        that the validation loss of data provided using `validation_split`
        or `validation_data` is not affected by regularization layers like
        noise and dropout.
        `validation_data` will override `validation_split`.
        `validation_data` could be:
          - A tuple `(x_val, y_val)` of Numpy arrays or tensors.
          - A tuple `(x_val, y_val, val_sample_weights)` of NumPy arrays.
          - A `tf.data.Dataset`.
          - A Python generator or `keras.utils.Sequence` returning
          `(inputs, targets)` or `(inputs, targets, sample_weights)`.
        `validation_data` is not yet supported with
        `tf.distribute.experimental.ParameterServerStrategy`.
      shuffle: Boolean (whether to shuffle the training data
        before each epoch) or str (for 'batch'). This argument is ignored
        when `x` is a generator or an object of tf.data.Dataset.
        'batch' is a special option for dealing
        with the limitations of HDF5 data; it shuffles in batch-sized
        chunks. Has no effect when `steps_per_epoch` is not `None`.
      class_weight: Optional dictionary mapping class indices (integers)
        to a weight (float) value, used for weighting the loss function
        (during training only).
        This can be useful to tell the model to
        "pay more attention" to samples from
        an under-represented class.
      sample_weight: Optional Numpy array of weights for
        the training samples, used for weighting the loss function
        (during training only). You can either pass a flat (1D)
        Numpy array with the same length as the input samples
        (1:1 mapping between weights and samples),
        or in the case of temporal data,
        you can pass a 2D array with shape
        `(samples, sequence_length)`,
        to apply a different weight to every timestep of every sample. This
        argument is not supported when `x` is a dataset, generator, or
        `keras.utils.Sequence` instance, instead provide the sample_weights
        as the third element of `x`.
      initial_epoch: Integer.
        Epoch at which to start training
        (useful for resuming a previous training run).
      steps_per_epoch: Integer or `None`.
        Total number of steps (batches of samples)
        before declaring one epoch finished and starting the
        next epoch. When training with input tensors such as
        TensorFlow data tensors, the default `None` is equal to
        the number of samples in your dataset divided by
        the batch size, or 1 if that cannot be determined. If x is a
        `tf.data` dataset, and 'steps_per_epoch'
        is None, the epoch will run until the input dataset is exhausted.
        When passing an infinitely repeating dataset, you must specify the
        `steps_per_epoch` argument. If `steps_per_epoch=-1` the training
        will run indefinitely with an infinitely repeating dataset.
        This argument is not supported with array inputs.
        When using `tf.distribute.experimental.ParameterServerStrategy`:
          * `steps_per_epoch=None` is not supported.
      validation_steps: Only relevant if `validation_data` is provided and
        is a `tf.data` dataset. Total number of steps (batches of
        samples) to draw before stopping when performing validation
        at the end of every epoch. If 'validation_steps' is None, validation
        will run until the `validation_data` dataset is exhausted. In the
        case of an infinitely repeated dataset, it will run into an
        infinite loop. If 'validation_steps' is specified and only part of
        the dataset will be consumed, the evaluation will start from the
        beginning of the dataset at each epoch. This ensures that the same
        validation samples are used every time.
      validation_batch_size: Integer or `None`.
        Number of samples per validation batch.
        If unspecified, will default to `batch_size`.
        Do not specify the `validation_batch_size` if your data is in the
        form of datasets, generators, or `keras.utils.Sequence` instances
        (since they generate batches).
      validation_freq: Only relevant if validation data is provided. Integer
        or `collections.abc.Container` instance (e.g. list, tuple, etc.).
        If an integer, specifies how many training epochs to run before a
        new validation run is performed, e.g. `validation_freq=2` runs
        validation every 2 epochs. If a Container, specifies the epochs on
        which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
        validation at the end of the 1st, 2nd, and 10th epochs.
      max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
        input only. Maximum size for the generator queue.
        If unspecified, `max_queue_size` will default to 10.
      workers: Integer. Used for generator or `keras.utils.Sequence` input
        only. Maximum number of processes to spin up
        when using process-based threading. If unspecified, `workers`
        will default to 1.
      use_multiprocessing: Boolean. Used for generator or
        `keras.utils.Sequence` input only. If `True`, use process-based
        threading. If unspecified, `use_multiprocessing` will default to
        `False`. Note that because this implementation relies on
        multiprocessing, you should not pass non-picklable arguments to
        the generator as they can't be passed easily to children processes.

    Unpacking behavior for iterator-like inputs:
        A common pattern is to pass a tf.data.Dataset, generator, or
      tf.keras.utils.Sequence to the `x` argument of fit, which will in fact
      yield not only features (x) but optionally targets (y) and sample
      weights. Keras requires that the output of such iterator-likes be
      unambiguous. The iterator should return a tuple of length 1, 2, or 3,
      where the optional second and third elements will be used for y and
      sample_weight respectively. Any other type provided will be wrapped in
      a length one tuple, effectively treating everything as 'x'. When
      yielding dicts, they should still adhere to the top-level tuple
      structure.
      e.g. `({"x0": x0, "x1": x1}, y)`. Keras will not attempt to separate
      features, targets, and weights from the keys of a single dict.
        A notable unsupported data type is the namedtuple. The reason is
      that it behaves like both an ordered datatype (tuple) and a mapping
      datatype (dict). So given a namedtuple of the form:
          `namedtuple("example_tuple", ["y", "x"])`
      it is ambiguous whether to reverse the order of the elements when
      interpreting the value. Even worse is a tuple of the form:
          `namedtuple("other_tuple", ["x", "y", "z"])`
      where it is unclear if the tuple was intended to be unpacked into x,
      y, and sample_weight or passed through as a single element to `x`. As
      a result the data processing code will simply raise a ValueError if it
      encounters a namedtuple. (Along with instructions to remedy the
      issue.)

    Returns:
      A `History` object. Its `History.history` attribute is
      a record of training loss values and metrics values
      at successive epochs, as well as validation loss values
      and validation metrics values (if applicable).

    Raises:
      RuntimeError: 1. If the model was never compiled or,
      2. If `model.fit` is  wrapped in `tf.function`.

      ValueError: In case of mismatch between the provided input data
        and what the model expects or when the input data is empty.
    """
    base_layer.keras_api_gauge.get_cell('fit').set(True)
    # Legacy graph support is contained in `training_v1.Model`.
    version_utils.disallow_legacy_graph('Model', 'fit')
    self._assert_compile_was_called()
    self._check_call_args('fit')
    _disallow_inside_tf_function('fit')

    if verbose == 'auto':
      if self.distribute_strategy._should_use_with_coordinator:  # pylint: disable=protected-access
        verbose = 2  # Default to epoch-level logging for PSStrategy.
      else:
        verbose = 1  # Default to batch-level logging otherwise.

    if validation_split:
      # Create the validation data using the training data. Only supported for
      # `Tensor` and `NumPy` input.
      (x, y, sample_weight), validation_data = (
          data_adapter.train_validation_split(
              (x, y, sample_weight), validation_split=validation_split))

    if validation_data:
      val_x, val_y, val_sample_weight = (
          data_adapter.unpack_x_y_sample_weight(validation_data))

    if self.distribute_strategy._should_use_with_coordinator:  # pylint: disable=protected-access
      self._cluster_coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
          self.distribute_strategy)

    with self.distribute_strategy.scope(), \
         training_utils.RespectCompiledTrainableState(self):
      # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
      data_handler = data_adapter.get_data_handler(
          x=x,
          y=y,
          sample_weight=sample_weight,
          batch_size=batch_size,
          steps_per_epoch=steps_per_epoch,
          initial_epoch=initial_epoch,
          epochs=epochs,
          shuffle=shuffle,
          class_weight=class_weight,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing,
          model=self,
          steps_per_execution=self._steps_per_execution)

      # Container that configures and calls `tf.keras.Callback`s.
      if not isinstance(callbacks, callbacks_module.CallbackList):
        callbacks = callbacks_module.CallbackList(
            callbacks,
            add_history=True,
            add_progbar=verbose != 0,
            model=self,
            verbose=verbose,
            epochs=epochs,
            steps=data_handler.inferred_steps)

      self.stop_training = False
      self.train_function = self.make_train_function()
      self._train_counter.assign(0)
      callbacks.on_train_begin()
      training_logs = None
      # Handle fault-tolerance for multi-worker.
      # TODO(omalleyt): Fix the ordering issues that mean this has to
      # happen after `callbacks.on_train_begin`.
      data_handler._initial_epoch = (  # pylint: disable=protected-access
          self._maybe_load_initial_epoch_from_ckpt(initial_epoch))
      logs = None
      for epoch, iterator in data_handler.enumerate_epochs():
        self.reset_metrics()
        callbacks.on_epoch_begin(epoch)
        with data_handler.catch_stop_iteration():
          for step in data_handler.steps():
            with tf.profiler.experimental.Trace(
                'train',
                epoch_num=epoch,
                step_num=step,
                batch_size=batch_size,
                _r=1):
              callbacks.on_train_batch_begin(step)
              tmp_logs = self.train_function(iterator)
              if data_handler.should_sync:
                context.async_wait()
              logs = tmp_logs  # No error, now safe to assign to logs.
              end_step = step + data_handler.step_increment
              callbacks.on_train_batch_end(end_step, logs)
              if self.stop_training:
                break

        logs = tf_utils.sync_to_numpy_or_python_type(logs)
        if logs is None:
          raise ValueError('Unexpected result of `train_function` '
                           '(Empty logs). Please use '
                           '`Model.compile(..., run_eagerly=True)`, or '
                           '`tf.config.run_functions_eagerly(True)` for more '
                           'information of where went wrong, or file a '
                           'issue/bug to `tf.keras`.')
        epoch_logs = copy.copy(logs)

        # Run validation.
        if validation_data and self._should_eval(epoch, validation_freq):
          # Create data_handler for evaluation and cache it.
          if getattr(self, '_eval_data_handler', None) is None:
            self._eval_data_handler = data_adapter.get_data_handler(
                x=val_x,
                y=val_y,
                sample_weight=val_sample_weight,
                batch_size=validation_batch_size or batch_size,
                steps_per_epoch=validation_steps,
                initial_epoch=0,
                epochs=1,
                max_queue_size=max_queue_size,
                workers=workers,
                use_multiprocessing=use_multiprocessing,
                model=self,
                steps_per_execution=self._steps_per_execution)
          val_logs = self.evaluate(
              x=val_x,
              y=val_y,
              sample_weight=val_sample_weight,
              batch_size=validation_batch_size or batch_size,
              steps=validation_steps,
              callbacks=callbacks,
              max_queue_size=max_queue_size,
              workers=workers,
              use_multiprocessing=use_multiprocessing,
              return_dict=True,
              _use_cached_eval_dataset=True)
          val_logs = {'val_' + name: val for name, val in val_logs.items()}
          epoch_logs.update(val_logs)

        callbacks.on_epoch_end(epoch, epoch_logs)
        training_logs = epoch_logs
        if self.stop_training:
          break

      # If eval data_handler exists, delete it after all epochs are done.
      if getattr(self, '_eval_data_handler', None) is not None:
        del self._eval_data_handler
      callbacks.on_train_end(logs=training_logs)
      return self.history

  def test_step(self, data):
    """The logic for one evaluation step.

    This method can be overridden to support custom evaluation logic.
    This method is called by `Model.make_test_function`.

    This function should contain the mathematical logic for one step of
    evaluation.
    This typically includes the forward pass, loss calculation, and metrics
    updates.

    Configuration details for *how* this logic is run (e.g.
    `tf.function` and `tf.distribute.Strategy` settings), should be left to
    `Model.make_test_function`, which can also be overridden.

    Args:
      data: A nested structure of `Tensor`s.

    Returns:
      A `dict` containing values that will be passed to
      `tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
      values of the `Model`'s metrics are returned.
    """
    x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

    y_pred = self(x, training=False)
    # Updates stateful loss metrics.
    self.compiled_loss(
        y, y_pred, sample_weight, regularization_losses=self.losses)
    self.compiled_metrics.update_state(y, y_pred, sample_weight)
    # Collect metrics to return
    return_metrics = {}
    for metric in self.metrics:
      result = metric.result()
      if isinstance(result, dict):
        return_metrics.update(result)
      else:
        return_metrics[metric.name] = result
    return return_metrics

  def make_test_function(self, force=False):
    """Creates a function that executes one step of evaluation.

    This method can be overridden to support custom evaluation logic.
    This method is called by `Model.evaluate` and `Model.test_on_batch`.

    Typically, this method directly controls `tf.function` and
    `tf.distribute.Strategy` settings, and delegates the actual evaluation
    logic to `Model.test_step`.

    This function is cached the first time `Model.evaluate` or
    `Model.test_on_batch` is called. The cache is cleared whenever
    `Model.compile` is called. You can skip the cache and generate again the
    function with `force=True`.

    Args:
      force: Whether to regenerate the test function and skip the cached
        function if available.

    Returns:
      Function. The function created by this method should accept a
      `tf.data.Iterator`, and return a `dict` containing values that will
      be passed to `tf.keras.Callbacks.on_test_batch_end`.
    """
    if self.test_function is not None and not force:
      return self.test_function

    def step_function(model, iterator):
      """Runs a single evaluation step."""

      def run_step(data):
        outputs = model.test_step(data)
        # Ensure counter is updated only if `test_step` succeeds.
        with tf.control_dependencies(_minimum_control_deps(outputs)):
          model._test_counter.assign_add(1)  # pylint: disable=protected-access
        return outputs

      data = next(iterator)
      outputs = model.distribute_strategy.run(run_step, args=(data,))
      # Per-replica outputs are reduced to the first replica's values for
      # logging purposes.
      outputs = reduce_per_replica(
          outputs, self.distribute_strategy, reduction='first')
      return outputs

    if (self._steps_per_execution is None or
        self._steps_per_execution.numpy().item() == 1):

      def test_function(iterator):
        """Runs an evaluation execution with one step."""
        return step_function(self, iterator)

    else:

      def test_function(iterator):
        """Runs an evaluation execution with multiple steps."""
        for _ in tf.range(self._steps_per_execution):
          outputs = step_function(self, iterator)
        return outputs

    if not self.run_eagerly:
      test_function = tf.function(
          test_function, experimental_relax_shapes=True)

    self.test_function = test_function

    if self._cluster_coordinator:
      # With ParameterServerStrategy the step is scheduled onto remote
      # workers rather than run locally.
      self.test_function = lambda iterator: self._cluster_coordinator.schedule(  # pylint: disable=g-long-lambda
          test_function, args=(iterator,))

    return self.test_function

  @traceback_utils.filter_traceback
  def evaluate(self,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               max_queue_size=10,
               workers=1,
               use_multiprocessing=False,
               return_dict=False,
               **kwargs):
    """Returns the loss value & metrics values for the model in test mode.

    Computation is done in batches (see the `batch_size` arg.)

    Args:
      x: Input data. It could be:
        - A Numpy array (or array-like), or a list of arrays
          (in case the model has multiple inputs).
        - A TensorFlow tensor, or a list of tensors
          (in case the model has multiple inputs).
        - A dict mapping input names to the corresponding array/tensors,
          if the model has named inputs.
        - A `tf.data` dataset. Should return a tuple
          of either `(inputs, targets)` or
          `(inputs, targets, sample_weights)`.
        - A generator or `keras.utils.Sequence` returning `(inputs, targets)`
          or `(inputs, targets, sample_weights)`.
        A more detailed description of unpacking behavior for iterator types
        (Dataset, generator, Sequence) is given in the `Unpacking behavior
        for iterator-like inputs` section of `Model.fit`.
      y: Target data. Like the input data `x`, it could be either Numpy
        array(s) or TensorFlow tensor(s). It should be consistent with `x`
        (you cannot have Numpy inputs and tensor targets, or inversely).
        If `x` is a dataset, generator or `keras.utils.Sequence` instance,
        `y` should not be specified (since targets will be obtained from
        the iterator/dataset).
      batch_size: Integer or `None`. Number of samples per batch of
        computation. If unspecified, `batch_size` will default to 32. Do
        not specify the `batch_size` if your data is in the form of a
        dataset, generators, or `keras.utils.Sequence` instances (since
        they generate batches).
      verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar.
      sample_weight: Optional Numpy array of weights for the test samples,
        used for weighting the loss function. You can either pass a flat
        (1D) Numpy array with the same length as the input samples
        (1:1 mapping between weights and samples), or in the case of
        temporal data, you can pass a 2D array with shape `(samples,
        sequence_length)`, to apply a different weight to every timestep
        of every sample. This argument is not supported when `x` is a
        dataset, instead pass sample weights as the third element of `x`.
      steps: Integer or `None`. Total number of steps (batches of samples)
        before declaring the evaluation round finished. Ignored with the
        default value of `None`. If x is a `tf.data` dataset and `steps`
        is None, 'evaluate' will run until the dataset is exhausted. This
        argument is not supported with array inputs.
      callbacks: List of `keras.callbacks.Callback` instances. List of
        callbacks to apply during evaluation. See
        [callbacks](/api_docs/python/tf/keras/callbacks).
      max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
        input only. Maximum size for the generator queue. If unspecified,
        `max_queue_size` will default to 10.
      workers: Integer. Used for generator or `keras.utils.Sequence` input
        only. Maximum number of processes to spin up when using
        process-based threading. If unspecified, `workers` will default to
        1.
      use_multiprocessing: Boolean. Used for generator or
        `keras.utils.Sequence` input only. If `True`, use process-based
        threading. If unspecified, `use_multiprocessing` will default to
        `False`. Note that because this implementation relies on
        multiprocessing, you should not pass non-picklable arguments to
        the generator as they can't be passed easily to children processes.
      return_dict: If `True`, loss and metric results are returned as a
        dict, with each key being the name of the metric. If `False`, they
        are returned as a list.
      **kwargs: Unused at this time.

    See the discussion of `Unpacking behavior for iterator-like inputs` for
    `Model.fit`.

    `Model.evaluate` is not yet supported with
    `tf.distribute.experimental.ParameterServerStrategy`.

    Returns:
      Scalar test loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

    Raises:
      RuntimeError: If `model.evaluate` is wrapped in a `tf.function`.
    """
    base_layer.keras_api_gauge.get_cell('evaluate').set(True)
    version_utils.disallow_legacy_graph('Model', 'evaluate')
    self._assert_compile_was_called()
    self._check_call_args('evaluate')
    _disallow_inside_tf_function('evaluate')
    # `_use_cached_eval_dataset` is a private flag used by `Model.fit` to
    # reuse the data handler it created for validation.
    use_cached_eval_dataset = kwargs.pop('_use_cached_eval_dataset', False)
    if kwargs:
      raise TypeError(f'Invalid keyword arguments: {(kwargs,)}')

    if self.distribute_strategy._should_use_with_coordinator:  # pylint: disable=protected-access
      self._cluster_coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
          self.distribute_strategy)

    with self.distribute_strategy.scope():
      # Use cached evaluation data only when it's called in `Model.fit`
      if (use_cached_eval_dataset
          and getattr(self, '_eval_data_handler', None) is not None):
        data_handler = self._eval_data_handler
      else:
        # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
        data_handler = data_adapter.get_data_handler(
            x=x,
            y=y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            steps_per_epoch=steps,
            initial_epoch=0,
            epochs=1,
            max_queue_size=max_queue_size,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            model=self,
            steps_per_execution=self._steps_per_execution)

      # Container that configures and calls `tf.keras.Callback`s.
      if not isinstance(callbacks, callbacks_module.CallbackList):
        callbacks = callbacks_module.CallbackList(
            callbacks,
            add_history=True,
            add_progbar=verbose != 0,
            model=self,
            verbose=verbose,
            epochs=1,
            steps=data_handler.inferred_steps)

      logs = {}
      self.test_function = self.make_test_function()
      self._test_counter.assign(0)
      callbacks.on_test_begin()
      for _, iterator in data_handler.enumerate_epochs():  # Single epoch.
        self.reset_metrics()
        with data_handler.catch_stop_iteration():
          for step in data_handler.steps():
            with tf.profiler.experimental.Trace('test', step_num=step, _r=1):
              callbacks.on_test_batch_begin(step)
              tmp_logs = self.test_function(iterator)
              if data_handler.should_sync:
                context.async_wait()
              logs = tmp_logs  # No error, now safe to assign to logs.
              end_step = step + data_handler.step_increment
              callbacks.on_test_batch_end(end_step, logs)
      logs = tf_utils.sync_to_numpy_or_python_type(logs)
      callbacks.on_test_end(logs=logs)

      if return_dict:
        return logs
      else:
        return flatten_metrics_in_order(logs, self.metrics_names)

  def predict_step(self, data):
    """The logic for one inference step.

    This method can be overridden to support custom inference logic.
    This method is called by `Model.make_predict_function`.

    This method should contain the mathematical logic for one step of
    inference.  This typically includes the forward pass.

    Configuration details for *how* this logic is run (e.g. `tf.function`
    and `tf.distribute.Strategy` settings), should be left to
    `Model.make_predict_function`, which can also be overridden.

    Args:
      data: A nested structure of `Tensor`s.

    Returns:
      The result of one inference step, typically the output of calling the
      `Model` on data.
    """
    x, _, _ = data_adapter.unpack_x_y_sample_weight(data)
    return self(x, training=False)

  def make_predict_function(self, force=False):
    """Creates a function that executes one step of inference.

    This method can be overridden to support custom inference logic.
    This method is called by `Model.predict` and `Model.predict_on_batch`.

    Typically, this method directly controls `tf.function` and
    `tf.distribute.Strategy` settings, and delegates the actual evaluation
    logic to `Model.predict_step`.

    This function is cached the first time `Model.predict` or
    `Model.predict_on_batch` is called. The cache is cleared whenever
    `Model.compile` is called. You can skip the cache and generate again the
    function with `force=True`.

    Args:
      force: Whether to regenerate the predict function and skip the cached
        function if available.

    Returns:
      Function. The function created by this method should accept a
      `tf.data.Iterator`, and return the outputs of the `Model`.
    """
    if self.predict_function is not None and not force:
      return self.predict_function

    def step_function(model, iterator):
      """Runs a single predict step."""

      def run_step(data):
        outputs = model.predict_step(data)
        # Ensure counter is updated only if `predict_step` succeeds.
        with tf.control_dependencies(_minimum_control_deps(outputs)):
          model._predict_counter.assign_add(1)  # pylint: disable=protected-access
        return outputs

      data = next(iterator)
      outputs = model.distribute_strategy.run(run_step, args=(data,))
      # Unlike train/test, predictions from all replicas are concatenated so
      # the caller gets the full batch back.
      outputs = reduce_per_replica(
          outputs, self.distribute_strategy, reduction='concat')
      return outputs

    if (self._steps_per_execution is None or
        self._steps_per_execution.numpy().item() == 1):

      def predict_function(iterator):
        """Runs a predict execution with one step."""
        return step_function(self, iterator)

    else:

      def predict_function(iterator):
        """Runs a predict execution with multiple steps."""
        outputs = step_function(self, iterator)
        for _ in tf.range(self._steps_per_execution - 1):
          # Allow the batch dimension to vary across loop iterations so the
          # concatenated result can grow.
          tf.autograph.experimental.set_loop_options(
              shape_invariants=[(
                  t, tf_utils.get_tensor_spec(t, dynamic_batch=True).shape)
                                for t in tf.nest.flatten(outputs)])
          step_outputs = step_function(self, iterator)
          outputs = tf.nest.map_structure(lambda t1, t2: concat([t1, t2]),
                                          outputs, step_outputs)
        return outputs

    if not self.run_eagerly:
      predict_function = tf.function(
          predict_function, experimental_relax_shapes=True)

    self.predict_function = predict_function
    return self.predict_function

  @traceback_utils.filter_traceback
  def predict(self,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              max_queue_size=10,
              workers=1,
              use_multiprocessing=False):
    """Generates output predictions for the input samples.

    Computation is done in batches.
    This method is designed for performance in
    large scale inputs. For small amount of inputs that fit in one batch,
    directly using `__call__()` is recommended for faster execution, e.g.,
    `model(x)`, or `model(x, training=False)` if you have layers such as
    `tf.keras.layers.BatchNormalization` that behaves differently during
    inference. Also, note the fact that test loss is not affected by
    regularization layers like noise and dropout.

    Args:
        x: Input samples. It could be:
          - A Numpy array (or array-like), or a list of arrays
            (in case the model has multiple inputs).
          - A TensorFlow tensor, or a list of tensors
            (in case the model has multiple inputs).
          - A `tf.data` dataset.
          - A generator or `keras.utils.Sequence` instance.
          A more detailed description of unpacking behavior for iterator types
          (Dataset, generator, Sequence) is given in the `Unpacking behavior
          for iterator-like inputs` section of `Model.fit`.
        batch_size: Integer or `None`.
            Number of samples per batch.
            If unspecified, `batch_size` will default to 32.
            Do not specify the `batch_size` if your data is in the
            form of dataset, generators, or `keras.utils.Sequence` instances
            (since they generate batches).
        verbose: Verbosity mode, 0 or 1.
        steps: Total number of steps (batches of samples)
            before declaring the prediction round finished.
            Ignored with the default value of `None`. If x is a `tf.data`
            dataset and `steps` is None, `predict()` will
            run until the input dataset is exhausted.
        callbacks: List of `keras.callbacks.Callback` instances.
            List of callbacks to apply during prediction.
            See [callbacks](/api_docs/python/tf/keras/callbacks).
        max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
            input only. Maximum size for the generator queue.
            If unspecified, `max_queue_size` will default to 10.
        workers: Integer. Used for generator or `keras.utils.Sequence` input
            only. Maximum number of processes to spin up when using
            process-based threading. If unspecified, `workers` will default
            to 1.
        use_multiprocessing: Boolean. Used for generator or
            `keras.utils.Sequence` input only. If `True`, use process-based
            threading. If unspecified, `use_multiprocessing` will default to
            `False`. Note that because this implementation relies on
            multiprocessing, you should not pass non-picklable arguments to
            the generator as they can't be passed easily to children
            processes.

    See the discussion of `Unpacking behavior for iterator-like inputs` for
    `Model.fit`. Note that Model.predict uses the same interpretation rules as
    `Model.fit` and `Model.evaluate`, so inputs must be unambiguous for all
    three methods.

    Returns:
        Numpy array(s) of predictions.

    Raises:
        RuntimeError: If `model.predict` is wrapped in a `tf.function`.
        ValueError: In case of mismatch between the provided
            input data and the model's expectations,
            or in case a stateful model receives a number of samples
            that is not a multiple of the batch size.
    """
    base_layer.keras_api_gauge.get_cell('predict').set(True)
    version_utils.disallow_legacy_graph('Model', 'predict')
    self._check_call_args('predict')
    _disallow_inside_tf_function('predict')

    # TODO(yashkatariya): Cache model on the coordinator for faster prediction.
    # If running under PSS, then swap it with OneDeviceStrategy so that
    # execution will run on the coordinator.
    original_pss_strategy = None
    if self.distribute_strategy._should_use_with_coordinator:  # pylint: disable=protected-access
      original_pss_strategy = self.distribute_strategy
      self._distribution_strategy = None

    # Cluster coordinator is set by `.fit()` and `.evaluate()` which is not
    # needed in `.predict()` because all the predictions happen on the
    # coordinator/locally.
    if self._cluster_coordinator:
      self._cluster_coordinator = None

    outputs = None
    with self.distribute_strategy.scope():
      # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
      dataset_types = (tf.compat.v1.data.Dataset, tf.data.Dataset)
      # Auto-sharding by FILE can reorder results across workers/hosts;
      # switch the dataset to DATA sharding when possible.
      if (self._in_multi_worker_mode() or _is_tpu_multi_host(
          self.distribute_strategy)) and isinstance(x, dataset_types):
        try:
          options = tf.data.Options()
          data_option = tf.data.experimental.AutoShardPolicy.DATA
          options.experimental_distribute.auto_shard_policy = data_option
          x = x.with_options(options)
        except ValueError:
          warnings.warn('Using Model.predict with '
                        'MultiWorkerDistributionStrategy or TPUStrategy and '
                        'AutoShardPolicy.FILE might lead to out-of-order result'
                        '. Consider setting it to AutoShardPolicy.DATA.')

      data_handler = data_adapter.get_data_handler(
          x=x,
          batch_size=batch_size,
          steps_per_epoch=steps,
          initial_epoch=0,
          epochs=1,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing,
          model=self,
          steps_per_execution=self._steps_per_execution)

      # Container that configures and calls `tf.keras.Callback`s.
      if not isinstance(callbacks, callbacks_module.CallbackList):
        callbacks = callbacks_module.CallbackList(
            callbacks,
            add_history=True,
            add_progbar=verbose != 0,
            model=self,
            verbose=verbose,
            epochs=1,
            steps=data_handler.inferred_steps)

      self.predict_function = self.make_predict_function()
      self._predict_counter.assign(0)
      callbacks.on_predict_begin()
      batch_outputs = None
      for _, iterator in data_handler.enumerate_epochs():  # Single epoch.
        with data_handler.catch_stop_iteration():
          for step in data_handler.steps():
            callbacks.on_predict_batch_begin(step)
            tmp_batch_outputs = self.predict_function(iterator)
            if data_handler.should_sync:
              context.async_wait()
            batch_outputs = tmp_batch_outputs  # No error, now safe to assign.
            # Accumulate per-batch outputs as lists; they are concatenated
            # into full arrays once all batches have run.
            if outputs is None:
              outputs = tf.nest.map_structure(lambda batch_output: [batch_output],
                                              batch_outputs)
            else:
              tf.__internal__.nest.map_structure_up_to(
                  batch_outputs,
                  lambda output, batch_output: output.append(batch_output),
                  outputs, batch_outputs)
            end_step = step + data_handler.step_increment
            callbacks.on_predict_batch_end(end_step, {'outputs': batch_outputs})
      if batch_outputs is None:
        raise ValueError('Unexpected result of `predict_function` '
                         '(Empty batch_outputs). Please use '
                         '`Model.compile(..., run_eagerly=True)`, or '
                         '`tf.config.run_functions_eagerly(True)` for more '
                         'information of where went wrong, or file a '
                         'issue/bug to `tf.keras`.')
      callbacks.on_predict_end()
    all_outputs = tf.__internal__.nest.map_structure_up_to(batch_outputs, concat,
                                                           outputs)

    # If originally PSS strategy was used, then replace it back since predict
    # is running under `OneDeviceStrategy` after the swap and once its done
    # we need to replace it back to PSS again.
    if original_pss_strategy is not None:
      self._distribution_strategy = original_pss_strategy

    return tf_utils.sync_to_numpy_or_python_type(all_outputs)

  def reset_metrics(self):
    """Resets the state of all the metrics in the model.

    Examples:

    >>> inputs = tf.keras.layers.Input(shape=(3,))
    >>> outputs = tf.keras.layers.Dense(2)(inputs)
    >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
    >>> model.compile(optimizer="Adam", loss="mse", metrics=["mae"])

    >>> x = np.random.random((2, 3))
    >>> y = np.random.randint(0, 2, (2, 2))
    >>> _ = model.fit(x, y, verbose=0)
    >>> assert all(float(m.result()) for m in model.metrics)

    >>> model.reset_metrics()
    >>> assert all(float(m.result()) == 0 for m in model.metrics)

    """
    for m in self.metrics:
      m.reset_state()

  def train_on_batch(self,
                     x,
                     y=None,
                     sample_weight=None,
                     class_weight=None,
                     reset_metrics=True,
                     return_dict=False):
    """Runs a single gradient update on a single batch of data.

    Args:
        x: Input data.
            It could be:
          - A Numpy array (or array-like), or a list of arrays
              (in case the model has multiple inputs).
          - A TensorFlow tensor, or a list of tensors
              (in case the model has multiple inputs).
          - A dict mapping input names to the corresponding array/tensors,
              if the model has named inputs.
        y: Target data. Like the input data `x`, it could be either Numpy
          array(s) or TensorFlow tensor(s). It should be consistent with `x`
          (you cannot have Numpy inputs and tensor targets, or inversely).
        sample_weight: Optional array of the same length as x, containing
          weights to apply to the model's loss for each sample. In the case of
          temporal data, you can pass a 2D array with shape (samples,
          sequence_length), to apply a different weight to every timestep of
          every sample.
        class_weight: Optional dictionary mapping class indices (integers) to a
          weight (float) to apply to the model's loss for the samples from this
          class during training. This can be useful to tell the model to "pay
          more attention" to samples from an under-represented class.
        reset_metrics: If `True`, the metrics returned will be only for this
          batch. If `False`, the metrics will be statefully accumulated across
          batches.
        return_dict: If `True`, loss and metric results are returned as a dict,
          with each key being the name of the metric. If `False`, they are
          returned as a list.

    Returns:
        Scalar training loss
        (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.

    Raises:
      RuntimeError: If `model.train_on_batch` is wrapped in a `tf.function`.
    """
    self._assert_compile_was_called()
    self._check_call_args('train_on_batch')
    _disallow_inside_tf_function('train_on_batch')
    # Respect layer-level `trainable` flags captured at compile time while
    # running the single-batch train function.
    with self.distribute_strategy.scope(), \
         training_utils.RespectCompiledTrainableState(self):
      iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
                                                    y, sample_weight,
                                                    class_weight)
      self.train_function = self.make_train_function()
      logs = self.train_function(iterator)

    if reset_metrics:
      self.reset_metrics()
    logs = tf_utils.sync_to_numpy_or_python_type(logs)
    if return_dict:
      return logs
    else:
      return flatten_metrics_in_order(logs, self.metrics_names)

  def test_on_batch(self,
                    x,
                    y=None,
                    sample_weight=None,
                    reset_metrics=True,
                    return_dict=False):
    """Test the model on a single batch of samples.

    Args:
        x: Input data. It could be:
          - A Numpy array (or array-like), or a list of arrays (in case the
              model has multiple inputs).
          - A TensorFlow tensor, or a list of tensors (in case the model has
              multiple inputs).
          - A dict mapping input names to the corresponding array/tensors, if
              the model has named inputs.
        y: Target data. Like the input data `x`, it could be either Numpy
          array(s) or TensorFlow tensor(s). It should be consistent with `x`
          (you cannot have Numpy inputs and tensor targets, or inversely).
        sample_weight: Optional array of the same length as x, containing
          weights to apply to the model's loss for each sample. In the case of
          temporal data, you can pass a 2D array with shape (samples,
          sequence_length), to apply a different weight to every timestep of
          every sample.
        reset_metrics: If `True`, the metrics returned will be only for this
          batch. If `False`, the metrics will be statefully accumulated across
          batches.
        return_dict: If `True`, loss and metric results are returned as a dict,
          with each key being the name of the metric. If `False`, they are
          returned as a list.

    Returns:
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics).
        The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.

    Raises:
        RuntimeError: If `model.test_on_batch` is wrapped in a `tf.function`.
    """
    self._assert_compile_was_called()
    self._check_call_args('test_on_batch')
    _disallow_inside_tf_function('test_on_batch')
    with self.distribute_strategy.scope():
      iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
                                                    y, sample_weight)
      self.test_function = self.make_test_function()
      logs = self.test_function(iterator)

    if reset_metrics:
      self.reset_metrics()
    logs = tf_utils.sync_to_numpy_or_python_type(logs)
    if return_dict:
      return logs
    else:
      return flatten_metrics_in_order(logs, self.metrics_names)

  def predict_on_batch(self, x):
    """Returns predictions for a single batch of samples.

    Args:
        x: Input data. It could be:
          - A Numpy array (or array-like), or a list of arrays (in case the
              model has multiple inputs).
          - A TensorFlow tensor, or a list of tensors (in case the model has
              multiple inputs).

    Returns:
        Numpy array(s) of predictions.

    Raises:
        RuntimeError: If `model.predict_on_batch` is wrapped in a
          `tf.function`.
    """
    self._check_call_args('predict_on_batch')
    _disallow_inside_tf_function('predict_on_batch')
    with self.distribute_strategy.scope():
      iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x)
      self.predict_function = self.make_predict_function()
      outputs = self.predict_function(iterator)
    return tf_utils.sync_to_numpy_or_python_type(outputs)

  @doc_controls.do_not_generate_docs
  def fit_generator(self,
                    generator,
                    steps_per_epoch=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    validation_data=None,
                    validation_steps=None,
                    validation_freq=1,
                    class_weight=None,
                    max_queue_size=10,
                    workers=1,
                    use_multiprocessing=False,
                    shuffle=True,
                    initial_epoch=0):
    """Fits the model on data yielded batch-by-batch by a Python generator.

    DEPRECATED:
      `Model.fit` now supports generators, so there is no longer any need to
      use this endpoint.
    """
    # Deprecated shim: warn, then forward every argument to `Model.fit`.
    warnings.warn('`Model.fit_generator` is deprecated and '
                  'will be removed in a future version. '
                  'Please use `Model.fit`, which supports generators.')
    return self.fit(
        generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        class_weight=class_weight,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        shuffle=shuffle,
        initial_epoch=initial_epoch)

  @doc_controls.do_not_generate_docs
  def evaluate_generator(self,
                         generator,
                         steps=None,
                         callbacks=None,
                         max_queue_size=10,
                         workers=1,
                         use_multiprocessing=False,
                         verbose=0):
    """Evaluates the model on a data generator.

    DEPRECATED:
      `Model.evaluate` now supports generators, so there is no longer any need
      to use this endpoint.
    """
    # Deprecated shim: warn, then forward to `Model.evaluate`.
    warnings.warn('`Model.evaluate_generator` is deprecated and '
                  'will be removed in a future version. '
                  'Please use `Model.evaluate`, which supports generators.')
    self._check_call_args('evaluate_generator')

    return self.evaluate(
        generator,
        steps=steps,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        verbose=verbose,
        callbacks=callbacks)

  @doc_controls.do_not_generate_docs
  def predict_generator(self,
                        generator,
                        steps=None,
                        callbacks=None,
                        max_queue_size=10,
                        workers=1,
                        use_multiprocessing=False,
                        verbose=0):
    """Generates predictions for the input samples from a data generator.

    DEPRECATED:
      `Model.predict` now supports generators, so there is no longer any need
      to use this endpoint.
    """
    # Deprecated shim: warn, then forward to `Model.predict`.
    warnings.warn('`Model.predict_generator` is deprecated and '
                  'will be removed in a future version. '
                  'Please use `Model.predict`, which supports generators.')
    return self.predict(
        generator,
        steps=steps,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        verbose=verbose,
        callbacks=callbacks)

  ######################################################################
  # Functions below are not training related. They are for model weights
  # tracking, save/load, serialization, etc.
  ######################################################################

  @property
  def trainable_weights(self):
    # Union of trainable variables from all tracked sub-objects plus this
    # model's own trainable weights, deduplicated by identity.
    self._assert_weights_created()
    if not self._trainable:
      return []
    trainable_variables = []
    for trackable_obj in self._self_tracked_trackables:
      trainable_variables += trackable_obj.trainable_variables
    trainable_variables += self._trainable_weights
    return self._dedup_weights(trainable_variables)

  @property
  def non_trainable_weights(self):
    # When the model itself is frozen (`trainable=False`), *all* weights are
    # reported as non-trainable, ordered trainable-first.
    self._assert_weights_created()
    non_trainable_variables = []
    for trackable_obj in self._self_tracked_trackables:
      non_trainable_variables += trackable_obj.non_trainable_variables

    if not self._trainable:
      # Return order is all trainable vars, then all non-trainable vars.
      trainable_variables = []
      for trackable_obj in self._self_tracked_trackables:
        trainable_variables += trackable_obj.trainable_variables

      non_trainable_variables = (
          trainable_variables + self._trainable_weights +
          non_trainable_variables + self._non_trainable_weights)
    else:
      non_trainable_variables = (
          non_trainable_variables + self._non_trainable_weights)

    return self._dedup_weights(non_trainable_variables)

  def get_weights(self):
    """Retrieves the weights of the model.

    Returns:
        A flat list of Numpy arrays.
    """
    # Entering the strategy scope ensures distributed variables are read
    # correctly.
    with self.distribute_strategy.scope():
      return super(Model, self).get_weights()

  @traceback_utils.filter_traceback
  def save(self,
           filepath,
           overwrite=True,
           include_optimizer=True,
           save_format=None,
           signatures=None,
           options=None,
           save_traces=True):
    # pylint: disable=line-too-long
    """Saves the model to Tensorflow SavedModel or a single HDF5 file.
    Please see `tf.keras.models.save_model` or the
    [Serialization and Saving guide](https://keras.io/guides/serialization_and_saving/)
    for details.

    Args:
        filepath: String, PathLike, path to SavedModel or H5 file to save the
            model.
        overwrite: Whether to silently overwrite any existing file at the
            target location, or provide the user with a manual prompt.
        include_optimizer: If True, save optimizer's state together.
        save_format: Either `'tf'` or `'h5'`, indicating whether to save the
            model to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X,
            and 'h5' in TF 1.X.
        signatures: Signatures to save with the SavedModel. Applicable to the
            'tf' format only. Please see the `signatures` argument in
            `tf.saved_model.save` for details.
        options: (only applies to SavedModel format)
            `tf.saved_model.SaveOptions` object that specifies options for
            saving to SavedModel.
        save_traces: (only applies to SavedModel format) When enabled, the
            SavedModel will store the function traces for each layer. This
            can be disabled, so that only the configs of each layer are stored.
            Defaults to `True`. Disabling this will decrease serialization time
            and reduce file size, but it requires that all custom layers/models
            implement a `get_config()` method.

    Example:

    ```python
    from keras.models import load_model

    model.save('my_model.h5')  # creates a HDF5 file 'my_model.h5'
    del model  # deletes the existing model

    # returns a compiled model
    # identical to the previous one
    model = load_model('my_model.h5')
    ```
    """
    # pylint: enable=line-too-long
    # All the real work is delegated to the saving module.
    save.save_model(self, filepath, overwrite, include_optimizer, save_format,
                    signatures, options, save_traces)

  @traceback_utils.filter_traceback
  def save_weights(self,
                   filepath,
                   overwrite=True,
                   save_format=None,
                   options=None):
    """Saves all layer weights.

    Either saves in HDF5 or in TensorFlow format based on the `save_format`
    argument.

    When saving in HDF5 format, the weight file has:
      - `layer_names` (attribute), a list of strings
          (ordered names of model layers).
      - For every layer, a `group` named `layer.name`
        - For every such layer group, a group attribute `weight_names`,
            a list of strings
            (ordered names of weights tensor of the layer).
        - For every weight in the layer, a dataset
            storing the weight value, named after the weight tensor.

    When saving in TensorFlow format, all objects referenced by the network are
    saved in the same format as `tf.train.Checkpoint`, including any `Layer`
    instances or `Optimizer` instances assigned to object attributes. For
    networks constructed from inputs and outputs using `tf.keras.Model(inputs,
    outputs)`, `Layer` instances used by the network are tracked/saved
    automatically. For user-defined classes which inherit from `tf.keras.Model`,
    `Layer` instances must be assigned to object attributes, typically in the
    constructor. See the documentation of `tf.train.Checkpoint` and
    `tf.keras.Model` for details.

    While the formats are the same, do not mix `save_weights` and
    `tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be
    loaded using `Model.load_weights`. Checkpoints saved using
    `tf.train.Checkpoint.save` should be restored using the corresponding
    `tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over
    `save_weights` for training checkpoints.

    The TensorFlow format matches objects and variables by starting at a root
    object, `self` for `save_weights`, and greedily matching attribute
    names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this
    is the `Checkpoint` even if the `Checkpoint` has a model attached. This
    means saving a `tf.keras.Model` using `save_weights` and loading into a
    `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match
    the `Model`'s variables. See the
    [guide to training checkpoints](https://www.tensorflow.org/guide/checkpoint)
    for details on the TensorFlow format.

    Args:
        filepath: String or PathLike, path to the file to save the weights to.
            When saving in TensorFlow format, this is the prefix used for
            checkpoint files (multiple files are generated). Note that the
            '.h5' suffix causes weights to be saved in HDF5 format.
        overwrite: Whether to silently overwrite any existing file at the
            target location, or provide the user with a manual prompt.
        save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
            '.keras' will default to HDF5 if `save_format` is `None`.
            Otherwise `None` defaults to 'tf'.
        options: Optional `tf.train.CheckpointOptions` object that specifies
            options for saving weights.

    Raises:
        ImportError: If `h5py` is not available when attempting to save in
            HDF5 format.
    """
    self._assert_weights_created()
    filepath = path_to_string(filepath)
    filepath_is_h5 = saving_utils.is_hdf5_filepath(filepath)
    # Resolve the effective save format: explicit argument wins, otherwise
    # it is inferred from the file extension.
    if save_format is None:
      if filepath_is_h5:
        save_format = 'h5'
      else:
        save_format = 'tf'
    else:
      user_format = save_format.lower().strip()
      if user_format in ('tensorflow', 'tf'):
        save_format = 'tf'
      elif user_format in ('hdf5', 'h5', 'keras'):
        save_format = 'h5'
      else:
        raise ValueError(
            f'Unknown format. Received: `save_format`={save_format}. Was '
            'expecting one of {"tf", "h5"}.')
    if save_format == 'tf' and filepath_is_h5:
      raise ValueError(
          'save_weights got save_format="tf"/"tensorflow", but the '
          f'filepath ({filepath}) looks like an HDF5 file. '
          'Omit the ".h5"/".keras" when saving in TensorFlow format.')

    if save_format == 'h5' and h5py is None:
      raise ImportError(
          '`save_weights` requires h5py when saving in hdf5, but h5py is not '
          'available. Try installing h5py package.')
    if save_format == 'tf':
      # TF-format checkpoints produce multiple files; the '.index' file is
      # the one whose existence signals a prior checkpoint at this prefix.
      check_filepath = filepath + '.index'
    else:
      check_filepath = filepath
    # If file exists and should not be overwritten:
    if not overwrite and os.path.isfile(check_filepath):
      proceed = ask_to_proceed_with_overwrite(check_filepath)
      if not proceed:
        return
    if save_format == 'h5':
      with h5py.File(filepath, 'w') as f:
        hdf5_format.save_weights_to_hdf5_group(f, self)
    else:
      if tf.executing_eagerly():
        session = None
      else:
        session = backend.get_session()
      self._trackable_saver.save(filepath, session=session, options=options)
      # Record this checkpoint so it's visible from tf.train.latest_checkpoint.
      tf.__internal__.train.update_checkpoint_state(
          save_dir=os.path.dirname(filepath),
          model_checkpoint_path=filepath,
          save_relative_paths=True,
          all_model_checkpoint_paths=[filepath])

  @traceback_utils.filter_traceback
  def load_weights(self,
                   filepath,
                   by_name=False,
                   skip_mismatch=False,
                   options=None):
    """Loads all layer weights, either from a TensorFlow or an HDF5 weight file.

    If `by_name` is False weights are loaded based on the network's
    topology. This means the architecture should be the same as when the
    weights were saved.  Note that layers that don't have weights are not
    taken into account in the topological ordering, so adding or removing
    layers is fine as long as they don't have weights.

    If `by_name` is True, weights are loaded into layers only if they share
    the same name. This is useful for fine-tuning or transfer-learning models
    where some of the layers have changed.

    Only topological loading (`by_name=False`) is supported when loading
    weights from the TensorFlow format. Note that topological loading differs
    slightly between TensorFlow and HDF5 formats for user-defined classes
    inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of
    weights, while the TensorFlow format loads based on the object-local names
    of attributes to which layers are assigned in the `Model`'s constructor.
    Args:
        filepath: String, path to the weights file to load. For weight files
            in TensorFlow format, this is the file prefix (the same as was
            passed to `save_weights`). This can also be a path to a SavedModel
            saved from `model.save`.
        by_name: Boolean, whether to load weights by name or by topological
            order. Only topological loading is supported for weight files in
            TensorFlow format.
        skip_mismatch: Boolean, whether to skip loading of layers where there
            is a mismatch in the number of weights, or a mismatch in the shape
            of the weight (only valid when `by_name=True`).
        options: Optional `tf.train.CheckpointOptions` object that specifies
            options for loading weights.

    Returns:
        When loading a weight file in TensorFlow format, returns the same
        status object as `tf.train.Checkpoint.restore`. When graph building,
        restore ops are run automatically as soon as the network is built (on
        first call for user-defined classes inheriting from `Model`,
        immediately if it is already built).

        When loading weights in HDF5 format, returns `None`.

    Raises:
        ImportError: If `h5py` is not available and the weight file is in HDF5
            format.
        ValueError: If `skip_mismatch` is set to `True` when `by_name` is
          `False`.
    """
    # TPUStrategy with steps_per_run > 1 cannot restore TF-format checkpoints.
    if backend.is_tpu_strategy(self._distribution_strategy):
      if (self._distribution_strategy.extended.steps_per_run > 1 and
          (not saving_utils.is_hdf5_filepath(filepath))):
        spr = self._distribution_strategy.extended.steps_per_run
        raise ValueError('Load weights is not implemented with TPUStrategy '
                         'with `steps_per_run` greater than 1. The '
                         f'`steps_per_run` is {spr}')
    if skip_mismatch and not by_name:
      raise ValueError(
          'When calling model.load_weights, skip_mismatch can only be set to '
          'True when by_name is True.')

    filepath, save_format = _detect_save_format(filepath)
    if save_format == 'tf':
      status = self._trackable_saver.restore(filepath, options)
      if by_name:
        raise NotImplementedError(
            'Weights may only be loaded based on topology into Models when '
            'loading TensorFlow-formatted weights (got by_name=True to '
            'load_weights).')
      if not tf.executing_eagerly():
        session = backend.get_session()
        # Restore existing variables (if any) immediately, and set up a
        # streaming restore for any variables created in the future.
        tf.__internal__.tracking.streaming_restore(status=status,
                                                   session=session)
      status.assert_nontrivial_match()
    else:
      status = None
      if h5py is None:
        raise ImportError(
            '`load_weights` requires h5py package when loading weights from '
            'HDF5. Try installing h5py.')
      if not self._is_graph_network and not self.built:
        raise ValueError(
            'Unable to load weights saved in HDF5 format into a subclassed '
            'Model which has not created its variables yet. Call the Model '
            'first, then load the weights.')
      self._assert_weights_created()
      with h5py.File(filepath, 'r') as f:
        # Files written by `model.save` nest weights under 'model_weights'.
        if 'layer_names' not in f.attrs and 'model_weights' in f:
          f = f['model_weights']
        if by_name:
          hdf5_format.load_weights_from_hdf5_group_by_name(
              f, self, skip_mismatch)
        else:
          hdf5_format.load_weights_from_hdf5_group(f, self)

    # Perform any layer defined finalization of the layer state.
    for layer in self.layers:
      layer.finalize_state()
    return status

  def _updated_config(self):
    """Util shared between different serialization methods.

    Returns:
        Model config with Keras version information added.
""" from keras import __version__ as keras_version # pylint: disable=g-import-not-at-top config = self.get_config() model_config = { 'class_name': self.__class__.__name__, 'config': config, 'keras_version': keras_version, 'backend': backend.backend() } return model_config def get_config(self): raise NotImplementedError @classmethod def from_config(cls, config, custom_objects=None): # `from_config` assumes `cls` is either `Functional` or a child class of # `Functional`. In the case that `cls` is meant to behave like a child class # of `Functional` but only inherits from the `Model` class, we have to call # `cls(...)` instead of `Functional.from_config`. from keras.engine import functional # pylint: disable=g-import-not-at-top with generic_utils.SharedObjectLoadingScope(): input_tensors, output_tensors, created_layers = ( functional.reconstruct_from_config(config, custom_objects)) # Initialize a model belonging to `cls`, which can be user-defined or # `Functional`. model = cls(inputs=input_tensors, outputs=output_tensors, name=config.get('name')) functional.connect_ancillary_layers(model, created_layers) return model def to_json(self, **kwargs): """Returns a JSON string containing the network configuration. To load a network from a JSON save file, use `keras.models.model_from_json(json_string, custom_objects={})`. Args: **kwargs: Additional keyword arguments to be passed to `json.dumps()`. Returns: A JSON string. """ model_config = self._updated_config() return json.dumps( model_config, default=json_utils.get_json_type, **kwargs) def to_yaml(self, **kwargs): """Returns a yaml string containing the network configuration. Note: Since TF 2.6, this method is no longer supported and will raise a RuntimeError. To load a network from a yaml save file, use `keras.models.model_from_yaml(yaml_string, custom_objects={})`. `custom_objects` should be a dictionary mapping the names of custom losses / layers / etc to the corresponding functions / classes. 
Args: **kwargs: Additional keyword arguments to be passed to `yaml.dump()`. Returns: A YAML string. Raises: RuntimeError: announces that the method poses a security risk """ raise RuntimeError( 'Method `model.to_yaml()` has been removed due to security risk of ' 'arbitrary code execution. Please use `model.to_json()` instead.' ) def reset_states(self): for layer in self.layers: if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False): layer.reset_states() @property @doc_controls.do_not_generate_docs def state_updates(self): """Deprecated, do NOT use! Returns the `updates` from all layers that are stateful. This is useful for separating training updates and state updates, e.g. when we need to update a layer's internal state during prediction. Returns: A list of update ops. """ warnings.warn('`Model.state_updates` will be removed in a future version. ' 'This property should not be used in TensorFlow 2.0, ' 'as `updates` are applied automatically.') state_updates = [] for layer in self.layers: if getattr(layer, 'stateful', False): if hasattr(layer, 'updates'): state_updates += layer.updates return state_updates @property def weights(self): """Returns the list of all layer variables/weights. Note: This will not track the weights of nested `tf.Modules` that are not themselves Keras layers. Returns: A list of variables. """ return self._dedup_weights(self._undeduplicated_weights) @property def _undeduplicated_weights(self): """Returns the undeduplicated list of all layer variables/weights.""" self._assert_weights_created() weights = [] for layer in self._self_tracked_trackables: weights += layer.variables weights += (self._trainable_weights + self._non_trainable_weights) return weights def summary(self, line_length=None, positions=None, print_fn=None): """Prints a string summary of the network. Args: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). 
positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. Defaults to `print`. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. Raises: ValueError: if `summary()` is called before the model is built. """ if not self.built: raise ValueError('This model has not yet been built. ' 'Build the model first by calling `build()` or calling ' '`fit()` with some data, or specify ' 'an `input_shape` argument in the first layer(s) for ' 'automatic build.') layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn) @property def layers(self): return list(self._flatten_layers(include_self=False, recursive=False)) @layers.setter def layers(self, _): raise AttributeError( '`Model.layers` attribute is reserved and should not be used. ' 'Please use another name.') def get_layer(self, name=None, index=None): """Retrieves a layer based on either its name (unique) or index. If `name` and `index` are both provided, `index` will take precedence. Indices are based on order of horizontal graph traversal (bottom-up). Args: name: String, name of layer. index: Integer, index of layer. Returns: A layer instance. """ # TODO(fchollet): We could build a dictionary based on layer names # since they are constant, but we have not done that yet. if index is not None and name is not None: raise ValueError('Provide only a layer name or a layer index. Received: ' f'index={index}, name={name}.') if index is not None: if len(self.layers) <= index: raise ValueError(f'Was asked to retrieve layer at index {str(index)}' f' but model only has {str(len(self.layers))}' ' layers.') else: return self.layers[index] if name is not None: for layer in self.layers: if layer.name == name: return layer raise ValueError(f'No such layer: {name}. 
Existing layers are ' f'{self.layers}.') raise ValueError('Provide either a layer name or layer index at ' '`get_layer`.') @tf.__internal__.tracking.no_automatic_dependency_tracking def _set_save_spec(self, inputs, args=None, kwargs=None): """Defines the save spec so that serialization is able to trace model call. The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are saved into a tuple of `([inputs] + args, kwargs)`. The input `TensorSpec` names are updated to match the built `input_names`. The specs can be retrieved with the `save_spec` property. Args: inputs: possibly nested inputs passed into the call function. args: a list of positional arguments passed into call. kwargs: a dictionary of keyword arguments passed into call. """ if self._saved_model_inputs_spec is not None: return # Already set. args = args or [] kwargs = kwargs or {} input_names = self.input_names if not input_names: input_names = compile_utils.create_pseudo_input_names(inputs) flat_inputs = tf.nest.flatten(inputs) inputs_spec = [] for name, tensor in zip(input_names, flat_inputs): inputs_spec.append( tf_utils.get_tensor_spec(tensor, dynamic_batch=False, name=name)) inputs_spec = tf.nest.pack_sequence_as(inputs, inputs_spec) super(Model, self)._set_save_spec(inputs_spec, args, kwargs) # Store the input shapes if (self.__class__.__name__ == 'Sequential' and self._build_input_shape is None): self._build_input_shape = tf.nest.map_structure( lambda x: None if x is None else x.shape, inputs_spec) def save_spec(self, dynamic_batch=True): """Returns the `tf.TensorSpec` of call inputs as a tuple `(args, kwargs)`. This value is automatically defined after calling the model for the first time. Afterwards, you can use it when exporting the model for serving: ```python model = tf.keras.Model(...) @tf.function def serve(*args, **kwargs): outputs = model(*args, **kwargs) # Apply postprocessing steps, or add additional outputs. ... return outputs # arg_specs is `[tf.TensorSpec(...), ...]`. 
kwarg_specs, in this example, is # an empty dict since functional models do not use keyword arguments. arg_specs, kwarg_specs = model.save_spec() model.save(path, signatures={ 'serving_default': serve.get_concrete_function(*arg_specs, **kwarg_specs) }) ``` Args: dynamic_batch: Whether to set the batch sizes of all the returned `tf.TensorSpec` to `None`. (Note that when defining functional or Sequential models with `tf.keras.Input([...], batch_size=X)`, the batch size will always be preserved). Defaults to `True`. Returns: If the model inputs are defined, returns a tuple `(args, kwargs)`. All elements in `args` and `kwargs` are `tf.TensorSpec`. If the model inputs are not defined, returns `None`. The model inputs are automatically set when calling the model, `model.fit`, `model.evaluate` or `model.predict`. """ return self._get_save_spec(dynamic_batch, inputs_only=False) def _assert_weights_created(self): """Asserts that all the weights for the model have been created. For a non-dynamic model, the weights must already be created after the layer has been called. For a dynamic model, the exact list of weights can never be known for certain since it may change at any time during execution. We run this check right before accessing weights or getting the Numpy value for the current weights. Otherwise, if the layer has never been called, the user would just get an empty list, which is misleading. Raises: ValueError: if the weights of the network have not yet been created. """ if self.dynamic: return if ('build' in self.__class__.__dict__ and self.__class__ != Model and not self.built): # For any model that has customized build() method but hasn't # been invoked yet, this will cover both sequential and subclass model. # Also make sure to exclude Model class itself which has build() defined. raise ValueError(f'Weights for model {self.name} have not yet been ' 'created. 
' 'Weights are created when the Model is first called on ' 'inputs or `build()` is called with an `input_shape`.') def _check_call_args(self, method_name): """Check that `call()` has only one positional arg.""" # Always allow first arg, regardless of arg name. fullargspec = self._call_full_argspec if fullargspec.defaults: positional_args = fullargspec.args[:-len(fullargspec.defaults)] else: positional_args = fullargspec.args if 'training' in positional_args: positional_args.remove('training') # self and first arg can be positional. if len(positional_args) > 2: extra_args = positional_args[2:] raise ValueError( f'Models passed to `{method_name}` can only have `training` ' 'and the first argument in `call()` as positional arguments, ' f'found: {str(extra_args)}.') def _validate_compile(self, optimizer, metrics, **kwargs): """Performs validation checks for the default `compile()`.""" if any( isinstance(opt, optimizer_v1.Optimizer) for opt in tf.nest.flatten(optimizer)): raise ValueError( f'`tf.compat.v1.keras` Optimizer ({optimizer}) is ' 'not supported when eager execution is enabled. Use a ' '`tf.keras` Optimizer instead, or disable eager ' 'execution.') kwargs.pop('cloning', None) # Legacy DistStrat argument, never used. kwargs.pop('experimental_run_tf_function', None) # Always `True`. distribute_arg = kwargs.pop('distribute', None) if distribute_arg is not None: raise ValueError( '`distribute` argument in compile is not available in TF 2.0. Please ' 'create the model under the `strategy.scope()`. Received: ' f'{distribute_arg}.') target_tensor_arg = kwargs.pop('target_tensors', None) if target_tensor_arg is not None: raise ValueError( '`target_tensors` argument is not supported when executing eagerly. ' f'Received: {target_tensor_arg}.') invalid_kwargs = set(kwargs) - {'sample_weight_mode'} if invalid_kwargs: raise TypeError('Invalid keyword argument(s) in `compile()`: ' f'{(invalid_kwargs,)}. 
Valid keyword arguments include ' '"cloning", "experimental_run_tf_function", "distribute",' ' "target_tensors", or "sample_weight_mode".') # Model must be created and compiled with the same DistStrat. if self.built and tf.distribute.has_strategy(): strategy = tf.distribute.get_strategy() for v in self.variables: if not strategy.extended.variable_created_in_scope(v): raise ValueError( f'Variable ({v}) was not created in the distribution strategy ' f'scope of ({strategy}). It is most likely because some ' 'layers, model, or optimizer was being created outside the ' 'distribution strategy scope. Try to make sure your code looks ' 'similar to the following.\n' 'with strategy.scope():\n' ' model=_create_model()\n' ' model.compile(...)') # Model metrics must be created in the same distribution strategy scope # as the model. strategy = self.distribute_strategy for metric in tf.nest.flatten(metrics): for v in getattr(metric, 'variables', []): if not strategy.extended.variable_created_in_scope(v): raise ValueError( f'Metric ({metric}) passed to `model.compile` was created inside ' 'a different distribution strategy scope than the model. All ' 'metrics must be created in the same distribution strategy ' f'scope as the model (in this case {strategy}). If you pass in a ' 'string identifier for a metric to compile, the metric will ' 'automatically be created in the correct distribution ' 'strategy scope.' ) # Model metrics must be created in the same distribution strategy scope # as the model. for opt in tf.nest.flatten(optimizer): for v in getattr(opt, '_weights', []): if not strategy.extended.variable_created_in_scope(v): raise ValueError( f'Optimizer ({optimizer}) passed to `model.compile` was created ' 'inside a different distribution strategy scope than the model. ' 'All optimizers must be created in the same distribution ' f'strategy scope as the model (in this case {strategy}). 
If you ' 'pass in a string identifier for an optimizer to compile, the ' 'optimizer will automatically be created in the correct ' 'distribution strategy scope.' ) def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch): """Maybe load initial epoch from ckpt considering possible worker recovery. Refer to tensorflow/python/keras/distribute/worker_training_state.py for more information. Args: initial_epoch: The original initial_epoch user passes in in `fit()`. Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the `initial_epoch` the user passes in. """ if self._training_state is not None: return self._training_state.maybe_load_initial_epoch_from_ckpt( initial_epoch, mode=ModeKeys.TRAIN) return initial_epoch def _assert_compile_was_called(self): # Checks whether `compile` has been called. If it has been called, # then the optimizer is set. This is different from whether the # model is compiled # (i.e. whether the model is built and its inputs/outputs are set). if not self._is_compiled: raise RuntimeError('You must compile your model before ' 'training/testing. ' 'Use `model.compile(optimizer, loss)`.') def _set_inputs(self, inputs, outputs=None, training=None): """This method is for compat with Modelv1. Only inputs are needed here.""" self._set_save_spec(inputs) @property def _trackable_saved_model_saver(self): return model_serialization.ModelSavedModelSaver(self) def _list_functions_for_serialization(self, serialization_cache): # SavedModel needs to ignore the execution functions. 
train_function = self.train_function test_function = self.test_function predict_function = self.predict_function train_tf_function = self.train_tf_function self.train_function = None self.test_function = None self.predict_function = None self.train_tf_function = None functions = super( Model, self)._list_functions_for_serialization(serialization_cache) self.train_function = train_function self.test_function = test_function self.predict_function = predict_function self.train_tf_function = train_tf_function return functions def _should_eval(self, epoch, validation_freq): epoch = epoch + 1 # one-index the user-facing epoch. if isinstance(validation_freq, int): return epoch % validation_freq == 0 elif isinstance(validation_freq, list): return epoch in validation_freq else: raise ValueError('Expected `validation_freq` to be a list or int. ' f'Received: validation_freq={validation_freq} of the ' f'type {type(validation_freq)}.') ###################################################################### # Functions below exist only as v1 / v2 compatibility shims. ###################################################################### def _get_compile_args(self, user_metrics=True): """Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or `Metric` objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model. 
""" self._assert_compile_was_called() # pylint: disable=protected-access saved_metrics = self.compiled_metrics._user_metrics saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics if not user_metrics: if saved_metrics is not None: saved_metrics = self.compiled_metrics._metrics if saved_weighted_metrics is not None: saved_weighted_metrics = self.compiled_metrics._weighted_metrics compile_args = { 'optimizer': self.optimizer, 'loss': self.compiled_loss._user_losses, 'metrics': saved_metrics, 'weighted_metrics': saved_weighted_metrics, 'loss_weights': self.compiled_loss._user_loss_weights, } # pylint: enable=protected-access return compile_args def _get_callback_model(self): return self def _in_multi_worker_mode(self): return self.distribute_strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access @property def _compile_was_called(self): return self._is_compiled def reduce_per_replica(values, strategy, reduction='first'): """Reduce PerReplica objects. Args: values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are returned as-is. strategy: `tf.distribute.Strategy` object. reduction: One of 'first', 'concat'. Returns: Structure of `Tensor`s. """ def _reduce(v): """Reduce a single `PerReplica` object.""" if reduction == 'concat' and _collective_all_reduce_multi_worker(strategy): return _multi_worker_concat(v, strategy) if not _is_per_replica_instance(v): return v elif reduction == 'first': return strategy.unwrap(v)[0] elif reduction == 'concat': if _is_tpu_multi_host(strategy): return _tpu_multi_host_concat(v, strategy) else: return concat(strategy.unwrap(v)) else: raise ValueError('`reduction` must be "first" or "concat". 
def reduce_per_replica(values, strategy, reduction='first'):
  """Reduce PerReplica objects.

  Args:
    values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
      returned as-is.
    strategy: `tf.distribute.Strategy` object.
    reduction: One of 'first', 'concat'.

  Returns:
    Structure of `Tensor`s.
  """

  def _reduce(v):
    """Reduce a single `PerReplica` object."""
    if reduction == 'concat' and _collective_all_reduce_multi_worker(strategy):
      return _multi_worker_concat(v, strategy)
    if not _is_per_replica_instance(v):
      return v
    elif reduction == 'first':
      return strategy.unwrap(v)[0]
    elif reduction == 'concat':
      if _is_tpu_multi_host(strategy):
        return _tpu_multi_host_concat(v, strategy)
      else:
        return concat(strategy.unwrap(v))
    else:
      raise ValueError('`reduction` must be "first" or "concat". Received: '
                       f'reduction={reduction}.')

  return tf.nest.map_structure(_reduce, values)


def concat(tensors, axis=0):
  """Concats `tensor`s along `axis`."""
  if isinstance(tensors[0], tf.SparseTensor):
    return tf.sparse.concat(axis=axis, sp_inputs=tensors)
  return tf.concat(tensors, axis=axis)


def _is_tpu_multi_host(strategy):
  return (backend.is_tpu_strategy(strategy) and
          strategy.extended.num_hosts > 1)


def _tpu_multi_host_concat(v, strategy):
  """Correctly order TPU PerReplica objects."""
  replicas = strategy.unwrap(v)
  # When distributed datasets are created from Tensors / NumPy,
  # TPUStrategy.experimental_distribute_dataset shards data in
  # (Replica, Host) order, and TPUStrategy.unwrap returns it in
  # (Host, Replica) order.
  # TODO(b/150317897): Figure out long-term plan here.
  num_replicas_per_host = strategy.extended.num_replicas_per_host
  ordered_replicas = []
  for replica_id in range(num_replicas_per_host):
    ordered_replicas += replicas[replica_id::num_replicas_per_host]
  return concat(ordered_replicas)


def _collective_all_reduce_multi_worker(strategy):
  return (isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy)
         ) and strategy.extended._in_multi_worker_mode()  # pylint: disable=protected-access


# TODO(wxinyi): merge this with _tpu_multi_host_concat once we have all_gather
# for all strategies
def _multi_worker_concat(v, strategy):
  """Order PerReplica objects for CollectiveAllReduceStrategy and concat."""
  replicas = strategy.gather(v, axis=0)
  # v might not have the same shape on different replicas
  if _is_per_replica_instance(v):
    shapes = tf.concat([
        tf.expand_dims(tf.shape(single_value)[0], axis=0)
        for single_value in v.values
    ],
                       axis=0)
    all_shapes = strategy.gather(shapes, axis=0)
  else:
    # v is a tensor. This may happen when, say, we have 2x1 multi-worker.
    all_shapes = strategy.gather(
        tf.expand_dims(tf.shape(v)[0], axis=0), axis=0)

  replicas = tf.split(
      replicas,
      num_or_size_splits=all_shapes,
      num=strategy.num_replicas_in_sync)
  ordered_replicas = []
  num_replicas_per_worker = len(strategy.extended.worker_devices)
  for replica_id in range(num_replicas_per_worker):
    ordered_replicas += replicas[replica_id::num_replicas_per_worker]
  return concat(ordered_replicas)


def _is_scalar(x):
  return isinstance(x, (tf.Tensor, tf.Variable)) and x.shape.rank == 0


def write_scalar_summaries(logs, step):
  for name, value in logs.items():
    if _is_scalar(value):
      tf.summary.scalar('batch_' + name, value, step=step)


def _minimum_control_deps(outputs):
  """Returns the minimum control dependencies to ensure step succeeded."""
  if tf.executing_eagerly():
    return []  # Control dependencies not needed.
  outputs = tf.nest.flatten(outputs, expand_composites=True)
  for out in outputs:
    # Variables can't be control dependencies.
    if not isinstance(out, tf.Variable):
      return [out]  # Return first Tensor or Op from outputs.
  return []  # No viable Tensor or Op to use for control deps.


def _disallow_inside_tf_function(method_name):
  if tf.inside_function():
    error_msg = (
        'Detected a call to `Model.{method_name}` inside a `tf.function`. '
        '`Model.{method_name} is a high-level endpoint that manages its own '
        '`tf.function`. Please move the call to `Model.{method_name}` outside '
        'of all enclosing `tf.function`s. Note that you can call a `Model` '
        'directly on `Tensor`s inside a `tf.function` like: `model(x)`.'
    ).format(method_name=method_name)
    raise RuntimeError(error_msg)


def _detect_save_format(filepath):
  """Returns path to weights file and save format."""

  filepath = path_to_string(filepath)
  if saving_utils.is_hdf5_filepath(filepath):
    return filepath, 'h5'

  # Filepath could be a TensorFlow checkpoint file prefix or SavedModel
  # directory. It's possible for filepath to be both a prefix and directory.
  # Prioritize checkpoint over SavedModel.
  if _is_readable_tf_checkpoint(filepath):
    save_format = 'tf'
  elif tf.saved_model.contains_saved_model(filepath):
    ckpt_path = os.path.join(filepath, tf.saved_model.VARIABLES_DIRECTORY,
                             tf.saved_model.VARIABLES_FILENAME)
    if _is_readable_tf_checkpoint(ckpt_path):
      filepath = ckpt_path
      save_format = 'tf'
    else:
      raise ValueError('Unable to load weights. filepath {} appears to be a '
                       'SavedModel directory, but checkpoint either doesn\'t '
                       'exist, or is incorrectly formatted.'.format(filepath))
  else:
    # Not a TensorFlow checkpoint. This filepath is likely an H5 file that
    # doesn't have the hdf5/keras extensions.
    save_format = 'h5'
  return filepath, save_format


def _is_readable_tf_checkpoint(filepath):
  try:
    tf.compat.v1.train.NewCheckpointReader(filepath)
    return True
  except tf.errors.DataLossError:
    # The checkpoint is not readable in TensorFlow format.
    return False


def flatten_metrics_in_order(logs, metrics_names):
  """Turns the `logs` dict into a list as per key order of `metrics_names`."""
  results = []
  for name in metrics_names:
    if name in logs:
      results.append(logs[name])
  for key in sorted(logs.keys()):
    if key not in metrics_names:
      results.append(logs[key])
  if len(results) == 1:
    return results[0]
  return results


def _is_per_replica_instance(obj):
  return (isinstance(obj, tf.distribute.DistributedValues) and
          isinstance(obj, tf.__internal__.CompositeTensor))


def saver_with_op_caching(obj):
  # Caching SaveableObjects is only useful in graph mode; in eager mode the
  # cache would pin objects in memory for no benefit.
  if tf.executing_eagerly():
    saveables_cache = None
  else:
    saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
  return tf.__internal__.tracking.TrackableSaver(
      tf.__internal__.tracking.ObjectGraphView(
          weakref.ref(obj), saveables_cache=saveables_cache))


def disable_multi_worker(method):
  """Decorator that disallows multi-worker use of `method`."""

  def _method_wrapper(self, *args, **kwargs):
    if self._in_multi_worker_mode():  # pylint: disable=protected-access
      raise ValueError(f'{method.__name__} is not supported in multi-worker '
                       'mode. Please use a non-multi-worker '
                       '`tf.distribute.Strategy` such as '
                       '`tf.distribute.MirroredStrategy`.')
    return method(self, *args, **kwargs)

  return tf.__internal__.decorator.make_decorator(
      target=method, decorator_func=_method_wrapper)


def inject_functional_model_class(cls):
  """Inject `Functional` into the hierarchy of this class if needed."""
  from keras.engine import functional  # pylint: disable=g-import-not-at-top
  from keras.engine import training_v1  # pylint: disable=g-import-not-at-top
  if cls == Model or cls == training_v1.Model:
    return functional.Functional
  # In case there is any multiple inheritance, we stop injecting the
  # class if keras model is not in its class hierarchy.
  if cls == object:
    return object

  cls.__bases__ = tuple(inject_functional_model_class(base)
                        for base in cls.__bases__)
  # Trigger any `__new__` class swapping that needed to happen on `Functional`
  # but did not because functional was not in the class hierarchy.
  cls.__new__(cls)

  return cls


def is_functional_model_init_params(args, kwargs):
  return (len(args) == 2 or
          len(args) == 1 and 'outputs' in kwargs or
          'inputs' in kwargs and 'outputs' in kwargs)
133,798
41.706352
113
py
keras
keras-master/keras/engine/sequential.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model."""

import tensorflow.compat.v2 as tf

import copy
from keras import layers as layer_module
from keras.engine import base_layer
from keras.engine import functional
from keras.engine import input_layer
from keras.engine import training_utils
from keras.saving.saved_model import model_serialization
from keras.utils import generic_utils
from keras.utils import layer_utils
from keras.utils import tf_inspect
from keras.utils import tf_utils
from keras.utils import traceback_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export


SINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '
                                 'a single output tensor. For multi-output '
                                 'layers, use the functional API.')


@keras_export('keras.Sequential', 'keras.models.Sequential')
class Sequential(functional.Functional):
  """`Sequential` groups a linear stack of layers into a `tf.keras.Model`.

  `Sequential` provides training and inference features on this model.

  Examples:

  ```python
  # Optionally, the first layer can receive an `input_shape` argument:
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
  # Afterwards, we do automatic shape inference:
  model.add(tf.keras.layers.Dense(4))

  # This is identical to the following:
  model = tf.keras.Sequential()
  model.add(tf.keras.Input(shape=(16,)))
  model.add(tf.keras.layers.Dense(8))

  # Note that you can also omit the `input_shape` argument.
  # In that case the model doesn't have any weights until the first call
  # to a training/evaluation method (since it isn't yet built):
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(8))
  model.add(tf.keras.layers.Dense(4))
  # model.weights not created yet

  # Whereas if you specify the input shape, the model gets built
  # continuously as you are adding layers:
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
  model.add(tf.keras.layers.Dense(4))
  len(model.weights)  # Returns "4"

  # When using the delayed-build pattern (no input shape specified), you can
  # choose to manually build your model by calling
  # `build(batch_input_shape)`:
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(8))
  model.add(tf.keras.layers.Dense(4))
  model.build((None, 16))
  len(model.weights)  # Returns "4"

  # Note that when using the delayed-build pattern (no input shape specified),
  # the model gets built the first time you call `fit`, `eval`, or `predict`,
  # or the first time you call the model on some input data.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(8))
  model.add(tf.keras.layers.Dense(1))
  model.compile(optimizer='sgd', loss='mse')
  # This builds the model for the first time:
  model.fit(x, y, batch_size=32, epochs=10)
  ```
  """

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  @traceback_utils.filter_traceback
  def __init__(self, layers=None, name=None):
    """Creates a `Sequential` model instance.

    Args:
      layers: Optional list of layers to add to the model.
      name: Optional name for the model.
    """
    # Skip the init in FunctionalModel since model doesn't have input/output
    # yet.
    super(functional.Functional, self).__init__(  # pylint: disable=bad-super-call
        name=name, autocast=False)
    base_layer.keras_api_gauge.get_cell('Sequential').set(True)
    self.supports_masking = True
    self._compute_output_and_mask_jointly = True
    self._auto_track_sub_layers = False
    self._inferred_input_shape = None
    self._has_explicit_input_shape = False
    self._input_dtype = None
    self._layer_call_argspecs = {}
    self._created_nodes = set()
    # Flag that indicate whether the sequential network topology has been
    # created. It is false when there isn't any layer, or the layers don't
    # have an input shape.
    self._graph_initialized = False

    # Unfortunately some Sequential models using custom layers or FeatureColumn
    # layers have multiple inputs. This is fundamentally incompatible with
    # most of the Sequential API, and we have to disable a number of features
    # for such models.
    self._use_legacy_deferred_behavior = False

    # Add to the model any layers passed to the constructor.
    if layers:
      if not isinstance(layers, (list, tuple)):
        layers = [layers]
      for layer in layers:
        self.add(layer)

  @property
  def layers(self):
    # Historically, `sequential.layers` only returns layers that were added
    # via `add`, and omits the auto-generated `InputLayer` that comes at the
    # bottom of the stack.
    # `Trackable` manages the `_layers` attributes and does filtering
    # over it.
    layers = super(Sequential, self).layers
    if layers and isinstance(layers[0], input_layer.InputLayer):
      return layers[1:]
    return layers[:]

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  @traceback_utils.filter_traceback
  def add(self, layer):
    """Adds a layer instance on top of the layer stack.

    Args:
      layer: layer instance.

    Raises:
      TypeError: If `layer` is not a layer instance.
      ValueError: In case the `layer` argument does not
        know its input shape.
      ValueError: In case the `layer` argument has
        multiple output tensors, or is already connected
        somewhere else (forbidden in `Sequential` models).
    """
    # If we are passed a Keras tensor created by keras.Input(), we can extract
    # the input layer from its keras history and use that without any loss of
    # generality.
    if hasattr(layer, '_keras_history'):
      origin_layer = layer._keras_history[0]
      if isinstance(origin_layer, input_layer.InputLayer):
        layer = origin_layer
    if isinstance(layer, tf.Module):
      if not isinstance(layer, base_layer.Layer):
        layer = functional.ModuleWrapper(layer)
    else:
      raise TypeError('The added layer must be an instance of class Layer. '
                      f'Received: layer={layer} of type {type(layer)}.')

    tf_utils.assert_no_legacy_layers([layer])
    if not self._is_layer_name_unique(layer):
      raise ValueError(
          'All layers added to a Sequential model '
          f'should have unique names. Name "{layer.name}" is already the name '
          'of a layer in this model. Update the `name` argument '
          'to pass a unique name.')

    self.built = False
    set_inputs = False
    self._maybe_create_attribute('_self_tracked_trackables', [])
    if not self._self_tracked_trackables:
      if isinstance(layer, input_layer.InputLayer):
        # Case where the user passes an Input or InputLayer layer via `add`.
        set_inputs = True
      else:
        batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
        if batch_shape:
          # Instantiate an input layer.
          x = input_layer.Input(
              batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
          # This will build the current layer
          # and create the node connecting the current layer
          # to the input layer we just created.
          layer(x)
          set_inputs = True

      if set_inputs:
        outputs = tf.nest.flatten(layer._inbound_nodes[-1].outputs)
        if len(outputs) != 1:
          raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
        self.outputs = outputs
        self.inputs = layer_utils.get_source_inputs(self.outputs[0])
        self.built = True
        self._has_explicit_input_shape = True

    elif self.outputs:
      # If the model is being built continuously on top of an input layer:
      # refresh its output.
      output_tensor = layer(self.outputs[0])
      if len(tf.nest.flatten(output_tensor)) != 1:
        raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
      self.outputs = [output_tensor]
      self.built = True

    if set_inputs or self._graph_initialized:
      self._init_graph_network(self.inputs, self.outputs)
      self._graph_initialized = True
    else:
      self._self_tracked_trackables.append(layer)
      self._handle_deferred_layer_dependencies([layer])

    self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  @traceback_utils.filter_traceback
  def pop(self):
    """Removes the last layer in the model.

    Raises:
      TypeError: if there are no layers in the model.
    """
    if not self.layers:
      raise TypeError('There are no layers in the model.')

    layer = self._self_tracked_trackables.pop()
    self._layer_call_argspecs.pop(layer)
    if not self.layers:
      self.outputs = None
      self.inputs = None
      self.built = False
      self._inferred_input_shape = None
      self._has_explicit_input_shape = False
      self._graph_initialized = False
    elif self._graph_initialized:
      self.layers[-1]._outbound_nodes = []
      self.outputs = [self.layers[-1].output]
      self._init_graph_network(self.inputs, self.outputs)
      self.built = True

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _build_graph_network_for_inferred_shape(self,
                                              input_shape,
                                              input_dtype=None):
    if input_shape is None or not self.layers:
      return
    if (not tf.__internal__.tf2.enabled() or
        not tf.compat.v1.executing_eagerly_outside_functions()):
      # This behavior is disabled in V1 or when eager execution is disabled.
      return
    if (not self._has_explicit_input_shape and
        not self._use_legacy_deferred_behavior):
      # Determine whether the input shape is novel, i.e. whether the model
      # should be rebuilt.
      input_shape = tuple(input_shape)
      if self._inferred_input_shape is None:
        new_shape = input_shape
      else:
        new_shape = relax_input_shape(self._inferred_input_shape, input_shape)
      if (new_shape is not None and new_shape != self._inferred_input_shape):
        # A novel shape has been received: we need to rebuild the model.
        # In case we are inside a graph function, we step out of it.
        with tf.init_scope():
          inputs = input_layer.Input(
              batch_shape=new_shape,
              dtype=input_dtype,
              name=self.layers[0].name + '_input')
          layer_input = inputs
          created_nodes = set()
          for layer in self.layers:
            # Clear nodes previously created via this method. This prevents
            # node accumulation and ensures that e.g. `layer.output` is
            # always connected to `model.inputs`
            # (this is important e.g. for the feature extraction use case).
            # We don't just do `layer._inbound_nodes = []` in order
            # not to break shared layers added to Sequential models (which is
            # technically illegal as per the `add()` docstring,
            # but wasn't previously disabled).
            clear_previously_created_nodes(layer, self._created_nodes)
            try:
              # Create Functional API connection by calling the current layer.
              layer_output = layer(layer_input)
            except:  # pylint:disable=bare-except
              # Functional API calls may fail for a number of reasons:
              # 1) The layer may be buggy. In this case it will be easier for
              # the user to debug if we fail on the first call on concrete
              # data, instead of our own call on a symbolic input.
              # 2) The layer is dynamic (graph-incompatible) and hasn't
              # overridden `compute_output_shape`. In this case, it is
              # impossible to build a graph network.
              # 3) The layer is otherwise incompatible with the Functional API
              # (e.g. this is the case for some probabilistic layers that rely
              # on hacks and that do not return tensors).
              # In all these cases, we should avoid creating a graph network
              # (or we simply can't).
              self._use_legacy_deferred_behavior = True
              return
            if len(tf.nest.flatten(layer_output)) != 1:
              raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
            # Keep track of nodes just created above.
            track_nodes_created_by_last_call(layer, created_nodes)
            layer_input = layer_output
            outputs = layer_output
          self._created_nodes = created_nodes
          try:
            # Initialize a graph Network. This call will never fail for
            # a stack of valid Keras layers.
            # However some users have layers that are fundamentally
            # incompatible with the Functional API, which do not return
            # tensors. In this case, we fall back to the legacy deferred
            # behavior.
            # TODO(fchollet): consider raising here, as we should not be
            # supporting such layers.
            self._init_graph_network(inputs, outputs)
            self._graph_initialized = True
          except:  # pylint:disable=bare-except
            self._use_legacy_deferred_behavior = True
        self._inferred_input_shape = new_shape

  @generic_utils.default
  def build(self, input_shape=None):
    if self._graph_initialized:
      self._init_graph_network(self.inputs, self.outputs)
    else:
      if input_shape is None:
        raise ValueError('You must provide an `input_shape` argument.')
      self._build_graph_network_for_inferred_shape(input_shape)
      if not self.built:
        input_shape = tuple(input_shape)
        self._build_input_shape = input_shape
        super(Sequential, self).build(input_shape)
    self.built = True

  def call(self, inputs, training=None, mask=None):  # pylint: disable=redefined-outer-name
    # If applicable, update the static input shape of the model.
    if not self._has_explicit_input_shape:
      if not tf.is_tensor(inputs) and not isinstance(
          inputs, tf.Tensor):
        # This is a Sequential with multiple inputs. This is technically an
        # invalid use case of Sequential, but we tolerate it for backwards
        # compatibility.
        self._use_legacy_deferred_behavior = True
        self._build_input_shape = tf.nest.map_structure(_get_shape_tuple,
                                                        inputs)
        if tf.__internal__.tf2.enabled():
          logging.warning('Layers in a Sequential model should only have a '
                          'single input tensor, but we receive a %s input: %s'
                          '\nConsider rewriting this model with the Functional '
                          'API.' % (type(inputs), inputs))
      else:
        self._build_graph_network_for_inferred_shape(inputs.shape,
                                                     inputs.dtype)

    if self._graph_initialized:
      if not self.built:
        self._init_graph_network(self.inputs, self.outputs)
      return super(Sequential, self).call(inputs, training=training, mask=mask)

    outputs = inputs  # handle the corner case where self.layers is empty
    for layer in self.layers:
      # During each iteration, `inputs` are the inputs to `layer`, and
      # `outputs` are the outputs of `layer` applied to `inputs`. At the end
      # of each iteration `inputs` is set to `outputs` to prepare for the
      # next layer.
      kwargs = {}
      argspec = self._layer_call_argspecs[layer].args
      if 'mask' in argspec:
        kwargs['mask'] = mask
      if 'training' in argspec:
        kwargs['training'] = training

      outputs = layer(inputs, **kwargs)

      if len(tf.nest.flatten(outputs)) != 1:
        raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
      # `outputs` will be the inputs to the next layer.
      inputs = outputs
      mask = getattr(outputs, '_keras_mask', None)
    return outputs

  def compute_output_shape(self, input_shape):
    shape = input_shape
    for layer in self.layers:
      shape = layer.compute_output_shape(shape)
    return shape

  def compute_mask(self, inputs, mask):
    # TODO(omalleyt): b/123540974 This function is not really safe to call
    # by itself because it will duplicate any updates and losses in graph
    # mode by `call`ing the Layers again.
    outputs = self.call(inputs, mask=mask)  # pylint: disable=unexpected-keyword-arg
    return getattr(outputs, '_keras_mask', None)
Note that `self._self_tracked_trackables` is managed # by the tracking infrastructure and should not be used. layer_configs.append(generic_utils.serialize_keras_object(layer)) config = { 'name': self.name, 'layers': copy.deepcopy(layer_configs) } if not self._is_graph_network and self._build_input_shape is not None: config['build_input_shape'] = self._build_input_shape return config @classmethod def from_config(cls, config, custom_objects=None): if 'name' in config: name = config['name'] build_input_shape = config.get('build_input_shape') layer_configs = config['layers'] else: name = None build_input_shape = None layer_configs = config model = cls(name=name) for layer_config in layer_configs: layer = layer_module.deserialize(layer_config, custom_objects=custom_objects) model.add(layer) if (not model.inputs and build_input_shape and isinstance(build_input_shape, (tuple, list))): model.build(build_input_shape) return model @property def input_spec(self): if hasattr(self, '_manual_input_spec'): return self._manual_input_spec if self._has_explicit_input_shape: return super().input_spec return None @input_spec.setter def input_spec(self, value): self._manual_input_spec = value @property def _trackable_saved_model_saver(self): return model_serialization.SequentialSavedModelSaver(self) def _is_layer_name_unique(self, layer): for ref_layer in self.layers: if layer.name == ref_layer.name and ref_layer is not layer: return False return True def _assert_weights_created(self): if self._graph_initialized: return # When the graph has not been initialized, use the Model's implementation to # to check if the weights has been created. 
super(functional.Functional, self)._assert_weights_created() # pylint: disable=bad-super-call def _get_shape_tuple(t): if hasattr(t, 'shape'): shape = t.shape if isinstance(shape, tuple): return shape if shape.rank is not None: return tuple(shape.as_list()) return None return None def relax_input_shape(shape_1, shape_2): if shape_1 is None or shape_2 is None: return None if len(shape_1) != len(shape_2): return None return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2)) def clear_previously_created_nodes(layer, created_nodes): """Remove nodes from `created_nodes` from the layer's inbound_nodes.""" for node in layer._inbound_nodes: prev_layers = node.inbound_layers for prev_layer in tf.nest.flatten(prev_layers): prev_layer._outbound_nodes = [ n for n in prev_layer._outbound_nodes if n not in created_nodes] layer._inbound_nodes = [ n for n in layer._inbound_nodes if n not in created_nodes] def track_nodes_created_by_last_call(layer, created_nodes): """Adds to `created_nodes` the nodes created by the last call to `layer`.""" if not layer._inbound_nodes: return created_nodes.add(layer._inbound_nodes[-1]) prev_layers = layer._inbound_nodes[-1].inbound_layers for prev_layer in tf.nest.flatten(prev_layers): if prev_layer._outbound_nodes: created_nodes.add(prev_layer._outbound_nodes[-1])
20,756
39.383268
99
py
keras
keras-master/keras/engine/functional_utils_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #,============================================================================ """Tests for functional_utils.""" import collections import os from keras import keras_parameterized from keras import layers from keras import models from keras.engine import functional_utils from keras.engine import input_layer as input_layer_lib import numpy as np import tensorflow.compat.v2 as tf class FunctionalModelSlideTest(keras_parameterized.TestCase): def test_find_nodes_by_inputs_and_outputs(self): inputs = input_layer_lib.Input((10,)) unconnected_inputs = input_layer_lib.Input((10,)) x = layers.Dense(8)(inputs) y = layers.Dense(6)(x) output = layers.Dense(4)(y) nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs( x, output) self.assertLen(nodes_in_graph, 2) expected_nodes = [output.node, y.node] self.assertCountEqual(nodes_in_graph, expected_nodes) # Make sure we raise error if we specify invalid input/output pair with self.assertRaisesRegex( ValueError, 'Found input tensor cannot be reached'): functional_utils.find_nodes_by_inputs_and_outputs(output, x) with self.assertRaisesRegex( ValueError, 'Found input tensor cannot be reached'): functional_utils.find_nodes_by_inputs_and_outputs(unconnected_inputs, output) with self.assertRaisesRegex( ValueError, 'Found unvisited input tensors that are disconnected'): functional_utils.find_nodes_by_inputs_and_outputs( [inputs, 
unconnected_inputs], output) def test_find_nodes_by_inputs_and_outputs_with_complicated_network(self): input1 = input_layer_lib.Input((10,)) input2 = input_layer_lib.Input((10,)) input3 = input_layer_lib.Input((10,)) unconnected_input = input_layer_lib.Input((10,)) dense1 = layers.Dense(4, name='dense1') dense2 = layers.Dense(4, name='dense2') # dense1 are shared between input1 and input2 a = dense1(input1) b = dense1(input2) c = layers.Add()([a, b]) d = dense2(input3) e = layers.Add()([c, d]) # There are 5 nodes (invoke of __call__) in the graph. nodes = functional_utils.find_nodes_by_inputs_and_outputs(input1, a) self.assertCountEqual(nodes, [a.node]) nodes = functional_utils.find_nodes_by_inputs_and_outputs(input2, b) self.assertCountEqual(nodes, [b.node]) nodes = functional_utils.find_nodes_by_inputs_and_outputs([input2, input1], c) # This should contains 2 dense call and 1 add self.assertCountEqual(nodes, [a.node, b.node, c.node]) # Missing input3 with self.assertRaisesRegex( ValueError, 'Found input tensor cannot be reached'): functional_utils.find_nodes_by_inputs_and_outputs([input1, input2], e) nodes = functional_utils.find_nodes_by_inputs_and_outputs( [input1, input2, input3], e) self.assertCountEqual(nodes, [a.node, b.node, c.node, d.node, e.node]) # Make sure we can create from intermediate tensors nodes = functional_utils.find_nodes_by_inputs_and_outputs([a, b, input3], e) self.assertCountEqual(nodes, [c.node, d.node, e.node]) # Also make sure we can add intermediate outputs nodes = functional_utils.find_nodes_by_inputs_and_outputs([a, b, input3], [d, e]) self.assertCountEqual(nodes, [c.node, d.node, e.node]) # input1 and 2 are not needed for computing d with self.assertRaisesRegex( ValueError, 'Found unvisited input tensors that are disconnected'): functional_utils.find_nodes_by_inputs_and_outputs( [input1, input2, input3], d) with self.assertRaisesRegex( ValueError, 'Found unvisited input tensors that are disconnected'): 
functional_utils.find_nodes_by_inputs_and_outputs( [a, b, input3, unconnected_input], [e, d, c]) def test_build_model_from_intermediate_tensor(self): batch_size = 4 inputs = input_layer_lib.Input(shape=(8,)) layer1 = layers.Dense(32) layer2 = layers.Dense(16) x = layer1(inputs) y = layer2(x) model = models.Model(x, y) # Make sure a new node is attached to layer2, which mimic y = layer2(x) self.assertLen(layer2.inbound_nodes, 2) self.assertIsInstance(model, models.Model) # The model only contains 1 dense layer and 1 input layer. self.assertLen(model.layers, 2) self.assertIs(model.layers[1], layer2) model.compile('rmsprop', 'mse') model.fit(np.random.randn(batch_size, 32), np.random.randn(batch_size, 16)) # Test for model saving output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model') model.save(output_path, save_format='tf') loaded_model = models.load_model(output_path) self.assertEqual(model.summary(), loaded_model.summary()) # Also make sure the orignal inputs and y can still be used to build model new_model = models.Model(inputs, y) # Make sure no new node is attached to layer2 self.assertLen(layer2.inbound_nodes, 2) self.assertLen(new_model.layers, 3) self.assertIs(new_model.layers[1], layer1) self.assertIs(new_model.layers[2], layer2) def test_build_model_from_intermediate_tensor_with_complicated_model(self): # The topology is like below: # input1 -> dense1 -> a # + -> c - + --> d - + --> output # input2 -> dense1 -> b -------^ ^ # input3 -> dense2 -> e -----------------| batch_size = 8 input1 = input_layer_lib.Input((2,)) input2 = input_layer_lib.Input((2,)) input3 = input_layer_lib.Input((8,)) dense1 = layers.Dense(8, name='dense1') dense2 = layers.Dense(8, name='dense2') # dense1 are shared between input1 and input2 a = dense1(input1) b = dense1(input2) c = layers.Add()([a, b]) # d has a residual connection from b. d = layers.Add()([b, c]) e = dense2(input3) output = layers.Add()([d, e]) # We skip the input2 here and use b instead. 
model = models.Model([input1, b, input3], output) # Make sure we have 8 layers, 3 for inputs, 2 for dense and 3 for Add. # Note that dense1 is still in use by input1. self.assertLen(model.layers, 8) # Since the layers are not ordered, let's check class of the layers to make # sure it match the expectation. class_count = collections.Counter([l.__class__ for l in model.layers]) self.assertEqual(class_count[input_layer_lib.InputLayer], 3) self.assertEqual(class_count[layers.Dense], 2) self.assertEqual(class_count[layers.Add], 3) model.compile('rmsprop', 'mse') model.fit([np.random.randn(batch_size, 2), np.random.randn(batch_size, 8), # The shape of b is (batch, 8) np.random.randn(batch_size, 8)], np.random.randn(batch_size, 8)) output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model') model.save(output_path, save_format='tf') loaded_model = models.load_model(output_path) self.assertEqual(model.summary(), loaded_model.summary()) model2 = models.Model([a, b], d) # 2 input layers and 2 Add layer. self.assertLen(model2.layers, 4) class_count = collections.Counter([l.__class__ for l in model2.layers]) self.assertEqual(class_count[input_layer_lib.InputLayer], 2) self.assertEqual(class_count[layers.Add], 2) model2.compile('rmsprop', 'mse') model2.fit([np.random.randn(batch_size, 8), np.random.randn(batch_size, 8)], np.random.randn(batch_size, 8)) if __name__ == '__main__': tf.test.main()
8,300
38.528571
80
py
keras
keras-master/keras/engine/input_layer_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #,============================================================================ """Tests for InputLayer construction.""" import tensorflow.compat.v2 as tf from tensorflow.python.framework import type_spec from keras import backend from keras import combinations from keras import keras_parameterized from keras.engine import functional from keras.engine import input_layer as input_layer_lib from keras.layers import core from keras.saving import model_config class TwoTensors(tf.__internal__.CompositeTensor): """A simple value type to test TypeSpec. Contains two tensors (x, y) and a string (color). The color value is a stand-in for any extra type metadata we might need to store. This value type contains no single dtype. 
""" def __init__(self, x, y, color='red', assign_variant_dtype=False): assert isinstance(color, str) self.x = tf.convert_to_tensor(x) self.y = tf.convert_to_tensor(y) self.color = color self.shape = tf.TensorShape(None) self._shape = tf.TensorShape(None) if assign_variant_dtype: self.dtype = tf.variant self._assign_variant_dtype = assign_variant_dtype def _type_spec(self): return TwoTensorsSpecNoOneDtype( self.x.shape, self.x.dtype, self.y.shape, self.y.dtype, color=self.color, assign_variant_dtype=self._assign_variant_dtype) def as_shape(shape): """Converts the given object to a TensorShape.""" if isinstance(shape, tf.TensorShape): return shape else: return tf.TensorShape(shape) @type_spec.register('tf.TwoTensorsSpec') class TwoTensorsSpecNoOneDtype(tf.TypeSpec): """A TypeSpec for the TwoTensors value type.""" def __init__( self, x_shape, x_dtype, y_shape, y_dtype, color='red', assign_variant_dtype=False): self.x_shape = as_shape(x_shape) self.x_dtype = tf.as_dtype(x_dtype) self.y_shape = as_shape(y_shape) self.y_dtype = tf.as_dtype(y_dtype) self.color = color self.shape = tf.TensorShape(None) self._shape = tf.TensorShape(None) if assign_variant_dtype: self.dtype = tf.variant self._assign_variant_dtype = assign_variant_dtype value_type = property(lambda self: TwoTensors) @property def _component_specs(self): return (tf.TensorSpec(self.x_shape, self.x_dtype), tf.TensorSpec(self.y_shape, self.y_dtype)) def _to_components(self, value): return (value.x, value.y) def _from_components(self, components): x, y = components return TwoTensors(x, y, self.color) def _serialize(self): return (self.x_shape, self.x_dtype, self.y_shape, self.y_dtype, self.color) @classmethod def from_value(cls, value): return cls(value.x.shape, value.x.dtype, value.y.shape, value.y.dtype, value.color) type_spec.register_type_spec_from_value_converter( TwoTensors, TwoTensorsSpecNoOneDtype.from_value) class InputLayerTest(keras_parameterized.TestCase): 
@combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testBasicOutputShapeNoBatchSize(self): # Create a Keras Input x = input_layer_lib.Input(shape=(32,), name='input_a') self.assertAllEqual(x.shape.as_list(), [None, 32]) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2.0) self.assertAllEqual(model(tf.ones((3, 32))), tf.ones((3, 32)) * 2.0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testBasicOutputShapeWithBatchSize(self): # Create a Keras Input x = input_layer_lib.Input(batch_size=6, shape=(32,), name='input_b') self.assertAllEqual(x.shape.as_list(), [6, 32]) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2.0) self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0) @combinations.generate(combinations.combine(mode=['eager'])) def testBasicOutputShapeNoBatchSizeInTFFunction(self): model = None @tf.function def run_model(inp): nonlocal model if not model: # Create a Keras Input x = input_layer_lib.Input(shape=(8,), name='input_a') self.assertAllEqual(x.shape.as_list(), [None, 8]) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2.0) return model(inp) self.assertAllEqual(run_model(tf.ones((10, 8))), tf.ones((10, 8)) * 2.0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testInputTensorArg(self): # Create a Keras Input x = input_layer_lib.Input(tensor=tf.zeros((7, 32))) self.assertAllEqual(x.shape.as_list(), [7, 32]) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2.0) self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0) @combinations.generate(combinations.combine(mode=['eager'])) def testInputTensorArgInTFFunction(self): # We use a mutable model container instead of a model python variable, # because python 2.7 does not have `nonlocal` model_container = {} @tf.function def 
run_model(inp): if not model_container: # Create a Keras Input x = input_layer_lib.Input(tensor=tf.zeros((10, 16))) self.assertAllEqual(x.shape.as_list(), [10, 16]) # Verify you can construct and use a model w/ this input model_container['model'] = functional.Functional(x, x * 3.0) return model_container['model'](inp) self.assertAllEqual(run_model(tf.ones((10, 16))), tf.ones((10, 16)) * 3.0) @combinations.generate(combinations.combine(mode=['eager'])) def testCompositeInputTensorArg(self): # Create a Keras Input rt = tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) x = input_layer_lib.Input(tensor=rt) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2) # And that the model works rt = tf.RaggedTensor.from_row_splits( values=[3, 21, 4, 1, 53, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) self.assertAllEqual(model(rt), rt * 2) @combinations.generate(combinations.combine(mode=['eager'])) def testCompositeInputTensorArgInTFFunction(self): # We use a mutable model container instead of a model python variable, # because python 2.7 does not have `nonlocal` model_container = {} @tf.function def run_model(inp): if not model_container: # Create a Keras Input rt = tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) x = input_layer_lib.Input(tensor=rt) # Verify you can construct and use a model w/ this input model_container['model'] = functional.Functional(x, x * 3) return model_container['model'](inp) # And verify the model works rt = tf.RaggedTensor.from_row_splits( values=[3, 21, 4, 1, 53, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) self.assertAllEqual(run_model(rt), rt * 3) @combinations.generate(combinations.combine(mode=['eager'])) def testNoMixingArgsWithTypeSpecArg(self): with self.assertRaisesRegexp( ValueError, 'all other args except `name` must be None'): input_layer_lib.Input( shape=(4, 7), type_spec=tf.TensorSpec((2, 7, 32), tf.float32)) 
with self.assertRaisesRegexp( ValueError, 'all other args except `name` must be None'): input_layer_lib.Input( batch_size=4, type_spec=tf.TensorSpec((7, 32), tf.float32)) with self.assertRaisesRegexp( ValueError, 'all other args except `name` must be None'): input_layer_lib.Input( dtype=tf.int64, type_spec=tf.TensorSpec((7, 32), tf.float32)) with self.assertRaisesRegexp( ValueError, 'all other args except `name` must be None'): input_layer_lib.Input( sparse=True, type_spec=tf.TensorSpec((7, 32), tf.float32)) with self.assertRaisesRegexp( ValueError, 'all other args except `name` must be None'): input_layer_lib.Input( ragged=True, type_spec=tf.TensorSpec((7, 32), tf.float32)) @combinations.generate(combinations.combine(mode=['eager'])) def testTypeSpecArg(self): # Create a Keras Input x = input_layer_lib.Input( type_spec=tf.TensorSpec((7, 32), tf.float32)) self.assertAllEqual(x.shape.as_list(), [7, 32]) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2.0) self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0) # Test serialization / deserialization model = functional.Functional.from_config(model.get_config()) self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0) model = model_config.model_from_json(model.to_json()) self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0) @combinations.generate(combinations.combine(mode=['eager'])) def testTypeSpecArgInTFFunction(self): # We use a mutable model container instead of a model python variable, # because python 2.7 does not have `nonlocal` model_container = {} @tf.function def run_model(inp): if not model_container: # Create a Keras Input x = input_layer_lib.Input( type_spec=tf.TensorSpec((10, 16), tf.float32)) self.assertAllEqual(x.shape.as_list(), [10, 16]) # Verify you can construct and use a model w/ this input model_container['model'] = functional.Functional(x, x * 3.0) return model_container['model'](inp) 
self.assertAllEqual(run_model(tf.ones((10, 16))), tf.ones((10, 16)) * 3.0) @combinations.generate(combinations.combine(mode=['eager'])) def testCompositeTypeSpecArg(self): # Create a Keras Input rt = tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) x = input_layer_lib.Input(type_spec=rt._type_spec) # Verify you can construct and use a model w/ this input model = functional.Functional(x, x * 2) # And that the model works rt = tf.RaggedTensor.from_row_splits( values=[3, 21, 4, 1, 53, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) self.assertAllEqual(model(rt), rt * 2) # Test serialization / deserialization model = functional.Functional.from_config(model.get_config()) self.assertAllEqual(model(rt), rt * 2) model = model_config.model_from_json(model.to_json()) self.assertAllEqual(model(rt), rt * 2) @combinations.generate(combinations.combine(mode=['eager'])) def testCompositeTypeSpecArgInTFFunction(self): # We use a mutable model container instead of a model pysthon variable, # because python 2.7 does not have `nonlocal` model_container = {} @tf.function def run_model(inp): if not model_container: # Create a Keras Input rt = tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) x = input_layer_lib.Input(type_spec=rt._type_spec) # Verify you can construct and use a model w/ this input model_container['model'] = functional.Functional(x, x * 3) return model_container['model'](inp) # And verify the model works rt = tf.RaggedTensor.from_row_splits( values=[3, 21, 4, 1, 53, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) self.assertAllEqual(run_model(rt), rt * 3) @combinations.generate(combinations.combine(mode=['eager'])) def testCompositeTypeSpecArgWithoutDtype(self): for assign_variant_dtype in [False, True]: # Create a Keras Input spec = TwoTensorsSpecNoOneDtype( (1, 2, 3), tf.float32, (1, 2, 3), tf.int64, assign_variant_dtype=assign_variant_dtype) x = input_layer_lib.Input(type_spec=spec) def 
lambda_fn(tensors): return (tf.cast(tensors.x, tf.float64) + tf.cast(tensors.y, tf.float64)) # Verify you can construct and use a model w/ this input model = functional.Functional(x, core.Lambda(lambda_fn)(x)) # And that the model works two_tensors = TwoTensors(tf.ones((1, 2, 3)) * 2.0, tf.ones(1, 2, 3)) self.assertAllEqual(model(two_tensors), lambda_fn(two_tensors)) # Test serialization / deserialization model = functional.Functional.from_config(model.get_config()) self.assertAllEqual(model(two_tensors), lambda_fn(two_tensors)) model = model_config.model_from_json(model.to_json()) self.assertAllEqual(model(two_tensors), lambda_fn(two_tensors)) def test_serialize_with_unknown_rank(self): inp = backend.placeholder(shape=None, dtype=tf.string) x = input_layer_lib.InputLayer(input_tensor=inp, dtype=tf.string) loaded = input_layer_lib.InputLayer.from_config(x.get_config()) self.assertIsNone(loaded._batch_input_shape) if __name__ == '__main__': tf.test.main()
13,834
36.595109
79
py
keras
keras-master/keras/engine/control_flow_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dynamic control flow behavior with Keras.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from keras import keras_parameterized from keras import testing_utils from keras.engine import base_layer from keras.optimizer_v2 import rmsprop class ControlFlowLayer1(base_layer.Layer): """Layer with an `if` condition in call.""" def call(self, inputs): if tf.reduce_sum(inputs) > 0: return tf.sqrt(inputs) else: return tf.square(inputs) class ControlFlowLayer2(base_layer.Layer): """Layer with a `for` loop in call.""" def call(self, inputs): samples = tf.TensorArray( dtype=tf.float32, size=tf.shape(inputs)[0]) i = 0 for sample in inputs: samples = samples.write(i, tf.square(sample)) i += 1 return samples.stack() class NestedControlFlowLayer(base_layer.Layer): """Layer nested with a control flow layer.""" def __init__(self, **kwargs): super(NestedControlFlowLayer, self).__init__(**kwargs) self.layer = ControlFlowLayer1() def call(self, inputs): return self.layer(inputs) class ControlFlowModel(keras.Model): """Model with an `if` condition in call.""" def call(self, inputs): if tf.reduce_sum(inputs) > 0: return tf.sqrt(inputs) else: return tf.square(inputs) class NestedControlFlowModel(keras.Model): """Model with an `if` condition in 
call using a control flow layer.""" def __init__(self, **kwargs): super(NestedControlFlowModel, self).__init__(**kwargs) self.layer = NestedControlFlowLayer() def call(self, inputs): inputs = self.layer(inputs) if tf.reduce_sum(inputs) > 0: return tf.sqrt(inputs) else: return tf.square(inputs) class FunctionControlFlowModel(keras.Model): """Model with control flow where `call` is wrapped in function already.""" @tf.function def call(self, inputs): if tf.reduce_sum(inputs) > 0: return tf.sqrt(inputs) else: return tf.square(inputs) @keras_parameterized.run_all_keras_modes class AutographWrapperTest(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types @parameterized.named_parameters(('with_if', ControlFlowLayer1), ('with_for', ControlFlowLayer2), ('nested', NestedControlFlowLayer)) def test_control_flow_layer(self, layer_class): model = testing_utils.get_model_from_layers([layer_class()], input_shape=(3,)) model.compile(rmsprop.RMSprop(0.001), loss='mse') model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) @parameterized.named_parameters( ('with_if', ControlFlowModel), ('nested', NestedControlFlowModel), ('wrapped_in_function', FunctionControlFlowModel)) def test_control_flow_model(self, model_class): model = model_class() model.compile(rmsprop.RMSprop(0.001), loss='mse') model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) def test_control_flow_in_deferred_sequential_model(self): model = keras.Sequential( [ControlFlowLayer1(), keras.layers.Dense(3), ControlFlowLayer2()]) model.compile(rmsprop.RMSprop(0.001), loss='mse') model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) if __name__ == '__main__': tf.test.main()
4,129
30.526718
80
py
keras
keras-master/keras/engine/training_utils_v1.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""

import tensorflow.compat.v2 as tf

import abc
import atexit
import collections
import functools
import multiprocessing.pool
import threading
import time

import numpy as np
from keras import backend
from keras import callbacks as cbks
from keras import losses
from keras import metrics as metrics_module
from keras.utils import data_utils
from keras.utils import generic_utils
from keras.utils import losses_utils
from keras.utils import tf_inspect
from tensorflow.python.platform import tf_logging as logging


def is_composite_or_composite_value(tensor):
  """Returns true if 'tensor' is a CompositeTensor or a CT Value object."""
  # TODO(b/125094323): This should be isinstance(CompositeTensor) or
  # isinstance(CompositeTensorValue) once we support that.
  # Covers both graph-mode value objects (SparseTensorValue /
  # RaggedTensorValue) and eager CompositeTensors.
  return isinstance(
      tensor,
      (tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue,
       tf.compat.v1.ragged.RaggedTensorValue))


class Aggregator(object, metaclass=abc.ABCMeta):
  """Abstract base class used to aggregate batch-level outputs of a loop.

  Attributes:
    use_steps: Whether the loop is using `step` or `batch_size`.
    num_samples: Total number of samples: `batch_size * num_batches`.
    steps: Total number of steps.
    batch_size: Batch size. It is used for validation checks between inputs
      and outputs.
    results: What to return at the end of the aggregation loop.
  """

  def __init__(self, use_steps, num_samples=None, steps=None, batch_size=None):
    self.use_steps = use_steps
    self.num_samples = num_samples
    self.steps = steps
    self.batch_size = batch_size
    self.results = []

  @abc.abstractmethod
  def create(self, batch_outs):
    """Creates the initial results from the first batch outputs.

    Args:
      batch_outs: A list of batch-level outputs.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    """Aggregates batch-level results into total results.

    Args:
      batch_outs: A list of batch-level outputs.
      batch_start: The start index of this batch. Always `None` if `use_steps`
        is `True`.
      batch_end: The end index of this batch. Always `None` if `use_steps` is
        `True`.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def finalize(self):
    """Prepares the total results to be returned."""
    raise NotImplementedError('Must be implemented in subclasses.')


class MetricsAggregator(Aggregator):
  """Aggregator that calculates loss and metrics info.

  Attributes:
    use_steps: Whether the loop is using `step` or `batch_size`.
    num_samples: Total number of samples: `batch_size*num_batches`.
    steps: Total number of steps, ie number of times to iterate over a dataset
      to cover all samples.
  """

  def __init__(self, use_steps, num_samples=None, steps=None):
    super(MetricsAggregator, self).__init__(
        use_steps=use_steps,
        num_samples=num_samples,
        steps=steps,
        batch_size=None)

  def create(self, batch_outs):
    # Slot 0 accumulates the loss; the remaining slots mirror the metric
    # outputs and are overwritten each batch (see `aggregate`).
    self.results = [0.] * len(batch_outs)

  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    # Loss.
    if self.use_steps:
      self.results[0] += batch_outs[0]
    else:
      # Weight the per-batch loss by the batch's sample count so `finalize`
      # can compute a sample-weighted mean.
      self.results[0] += batch_outs[0] * (batch_end - batch_start)
    # Metrics (always stateful, just grab current values.)
    self.results[1:] = batch_outs[1:]

  def finalize(self):
    """Averages the accumulated loss over the samples (or steps) seen."""
    if not self.results:
      raise ValueError('Empty training data.')
    # When `num_samples` is unknown (steps mode) fall back to dividing by the
    # number of steps.
    self.results[0] /= (self.num_samples or self.steps)


def _append_sparse_tensor_value(target, to_append):
  """Append sparse tensor value objects."""
  # Make sure the sparse tensors are of the same size (except for the 0th dim).
  if len(target.dense_shape) != len(to_append.dense_shape):
    raise RuntimeError(
        'Unable to concatenate %s and %s. The inner dense shapes do not '
        'have the same number of dimensions (%s vs %s)' %
        (target, to_append, target.dense_shape, to_append.dense_shape))

  if target.dense_shape[1:] != to_append.dense_shape[1:]:
    raise RuntimeError(
        'Unable to concatenate %s and %s. The inner dense shapes do not '
        'match inner dimensions (%s vs %s)' %
        (target, to_append, target.dense_shape[1:],
         to_append.dense_shape[1:]))

  # Add the to_append indices to target, updating the 0th value, and keeping
  # track of the maximum so we know the final dense_shape of this tensor.
  base_dim0_value = target.dense_shape[0]
  max_dim0_value = target.dense_shape[0]
  new_indices = target.indices
  for index in to_append.indices:
    # Here, we iterate through the sparse indices of the tensor to append. For
    # each index, we update its zeroth value (the batch index) by adding the
    # number of batch items in the tensor we are appending to (so an index
    # of [0, 0, 1] for a value that is being appended to a tensor with 0th dim
    # size 3 would become [3, 0, 1].)
    # NOTE(review): this mutates `to_append.indices` rows in place, so the
    # caller's value object is modified — appears tolerated in this legacy
    # aggregation path, but verify before reusing elsewhere.
    # NOTE(review): the repeated `np.append` makes this loop quadratic in the
    # number of appended indices.
    index[0] += base_dim0_value
    max_dim0_value = max(max_dim0_value, index[0])
    new_indices = np.append(new_indices, [index], axis=0)

  # Extend the values array to contain all of the appended values. These will
  # be in the same order as the indices added above.
  new_values = np.concatenate((target.values, to_append.values), axis=0)

  # Create a new dense shape by replacing the value for the 0th dimension
  # with the new max dim0 value.
  new_dense_shape = list(target.dense_shape)
  new_dense_shape[0] = max_dim0_value + 1
  new_dense_shape = tuple(new_dense_shape)

  return tf.compat.v1.SparseTensorValue(
      indices=new_indices, values=new_values, dense_shape=new_dense_shape)


def _append_ragged_tensor_value(target, to_append):
  """Append ragged tensor value objects."""
  # Make sure the ragged tensors are of the same size (save for the 0th dim).
  if len(target.shape) != len(to_append.shape):
    raise RuntimeError('Unable to concatenate %s and %s' % (target, to_append))

  if target.shape[1:] != to_append.shape[1:]:
    raise RuntimeError('Unable to concatenate %s and %s' % (target, to_append))

  # Offset the appended row splits by the number of values already in
  # `target` (its last row split), dropping `to_append`'s leading 0 entry.
  adjusted_row_splits = to_append.row_splits[1:] + target.row_splits[-1]
  new_row_splits = np.append(target.row_splits, adjusted_row_splits)
  if isinstance(target.values, tf.compat.v1.ragged.RaggedTensorValue):
    # Nested ragged values: recurse one level down.
    new_values = _append_ragged_tensor_value(target.values, to_append.values)
  else:
    new_values = np.concatenate((target.values, to_append.values), axis=0)

  return tf.compat.v1.ragged.RaggedTensorValue(new_values, new_row_splits)


def _append_composite_tensor(target, to_append):
  """Helper function to append composite tensors to each other in the 0 axis.

  In order to support batching within a fit/evaluate/predict call, we need
  to be able to aggregate within a CompositeTensor. Unfortunately, the CT
  API currently does not make this easy - especially in V1 mode, where we're
  working with CompositeTensor Value objects that have no connection with
  the CompositeTensors that created them.

  Args:
    target: CompositeTensor or CompositeTensor value object that will be
      appended to.
    to_append: CompositeTensor or CompositeTensor value object to append to.
      'target'.

  Returns:
    A CompositeTensor or CompositeTensor value object.

  Raises:
    RuntimeError: if concatenation is not possible.
  """
  if type(target) is not type(to_append):
    raise RuntimeError('Unable to concatenate %s and %s' %
                       (type(target), type(to_append)))

  # Perform type-specific concatenation.
  # TODO(b/125094323): This should be replaced by a simple call to
  # target.append() that should work on all of the below classes.

  # If we're seeing a CompositeTensor here, we know it's because we're in
  # Eager mode (or else we'd have evaluated the CT to a CT Value object
  # already). Therefore, it's safe to call concat() on it without evaluating
  # the result any further. If not - that is, if we're seeing a
  # SparseTensorValue or a RaggedTensorValue - we need to hand-update it
  # since we're outside of the graph anyways.
  if isinstance(target, tf.SparseTensor):
    # We need to invoke the sparse version of concatenate here - tf.concat
    # won't work.
    return tf.compat.v1.sparse_concat(sp_inputs=[target, to_append], axis=0)
  elif isinstance(target, tf.RaggedTensor):
    return tf.concat([target, to_append], axis=0)
  elif isinstance(target, tf.compat.v1.SparseTensorValue):
    return _append_sparse_tensor_value(target, to_append)
  elif isinstance(target, tf.compat.v1.ragged.RaggedTensorValue):
    return _append_ragged_tensor_value(target, to_append)
  else:
    raise RuntimeError('Attempted to concatenate unsupported object %s.' %
                       type(target))


class ConcatAggregator(Aggregator):
  """Combine tensor-likes which cannot be merged on the fly.

  This class expects to aggregate a single tensor-like rather than a nested
  structure of tensor-likes.
  """

  def __init__(self, batch_size):
    # Whether the aggregated element is a CompositeTensor (set in `create`).
    self.composite = None
    super(ConcatAggregator, self).__init__(
        use_steps=True, num_samples=None, steps=None, batch_size=batch_size)

  def create(self, batch_element):
    self.composite = is_composite_or_composite_value(batch_element)

  def aggregate(self, batch_element, batch_start=None, batch_end=None):

    # TODO(psv): Add num_samples check here to detect when output batch
    # #samples is < batch size and != input batch #samples.
    if self.batch_size and self.batch_size < batch_element.shape[0]:
      raise ValueError(
          'Mismatch between expected batch size and model output batch size. '
          'Output shape = {}, expected output shape = shape {}'.format(
              batch_element.shape,
              (self.batch_size,) + batch_element.shape[1:]))
    # Batches are buffered and concatenated once, in `finalize`.
    self.results.append(batch_element)

  def finalize(self):
    # Special case of single batch inference which skips a copy.
    if len(self.results) == 1:
      self.results = self.results[0]

    elif self.composite:
      # TODO(taylorrobie): efficiently concatenate.
      results = self.results[0]
      for r in self.results[1:]:
        results = _append_composite_tensor(results, r)
      self.results = results

    else:
      self.results = np.concatenate(self.results, axis=0)


_COPY_THREADS = 4
_COPY_POOL = None


def get_copy_pool():
  """Shared threadpool for copying arrays.

  Pool instantiation takes ~ 2ms, so a singleton pool is used rather than
  creating a pool per SliceAggregator.

  Returns:
    The global copy threadpool.
  """
  global _COPY_POOL
  if _COPY_POOL is None:
    _COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)
    # Close the pool at interpreter exit so worker threads shut down cleanly.
    atexit.register(_COPY_POOL.close)
  return _COPY_POOL


class SliceAggregator(Aggregator):
  """Combine arrays where the final size is known.

  This class expects to aggregate a single tensor-like rather than a nested
  structure of tensor-likes.

  NumPy copies are an operation that threads handle quite well because all of
  the heavy lifting is in c and does not need the GIL. Moreover, we can perform
  lock-free writes to the same buffer in multiple threads because the nature of
  result aggregation guarantees that either the indices are disjoint or the
  aggregator will throw an exception in finalize. Moreover, because aggregation
  is performed on the slowest varying dimension, assignments for a given batch
  will write to contiguous blocks of memory, further minimizing contention.

  There is, however, some scheduling and context switching overhead which will
  offset the gains from pipelining the slice assignment. Below a given
  threshold it is faster to simply assign in the main thread rather than
  enqueue the assignment in a side thread. The exact threshold will vary from
  system to system, but the time is not very sensitive to the exact transition
  so a value of 2 ** 14 was chosen which should be reasonable on most systems.
  """

  _BINARY_SIZE_THRESHOLD = 2 ** 14
  _MAX_COPY_SECONDS = 300

  def __init__(self, num_samples, batch_size):
    # Events signalling completion of async copies, and errors raised inside
    # the copy threads (re-raised on the main thread in `finalize`).
    self._async_copies = []
    self._pool = get_copy_pool()
    self._errors = []
    super(SliceAggregator, self).__init__(
        use_steps=False,
        num_samples=num_samples,
        steps=None,
        batch_size=batch_size)

  def create(self, batch_element):
    # This step does not need to be pipelined because NumPy empty array
    # initialization is effectively instantaneous.
    shape = (self.num_samples,) + batch_element.shape[1:]
    dtype = batch_element.dtype
    self.results = np.empty(shape=shape, dtype=dtype)

  def aggregate(self, batch_element, batch_start, batch_end):
    # Fail early.
    if self._errors:
      raise self._errors[0]

    # In the special case of single batch inference, no copy is needed.
    if batch_end - batch_start == self.num_samples:
      if self.num_samples != batch_element.shape[0]:
        raise ValueError(
            'Mismatch between expected batch size and model output batch '
            'size. '
            'Output shape = {}, expected output shape = shape {}'.format(
                batch_element.shape, self.results.shape))

      self.results = batch_element
      return

    # This is an approximate threshold, so we don't need to consider the number
    # of bytes per element.
    num_elements = np.prod(batch_element.shape)
    if num_elements < self._BINARY_SIZE_THRESHOLD:
      # Small copy: cheaper to do inline than to schedule on the pool.
      self.results[batch_start:batch_end] = batch_element
    else:
      # Large copy: hand off to the shared thread pool and record the event so
      # `finalize` can wait for completion.
      is_finished = threading.Event()
      self._pool.apply_async(
          self._slice_assign,
          args=(batch_element, batch_start, batch_end, is_finished))
      self._async_copies.append(is_finished)

  def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):
    """Legacy utility method to slice input arrays."""
    try:
      self.results[batch_start:batch_end] = batch_element

    except Exception as e:  # pylint: disable=broad-except
      # `_slice_assign` should only be called in threads and exceptions raised
      # in threads do not carry over to the main thread. So instead we perform
      # a broad catch in the thread and then store the exception to be
      # re-raised in the main thread.
      self._errors.append(e)

    finally:
      # Always signal, even on failure, so `finalize` does not time out.
      is_finished.set()

  def finalize(self):
    start_time = time.time()
    for is_finished in self._async_copies:
      # Budget `_MAX_COPY_SECONDS` for all outstanding copies combined.
      timeout = max([0., self._MAX_COPY_SECONDS - (time.time() - start_time)])
      if not is_finished.wait(timeout):
        raise ValueError('Timed out waiting for copy to complete.')

    if self._errors:
      raise self._errors[0]


class OutputsAggregator(Aggregator):
  """Aggregator that concatenates outputs."""

  _structure = None

  def create(self, batch_outs):
    # SparseTensorValue is a named tuple which nest will flatten, so we need
    # to guard it to properly handle the structure.
    self._structure = tf.__internal__.nest.get_traverse_shallow_structure(
        lambda x: not is_composite_or_composite_value(x), batch_outs)
    batch_outs = tf.__internal__.nest.flatten_up_to(self._structure,
                                                    batch_outs)

    for batch_element in batch_outs:
      if is_composite_or_composite_value(batch_element):
        # If the output is not a ndarray, it will be either a composite tensor
        # or a composite tensor's Value object. In either case, we can't
        # allocate an array to hold the object - we'll handle it later.
        self.results.append(ConcatAggregator(self.batch_size))
      elif isinstance(batch_element, np.ndarray):
        # Slice into a preallocated buffer when total size is known
        # (batch_size mode); otherwise buffer and concatenate (steps mode).
        self.results.append(
            (ConcatAggregator(self.batch_size) if self.use_steps else
             SliceAggregator(self.num_samples, self.batch_size)))
      else:
        # This is not a ndarray, a CompositeTensor, or a CompositeTensorValue.
        # Fail fast rather than trying to concatenate it.
        raise RuntimeError('Attempted to aggregate unsupported object {}.'
                           .format(batch_element))

      self.results[-1].create(batch_element)

  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    # Delegate each flattened output to its per-output sub-aggregator.
    batch_outs = tf.__internal__.nest.flatten_up_to(self._structure,
                                                    batch_outs)
    for batch_element, result in zip(batch_outs, self.results):
      result.aggregate(batch_element, batch_start, batch_end)

  def finalize(self):
    for result in self.results:
      result.finalize()
    self.results = [i.results for i in self.results]
    # Restore the structure recorded in `create`.
    self.results = tf.nest.pack_sequence_as(self._structure, self.results)


def get_progbar(model, count_mode, include_metrics=True):
  """Get Progbar."""
  if include_metrics:
    stateful_metric_names = getattr(model, 'metrics_names', None)
    if stateful_metric_names:
      stateful_metric_names = stateful_metric_names[1:]  # Exclude `loss`
  else:
    stateful_metric_names = None
  return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names)


def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):
  """Determine the number of samples provided for training and evaluation.

  The number of samples is not defined when running with `steps`,
  in which case the number of samples is set to `None`.

  Args:
    ins: List of tensors to be fed to the Keras function.
    batch_size: Integer batch size or `None` if not defined.
    steps: Total number of steps (batches of samples) before declaring
      `_predict_loop` finished. Ignored with the default value of `None`.
    steps_name: The public API's parameter name for `steps`.

  Raises:
    ValueError: when `steps` is `None` and the attribute `ins.shape`
      does not exist.
      Also raises ValueError when `steps` is not `None` and `batch_size` is
      not `None` because they are mutually exclusive.

  Returns:
    When steps is `None`, returns the number of samples to be
    processed based on the size of the first dimension of the first
    input numpy array. When steps is not `None` and `batch_size` is
    `None`, returns `None`.
  """
  if steps is not None and batch_size is not None:
    raise ValueError('If ' + steps_name +
                     ' is set, the `batch_size` must be None.')
  if check_steps_argument(ins, steps, steps_name):
    return None

  if hasattr(ins[0], 'shape'):
    return int(ins[0].shape[0])
  return None  # Edge case where ins == [static_learning_phase]


def standardize_single_array(x, expected_shape=None):
  """Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1."""
  if x is None:
    return None

  # Composite tensors (sparse/ragged) are passed through untouched.
  if is_composite_or_composite_value(x):
    return x

  if isinstance(x, int):
    raise ValueError(
        'Expected an array data type but received an integer: {}'.format(x))

  if (x.shape is not None and len(x.shape) == 1 and
      (expected_shape is None or len(expected_shape) != 1)):
    if tf.is_tensor(x):
      x = tf.compat.v1.expand_dims(x, axis=1)
    else:
      x = np.expand_dims(x, 1)
  return x


def get_composite_shape(tensor):
  """Returns the shape of the passed composite tensor."""
  if isinstance(tensor, tf.compat.v1.SparseTensorValue):
    # SparseTensorValues use a 'dense_shape' attribute
    return tensor.dense_shape
  else:
    return tensor.shape


def standardize_input_data(data,
                           names,
                           shapes=None,
                           check_batch_axis=True,
                           exception_prefix=''):
  """Normalizes inputs and targets provided by users.

  Users may pass data as a list of arrays, dictionary of arrays,
  or as a single array. We normalize this to an ordered list of
  arrays (same order as `names`), while checking that the provided
  arrays have shapes that match the network's expectations.

  Args:
    data: User-provided input data (polymorphic).
    names: List of expected array names.
    shapes: Optional list of expected array shapes.
    check_batch_axis: Boolean; whether to check that the batch axis of the
      arrays matches the expected value found in `shapes`.
    exception_prefix: String prefix used for exception formatting.

  Returns:
    List of standardized input arrays (one array per model input).

  Raises:
    ValueError: in case of improperly formatted user-provided data.
  """
  try:
    data_len = len(data)
  except TypeError:
    # For instance if data is `None` or a symbolic Tensor.
    data_len = None

  if not names:
    if data_len and not isinstance(data, dict):
      raise ValueError(
          'Error when checking model ' + exception_prefix + ': '
          'expected no data, but got:', data)
    return []
  if data is None:
    return [None for _ in range(len(names))]

  if isinstance(data, dict):
    try:
      # Extract arrays in the order given by `names`, unwrapping DataFrames.
      data = [
          data[x].values
          if data[x].__class__.__name__ == 'DataFrame' else data[x]
          for x in names
      ]
    except KeyError as e:
      raise ValueError('No data provided for "' + e.args[0] + '". Need data '
                       'for each key in: ' + str(names))
  elif isinstance(data, (list, tuple)):
    if isinstance(data[0], (list, tuple)):
      data = [np.asarray(d) for d in data]
    elif len(names) == 1 and isinstance(data[0], (float, int)):
      # A flat list of scalars for a single-input model is one array.
      data = [np.asarray(data)]
    else:
      data = [
          x.values if x.__class__.__name__ == 'DataFrame' else x for x in data
      ]
  else:
    data = data.values if data.__class__.__name__ == 'DataFrame' else data
    data = [data]

  if shapes is not None:
    data = [
        standardize_single_array(x, shape) for (x, shape) in zip(data, shapes)
    ]
  else:
    data = [standardize_single_array(x) for x in data]

  if len(data) != len(names):
    if data and hasattr(data[0], 'shape'):
      raise ValueError('Error when checking model ' + exception_prefix +
                       ': the list of Numpy arrays that you are passing to '
                       'your model is not the size the model expected. '
                       'Expected to see ' + str(len(names)) + ' array(s), ' +
                       'for inputs ' + str(names) + ' but instead got the '
                       'following list of ' + str(len(data)) + ' arrays: ' +
                       str(data)[:200] + '...')
    elif len(names) > 1:
      raise ValueError('Error when checking model ' + exception_prefix +
                       ': you are passing a list as input to your model, '
                       'but the model expects a list of ' + str(len(names)) +
                       ' Numpy arrays instead. The list you passed was: ' +
                       str(data)[:200])
    elif len(data) == 1 and not hasattr(data[0], 'shape'):
      raise TypeError('Error when checking model ' + exception_prefix +
                      ': data should be a Numpy array, or list/dict of '
                      'Numpy arrays. Found: ' + str(data)[:200] + '...')
    elif len(names) == 1:
      data = [np.asarray(data)]

  # Check shapes compatibility.
  if shapes:
    for i in range(len(names)):
      if shapes[i] is not None:
        if tf.is_tensor(data[i]):
          tensorshape = data[i].shape
          if not tensorshape:
            continue
          data_shape = tuple(tensorshape.as_list())
        elif is_composite_or_composite_value(data[i]):
          tensorshape = get_composite_shape(data[i])
          data_shape = tuple(tensorshape.as_list())
        else:
          data_shape = data[i].shape

        shape = shapes[i]
        if len(data_shape) != len(shape):
          raise ValueError('Error when checking ' + exception_prefix +
                           ': expected ' + names[i] + ' to have ' +
                           str(len(shape)) + ' dimensions, but got array '
                           'with shape ' + str(data_shape))
        if not check_batch_axis:
          data_shape = data_shape[1:]
          shape = shape[1:]
        for dim, ref_dim in zip(data_shape, shape):
          # `None` in either shape means "any size" for that dimension.
          if ref_dim != dim and ref_dim is not None and dim is not None:
            raise ValueError('Error when checking ' + exception_prefix +
                             ': expected ' + names[i] + ' to have shape ' +
                             str(shape) + ' but got array with shape ' +
                             str(data_shape))
  return data


def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
  """Maps `sample_weight` or `class_weight` to model outputs.

  Args:
    x_weight: User-provided `sample_weight` or `class_weight` argument.
    output_names: List of output names (strings) in the model.
    weight_type: A string used purely for exception printing.

  Returns:
    A list of `sample_weight` or `class_weight` where there are exactly
        one element per model output.

  Raises:
    ValueError: In case of invalid user-provided argument.
  """
  if x_weight is None or (isinstance(x_weight, (list, tuple)) and
                          len(x_weight) == 0):  # pylint: disable=g-explicit-length-test
    return [None for _ in output_names]
  if len(output_names) == 1:
    # Single-output model: accept a bare weight, a 1-element list, or a dict
    # keyed by the output name.
    if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
      return x_weight
    if isinstance(x_weight, dict) and output_names[0] in x_weight:
      return [x_weight[output_names[0]]]
    else:
      return [x_weight]
  if isinstance(x_weight, (list, tuple)):
    if len(x_weight) != len(output_names):
      raise ValueError('Provided `' + weight_type + '` was a list of ' +
                       str(len(x_weight)) + ' elements, but the model has ' +
                       str(len(output_names)) + ' outputs. '
                       'You should provide one `' + weight_type + '`'
                       'array per model output.')
    return x_weight
  if isinstance(x_weight, collections.abc.Mapping):
    generic_utils.check_for_unexpected_keys(weight_type, x_weight,
                                            output_names)
    # Missing outputs default to None (unweighted).
    x_weights = []
    for name in output_names:
      x_weights.append(x_weight.get(name))
    return x_weights
  else:
    raise TypeError('The model has multiple outputs, so `' + weight_type +
                    '` '
                    'should be either a list or a dict. '
                    'Provided `' + weight_type + '` type not understood: ' +
                    str(x_weight))


def standardize_class_weights(class_weight, output_names):
  # Thin wrapper: per-output mapping for `class_weight`.
  return standardize_sample_or_class_weights(class_weight, output_names,
                                             'class_weight')


def standardize_sample_weights(sample_weight, output_names):
  # Thin wrapper: per-output mapping for `sample_weight`.
  return standardize_sample_or_class_weights(sample_weight, output_names,
                                             'sample_weight')


def check_array_lengths(inputs, targets, weights=None):
  """Does user input validation for numpy arrays.

  Args:
    inputs: list of Numpy arrays of inputs.
    targets: list of Numpy arrays of targets.
    weights: list of Numpy arrays of sample weights.

  Raises:
    ValueError: in case of incorrectly formatted data.
""" def is_tensor_or_composite_tensor(x): return tf.is_tensor(x) or is_composite_or_composite_value(x) def set_of_lengths(x): # Returns a set with the variation between # different shapes, with None => 0 if x is None: return {} else: return set([ y.shape[0] for y in x if y is not None and not is_tensor_or_composite_tensor(y) ]) set_x = set_of_lengths(inputs) set_y = set_of_lengths(targets) set_w = set_of_lengths(weights) if len(set_x) > 1: raise ValueError('All input arrays (x) should have ' 'the same number of samples. Got array shapes: ' + str([x.shape for x in inputs])) if len(set_y) > 1: raise ValueError('All target arrays (y) should have ' 'the same number of samples. Got array shapes: ' + str([y.shape for y in targets])) if set_x and set_y and list(set_x)[0] != list(set_y)[0]: raise ValueError('Input arrays should have ' 'the same number of samples as target arrays. ' 'Found ' + str(list(set_x)[0]) + ' input samples ' 'and ' + str(list(set_y)[0]) + ' target samples.') if len(set_w) > 1: raise ValueError('All sample_weight arrays should have ' 'the same number of samples. Got array shapes: ' + str([w.shape for w in weights])) if set_y and set_w and list(set_y)[0] != list(set_w)[0]: raise ValueError('Sample_weight arrays should have ' 'the same number of samples as target arrays. Got ' + str(list(set_y)[0]) + ' input samples and ' + str(list(set_w)[0]) + ' target samples.') def check_loss_and_target_compatibility(targets, loss_fns, output_shapes): """Does validation on the compatibility of targets and loss functions. This helps prevent users from using loss functions incorrectly. This check is purely for UX purposes. Args: targets: list of Numpy arrays of targets. loss_fns: list of loss functions. output_shapes: list of shapes of model outputs. Raises: ValueError: if a loss function or target array is incompatible with an output. 
""" key_loss_fns = { losses.mean_squared_error, losses.binary_crossentropy, losses.categorical_crossentropy } key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy, losses.CategoricalCrossentropy) for y, loss, shape in zip(targets, loss_fns, output_shapes): if y is None or loss is None or tf.is_tensor(y): continue if losses.is_categorical_crossentropy(loss): if y.shape[-1] == 1: raise ValueError('You are passing a target array of shape ' + str(y.shape) + ' while using as loss `categorical_crossentropy`. ' '`categorical_crossentropy` expects ' 'targets to be binary matrices (1s and 0s) ' 'of shape (samples, classes). ' 'If your targets are integer classes, ' 'you can convert them to the expected format via:\n' '```\n' 'from keras.utils import to_categorical\n' 'y_binary = to_categorical(y_int)\n' '```\n' '\n' 'Alternatively, you can use the loss function ' '`sparse_categorical_crossentropy` instead, ' 'which does expect integer targets.') is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper) if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and (loss.fn in key_loss_fns))): for target_dim, out_dim in zip(y.shape[1:], shape[1:]): if out_dim is not None and target_dim != out_dim: loss_name = loss.name if loss_name is None: loss_type = loss.fn if is_loss_wrapper else type(loss) loss_name = loss_type.__name__ raise ValueError('A target array with shape ' + str(y.shape) + ' was passed for an output of shape ' + str(shape) + ' while using as loss `' + loss_name + '`. ' 'This loss expects targets to have the same shape ' 'as the output.') def collect_per_output_metric_info(metrics, output_names, output_shapes, loss_fns, from_serialized=False, is_weighted=False): """Maps metric names and functions to model outputs. Args: metrics: a list or a list of lists or a dict of metric functions. output_names: a list of the names (strings) of model outputs. output_shapes: a list of the shapes (strings) of model outputs. 
    loss_fns: a list of the loss functions corresponding to the model outputs.
    from_serialized: whether the model the metrics are being sourced from is
      being initialized from a serialized format.
    is_weighted: Boolean indicating whether the given metrics are weighted.

  Returns:
    A list (one entry per model output) of dicts.
    For instance, if the model has 2 outputs, and for the first output
    we want to compute "binary_accuracy" and "binary_crossentropy",
    and just "binary_accuracy" for the second output,
    the list would look like: `[{
        'acc': binary_accuracy(),
        'ce': binary_crossentropy(),
      }, {
        'acc': binary_accuracy(),
      }]`

  Raises:
    TypeError: if an incorrect type is passed for the `metrics` argument.
  """
  if not metrics:
    return [{} for _ in output_names]

  if isinstance(metrics, list):
    any_sub_list = any(isinstance(m, list) for m in metrics)
    if any_sub_list:
      if len(metrics) != len(output_names):
        raise ValueError('When passing a list of lists as `metrics`, '
                         'it should have one entry per model output. '
                         'The model has ' + str(len(output_names)) +
                         ' outputs, but you passed metrics=' + str(metrics))
      # User has provided a list of len = len(outputs).
      nested_metrics = [generic_utils.to_list(m) for m in metrics]
    else:
      # If it is a single list we then apply all metrics to all outputs.
      if len(output_names) > 1:
        nested_metrics = []
        for _ in output_names:
          # Clone so each output gets independent stateful metric objects.
          nested_metrics.append(
              [metrics_module.clone_metric(m) for m in metrics])
      else:
        nested_metrics = [metrics]
  elif isinstance(metrics, collections.abc.Mapping):
    generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)
    nested_metrics = []
    for name in output_names:
      output_metrics = generic_utils.to_list(metrics.get(name, []))
      nested_metrics.append(output_metrics)
  else:
    raise TypeError('Type of `metrics` argument not understood. '
                    'Expected a list or dictionary, found: ' + str(metrics))

  per_output_metrics = []
  for i, metrics in enumerate(nested_metrics):
    metrics_dict = collections.OrderedDict()
    for metric in metrics:
      metric_name = get_metric_name(metric, is_weighted)
      metric_fn = get_metric_function(
          metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])
      metric_fn._from_serialized = from_serialized  # pylint: disable=protected-access

      # If the metric function is not stateful, we create a stateful version.
      if not isinstance(metric_fn, metrics_module.Metric):
        metric_fn = metrics_module.MeanMetricWrapper(
            metric_fn, name=metric_name)
        # If the metric is being revived from something stateless, such as a
        # string (e.g. "accuracy"), we may need to later reapply
        # transformations such as renaming.
        metric_fn._from_serialized = False  # pylint: disable=protected-access
      metrics_dict[metric_name] = metric_fn
    per_output_metrics.append(metrics_dict)

  return per_output_metrics


def batch_shuffle(index_array, batch_size):
  """Shuffles an array in a batch-wise fashion.

  Useful for shuffling HDF5 arrays
  (where one cannot access arbitrary indices).

  Args:
    index_array: array of indices to be shuffled.
    batch_size: integer.

  Returns:
    The `index_array` array, shuffled in a batch-wise fashion.
  """
  batch_count = int(len(index_array) / batch_size)
  # to reshape we need to be cleanly divisible by batch size
  # we stash extra items and reappend them after shuffling
  last_batch = index_array[batch_count * batch_size:]
  index_array = index_array[:batch_count * batch_size]
  index_array = index_array.reshape((batch_count, batch_size))
  # Shuffle whole batches; within-batch order is preserved.
  np.random.shuffle(index_array)
  index_array = index_array.flatten()
  return np.append(index_array, last_batch)


def standardize_weights(y,
                        sample_weight=None,
                        class_weight=None,
                        sample_weight_mode=None):
  """Performs sample weight validation and standardization.

  Everything gets normalized to a single sample-wise (or timestep-wise)
  weight array.
If both `sample_weight` and `class_weight` are provided, the weights are multiplied. Args: y: Numpy array or Tensor of model targets to be weighted. sample_weight: User-provided `sample_weight` argument. class_weight: User-provided `class_weight` argument. sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated that we expect 2D weight data that will be applied to the last 2 dimensions of the targets (i.e. we are weighting timesteps, not samples). Returns: A numpy array of target weights, one entry per sample to weight. Raises: ValueError: In case of invalid user-provided arguments. """ # Iterator may return sample_weight as 1-tuple if isinstance(sample_weight, tuple): sample_weight = sample_weight[0] if sample_weight_mode is not None and sample_weight_mode != 'samplewise': if sample_weight_mode != 'temporal': raise ValueError('"sample_weight_mode ' 'should be None or "temporal". ' 'Found: ' + str(sample_weight_mode)) if len(y.shape) < 3: raise ValueError('Found a sample_weight array for ' 'an input with shape ' + str(y.shape) + '. ' 'Timestep-wise sample weighting (use of ' 'sample_weight_mode="temporal") is restricted to ' 'outputs that are at least 3D, i.e. that have ' 'a time dimension.') if sample_weight is not None and len(sample_weight.shape) != 2: raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. ' 'In order to use timestep-wise sample weighting, ' 'you should pass a 2D sample_weight array.') else: if sample_weight is not None and len(sample_weight.shape) != 1: raise ValueError( 'Found a sample_weight array with shape {}. In order to ' 'use timestep-wise sample weights, you should specify ' 'sample_weight_mode="temporal" in compile(); founssd "{}" ' 'instead. 
If you just mean to use sample-wise weights, ' 'make sure your sample_weight array is 1D.'.format( sample_weight.shape, sample_weight_mode)) if sample_weight is not None: if len(sample_weight.shape) > len(y.shape): raise ValueError('Found a sample_weight with shape' + str(sample_weight.shape) + '.' 'Expected sample_weight with rank ' 'less than or equal to ' + str(len(y.shape))) if (not tf.is_tensor(sample_weight) and y.shape[:sample_weight.ndim] != sample_weight.shape): raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + ' for an input with shape ' + str(y.shape) + '. ' 'sample_weight cannot be broadcast.') # Class weights applied per-sample. class_sample_weight = None if isinstance(class_weight, dict): if len(y.shape) > 2: raise ValueError('`class_weight` not supported for ' '3+ dimensional targets.') if tf.is_tensor(y): # Few classes are expected, so densifying is reasonable. keys = np.array(sorted(class_weight.keys())) values = np.array([class_weight[i] for i in keys]) weight_vector = np.zeros(np.max(keys) + 1) weight_vector[:] = np.nan weight_vector[keys] = values y_classes = tf.__internal__.smart_cond.smart_cond( len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: tf.cast(backend.reshape(y, (-1,)), tf.int64)) class_sample_weight = tf.compat.v1.gather(weight_vector, y_classes) tf.debugging.check_numerics( class_sample_weight, 'Invalid classes or class weights detected. 
NaN values indicate that ' 'an appropriate class weight could not be determined.') class_sample_weight = tf.cast(class_sample_weight, backend.floatx()) if sample_weight is not None: sample_weight = tf.cast( tf.convert_to_tensor(sample_weight), backend.floatx()) else: y_classes = y if len(y.shape) == 2: if y.shape[1] > 1: y_classes = np.argmax(y, axis=1) elif y.shape[1] == 1: y_classes = np.reshape(y, y.shape[0]) class_sample_weight = np.asarray( [class_weight[cls] for cls in y_classes if cls in class_weight]) if len(class_sample_weight) != len(y_classes): # subtract the sets to pick all missing classes existing_classes = set(y_classes) existing_class_weight = set(class_weight.keys()) raise ValueError( '`class_weight` must contain all classes in the data.' ' The classes %s exist in the data but not in ' '`class_weight`.' % (existing_classes - existing_class_weight)) if class_sample_weight is not None and sample_weight is not None: # Multiply weights if both are provided. return class_sample_weight * sample_weight if sample_weight is not None: return sample_weight if class_sample_weight is not None: return class_sample_weight return None def has_symbolic_tensors(ls): if tf.executing_eagerly(): return False return has_tensors(ls) def has_tensors(ls): """Returns true if `ls` contains tensors.""" # Note: at some point in time ragged tensors didn't count as tensors, so this # returned false for ragged tensors. Making this return true fails some tests # which would then require a steps_per_epoch argument. if isinstance(ls, (list, tuple)): return any( tf.is_tensor(v) and not isinstance(v, tf.RaggedTensor) for v in ls) if isinstance(ls, dict): return any( tf.is_tensor(v) and not isinstance(v, tf.RaggedTensor) for _, v in ls.items()) return tf.is_tensor(ls) and not isinstance( ls, tf.RaggedTensor) def get_metric_name(metric, weighted=False): """Returns the name corresponding to the given metric input. Args: metric: Metric function name or reference. 
weighted: Boolean indicating if the given metric is weighted. Returns: The metric name. """ if tf.__internal__.tf2.enabled(): # We keep the string that the user has set in compile as the metric name. if isinstance(metric, str): return metric metric = metrics_module.get(metric) return metric.name if hasattr(metric, 'name') else metric.__name__ else: metric_name_prefix = 'weighted_' if weighted else '' if metric in ('accuracy', 'acc', 'crossentropy', 'ce'): if metric in ('accuracy', 'acc'): suffix = 'acc' elif metric in ('crossentropy', 'ce'): suffix = 'ce' else: metric_fn = metrics_module.get(metric) # Get metric name as string if hasattr(metric_fn, 'name'): suffix = metric_fn.name else: suffix = metric_fn.__name__ metric_name = metric_name_prefix + suffix return metric_name def get_metric_function(metric, output_shape=None, loss_fn=None): """Returns the metric function corresponding to the given metric input. Args: metric: Metric function name or reference. output_shape: The shape of the output that this metric will be calculated for. loss_fn: The loss function used. Returns: The metric function. """ if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']: return metrics_module.get(metric) is_sparse_categorical_crossentropy = ( isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.sparse_categorical_crossentropy)) is_binary_crossentropy = ( isinstance(loss_fn, losses.BinaryCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.binary_crossentropy)) if metric in ['accuracy', 'acc']: if output_shape[-1] == 1 or is_binary_crossentropy: return metrics_module.binary_accuracy elif is_sparse_categorical_crossentropy: return metrics_module.sparse_categorical_accuracy # If the output_shape[-1] is not 1, then we know output is `categorical`. # We assume it is sparse categorical only if loss is explicitly given # as sparse categorical crossentropy loss. 
return metrics_module.categorical_accuracy else: if output_shape[-1] == 1 or is_binary_crossentropy: return metrics_module.binary_crossentropy elif is_sparse_categorical_crossentropy: return metrics_module.sparse_categorical_crossentropy return metrics_module.categorical_crossentropy def call_metric_function(metric_fn, y_true, y_pred=None, weights=None, mask=None): """Invokes metric function and returns the metric result tensor.""" if mask is not None: mask = tf.cast(mask, y_pred.dtype) if weights is None: # Use mask as sample weight. weights = mask else: # Update dimensions of weights to match with mask. weights = tf.cast(weights, dtype=y_pred.dtype) mask, _, weights = losses_utils.squeeze_or_expand_dimensions( mask, sample_weight=weights) weights *= mask if y_pred is not None: return metric_fn(y_true, y_pred, sample_weight=weights) # `Mean` metric only takes a single value. return metric_fn(y_true, sample_weight=weights) def get_loss_function(loss): """Returns the loss corresponding to the loss input in `compile` API.""" if loss is None or isinstance(loss, losses.Loss): return loss if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss): # It is not safe to assume that the loss takes no constructor arguments. raise ValueError( 'Received uninstantiated Loss class: {}\nPlease call loss ""classes ' 'before passing them to Model.compile.'.format(loss)) # Deserialize loss configuration, if needed. if isinstance(loss, collections.abc.Mapping): loss = losses.get(loss) # Custom callable class. if callable(loss) and not hasattr(loss, '__name__'): return loss # Wrap loss function with signature `(y_true, y_pred, **kwargs)` # in `LossFunctionWrapper` class. loss_fn = losses.get(loss) # For losses which are given as strings/functions in the compile API, # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE` # (both in distribution strategy context and otherwise). 
return losses.LossFunctionWrapper( loss_fn, name=loss_fn.__name__, reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE) def validate_dataset_input(x, y, sample_weight, validation_split=None): """Validates user input arguments when a dataset iterator is passed. Args: x: Input data. A `tf.data` dataset or iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be `None` when `x` is a dataset iterator. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. Expected to be `None` when `x` is a dataset iterator validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Expected to be `None` when `x` is a dataset iterator. Raises: ValueError: if argument `y` or `sample_weight` or `validation_split` are provided by user. """ if y is not None: raise ValueError('You passed a dataset or dataset iterator (%s) as ' 'input `x` to your model. In that case, you should ' 'not specify a target (`y`) argument, since the dataset ' 'or dataset iterator generates both input data and ' 'target data. ' 'Received: %s' % (x, y)) if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when input ' '`x` is a dataset or a dataset iterator. Instead, you' 'can provide sample_weight as the third element of your' 'dataset, i.e. (inputs, targets, sample_weight). ' 'Received: x=%s, sample_weight=%s' % (x, sample_weight)) if validation_split is not None and validation_split != 0.0: raise ValueError( '`validation_split` argument is not supported when ' 'input `x` is a dataset or a dataset iterator. 
' 'Received: x=%s, validation_split=%f' % (x, validation_split)) def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'): """Helper function to validate either inputs or targets.""" if isinstance(inp, (list, tuple)): if not all(isinstance(v, np.ndarray) or tf.is_tensor(v) for v in inp): raise ValueError( 'Please provide as model inputs either a single array or a list of ' 'arrays. You passed: {}={}'.format(field_name, str(orig_inp))) elif isinstance(inp, dict): if not allow_dict: raise ValueError( 'You cannot pass a dictionary as model {}.'.format(field_name)) elif not isinstance(inp, np.ndarray) and not tf.is_tensor(inp): raise ValueError( 'Please provide as model inputs either a single array or a list of ' 'arrays. You passed: {}={}'.format(field_name, orig_inp)) def check_generator_arguments(y=None, sample_weight=None, validation_split=None): """Validates arguments passed when using a generator.""" if y is not None: raise ValueError('`y` argument is not supported when data is' 'a generator or Sequence instance. Instead pass targets' ' as the second element of the generator.') if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when data is' 'a generator or Sequence instance. Instead pass sample' ' weights as the third element of the generator.') if validation_split: raise ValueError('If your data is in the form of a Python generator, ' 'you cannot use `validation_split`.') def check_steps_argument(input_data, steps, steps_name): """Validates `steps` argument based on input data's type. The cases when `steps` value must be provided are when 1. input data passed is an iterator. 2. model was built on top of symbolic tensors, input data is not required and is `None`. 3. input data passed is a symbolic tensor. Args: input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or `None`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. 
steps_name: The public API's parameter name for `steps`. Returns: boolean, True if `steps` argument is required, else False. Raises: ValueError: if `steps` argument is required for given input data type but not provided. """ is_x_iterator = isinstance( input_data, (tf.compat.v1.data.Iterator, tf.data.Iterator)) if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and not input_data)): if steps is None: input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors' raise ValueError('When using {input_type} as input to a model, you should' ' specify the `{steps_name}` argument.'.format( input_type=input_type_str, steps_name=steps_name)) return True if isinstance(input_data, (tf.compat.v1.data.Dataset, tf.data.Dataset)): return True if steps is not None: list_types = (np.ndarray, list, tuple) if (isinstance(input_data, list_types) or (isinstance(input_data, dict) and any(isinstance(v, list_types) for v in input_data.values()))): logging.warning('When passing input data as arrays, do not specify ' '`steps_per_epoch`/`steps` argument. ' 'Please use `batch_size` instead.') return False def cast_single_tensor(x, dtype=None): if isinstance(x, np.ndarray): x = tf.convert_to_tensor(x) dtype = dtype or backend.floatx() if x.dtype.is_floating: return tf.cast(x, dtype=dtype) return x def cast_if_floating_dtype_and_mismatch(targets, outputs): """Returns target data tensors using correct datatype. Checks that each target and output pair are the same datatype. If not, casts the target to the output's datatype. Args: targets: tensor or list of targets. outputs: tensor or list of outputs. Returns: Targets in appropriate datatype. """ if tf.is_tensor(targets): # There is one target, so output[0] should be the only output. 
return cast_single_tensor(targets, dtype=outputs[0].dtype) new_targets = [] for target, out in zip(targets, outputs): if isinstance(target, np.ndarray): target = tf.convert_to_tensor(target) if target.dtype != out.dtype: new_targets.append(cast_single_tensor(target, dtype=out.dtype)) else: new_targets.append(target) return new_targets def cast_if_floating_dtype(x, dtype=None): """Casts the given data tensors to the default floating point type. Casts only if the input is already a floating point type. Args: x: tensor or list/tuple of tensors. dtype: The dtype to which Tensors should be cast. Returns: Converted input. """ return tf.nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype), x) def cast_to_model_input_dtypes(x, model): """Casts the given data tensors to the dtypes of the model inputs. Args: x: tensor or list/tuple of tensors. model: The model. Returns: Converted input. Each tensor is casted to the corresponding input in `model.inputs`. """ input_dtypes = tf.nest.map_structure(lambda t: t.dtype, model.inputs) return tf.nest.map_structure(tf.cast, x, input_dtypes) def prepare_sample_weight_modes(training_endpoints, sample_weight_mode): """Prepares sample weight modes for the model. Args: training_endpoints: List of model _TrainingEndpoints. sample_weight_mode: sample weight mode user input passed from compile API. Raises: ValueError: In case of invalid `sample_weight_mode` input. 
""" if isinstance(sample_weight_mode, collections.abc.Mapping): generic_utils.check_for_unexpected_keys( 'sample_weight_mode', sample_weight_mode, [e.output_name for e in training_endpoints]) for end_point in training_endpoints: if not end_point.should_skip_target_weights(): if end_point.output_name not in sample_weight_mode: raise ValueError('Output ' + end_point.output_name + 'missing from `_sample_weight_modes` dictionary') else: end_point.sample_weight_mode = sample_weight_mode.get( end_point.output_name) elif isinstance(sample_weight_mode, (list, tuple)): if len(sample_weight_mode) != len(training_endpoints): raise ValueError('When passing a list as sample_weight_mode, ' 'it should have one entry per model output. ' 'The model has ' + str(len(training_endpoints)) + ' outputs, but you passed ' + str(len(sample_weight_mode)) + '_sample_weight_modes.') for mode, endpoint in zip(sample_weight_mode, training_endpoints): if not endpoint.should_skip_target_weights(): endpoint.sample_weight_mode = mode else: for endpoint in training_endpoints: if not endpoint.should_skip_target_weights(): endpoint.sample_weight_mode = sample_weight_mode def prepare_loss_functions(loss, output_names): """Converts loss to a list of loss functions. Args: loss: String (name of objective function), objective function or `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. output_names: List of model output names. Returns: A list of loss objective functions. Raises: ValueError: If loss is a dict with keys not in model output names, or if loss is a list with len not equal to model outputs. 
""" if isinstance(loss, collections.abc.Mapping): generic_utils.check_for_unexpected_keys('loss', loss, output_names) loss_functions = [] for name in output_names: if name not in loss: logging.warning( 'Output {0} missing from loss dictionary. We assume ' 'this was done on purpose. The fit and evaluate APIs will not be ' 'expecting any data to be passed to {0}.'.format(name)) loss_functions.append(get_loss_function(loss.get(name, None))) elif isinstance(loss, str): loss_functions = [get_loss_function(loss) for _ in output_names] elif isinstance(loss, collections.abc.Sequence): if len(loss) != len(output_names): raise ValueError('When passing a list as loss, it should have one entry ' 'per model outputs. The model has {} outputs, but you ' 'passed loss={}'.format(len(output_names), loss)) loss_functions = tf.nest.map_structure(get_loss_function, loss) else: loss_functions = [get_loss_function(loss) for _ in range(len(output_names))] return loss_functions def prepare_loss_weights(training_endpoints, loss_weights=None): """Converts loss weights to a list of loss weights. The result loss weights will be populated on the training endpoint. Args: training_endpoints: List of model training endpoints. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a dict, it is expected to map output names (strings) to scalar coefficients. Raises: ValueError: If loss weight is a dict with key not in model output names, or if loss is a list with len not equal to model outputs. """ if loss_weights is None: for e in training_endpoints: e.loss_weight = 1. 
elif isinstance(loss_weights, collections.abc.Mapping): generic_utils.check_for_unexpected_keys( 'loss_weights', loss_weights, [e.output_name for e in training_endpoints]) for e in training_endpoints: e.loss_weight = loss_weights.get(e.output_name, 1.) elif isinstance(loss_weights, list): if len(loss_weights) != len(training_endpoints): raise ValueError('When passing a list as loss_weights, ' 'it should have one entry per model output. ' 'The model has ' + str(len(training_endpoints)) + ' outputs, but you passed loss_weights=' + str(loss_weights)) for w, e in zip(loss_weights, training_endpoints): e.loss_weight = w else: raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.') # TODO(rohanj): This is a hack to get around not depending on feature_column and # create a cyclical dependency. Figure out a cleaner solution def is_feature_layer(layer): """Returns whether `layer` is a FeatureLayer or not.""" return getattr(layer, '_is_feature_layer', False) def is_eager_dataset_or_iterator(data): return tf.executing_eagerly() and isinstance( data, (tf.compat.v1.data.Dataset, tf.data.Dataset, tf.data.Iterator)) # pylint: disable=protected-access def get_dataset_graph_def(dataset): if tf.executing_eagerly(): graph_def_str = dataset._as_serialized_graph().numpy() else: graph_def_str = backend.get_value(dataset._as_serialized_graph()) return tf.compat.v1.GraphDef().FromString(graph_def_str) def verify_dataset_shuffled(x): """Verifies that the dataset is shuffled. Args: x: Dataset passed as an input to the model. Returns: boolean, whether the input dataset is shuffled or not. 
""" assert isinstance(x, tf.data.Dataset) graph_def = get_dataset_graph_def(x) for node in graph_def.node: if node.op.startswith('ShuffleDataset'): return True # Also check graph_def.library.function for ds.interleave or ds.flat_map for function in graph_def.library.function: for node in function.node_def: if node.op.startswith('ShuffleDataset'): return True logging.warning('Expected a shuffled dataset but input dataset `x` is ' 'not shuffled. Please invoke `shuffle()` on input dataset.') return False def is_dataset_or_iterator(data): return isinstance(data, (tf.compat.v1.data.Dataset, tf.data.Dataset, tf.compat.v1.data.Iterator, tf.data.Iterator)) def get_iterator(dataset): """Create and initialize an iterator from a dataset.""" if tf.executing_eagerly(): iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) else: iterator = tf.compat.v1.data.make_initializable_iterator(dataset) initialize_iterator(iterator) return iterator def initialize_iterator(iterator): if not tf.executing_eagerly(): init_op = iterator.initializer backend.get_session((init_op,)).run(init_op) def extract_tensors_from_dataset(dataset): """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. """ iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight def unpack_iterator_input(iterator): """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Args: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. 
""" try: next_element = iterator.get_next() except tf.errors.OutOfRangeError: raise RuntimeError('Your dataset iterator ran out of data; ' 'Make sure that your dataset can generate ' 'required number of samples.') if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( 'Please provide model inputs as a list or tuple of 2 or 3 ' 'elements: (input, target) or (input, target, sample_weights) ' 'Received %s' % next_element) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return x, y, weights def infer_steps_for_dataset(model, dataset, steps, epochs=1, steps_name='steps'): """Infers steps_per_epoch needed to loop through a dataset. Args: model: Keras model instance. dataset: Input data of type tf.data.Dataset. steps: Number of steps to draw from the dataset (may be None if unknown). epochs: Number of times to iterate over the dataset. steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. Returns: Integer or `None`. Inferred number of steps to loop through the dataset. `None` is returned if 1) the size of the dataset is unknown and `steps` was not specified, or 2) this is multi-worker training and auto sharding is enabled. Raises: ValueError: In case of invalid argument values. """ assert isinstance(dataset, tf.data.Dataset) if (model._in_multi_worker_mode() and (dataset.options().experimental_distribute.auto_shard_policy != tf.data.experimental.AutoShardPolicy.OFF)): # If the dataset would be auto-sharded, we should not infer a local # steps_per_epoch due to the possible imbalanced sharding between workers. 
return None size = backend.get_value(tf.data.experimental.cardinality(dataset)) if size == tf.data.experimental.INFINITE_CARDINALITY and steps is None: raise ValueError('When passing an infinitely repeating dataset, you ' 'must specify the `%s` argument.' % (steps_name,)) if size >= 0: if steps is not None and steps * epochs > size: if epochs > 1: raise ValueError('The dataset you passed contains %s batches, but you ' 'passed `epochs=%s` and `%s=%s`, which is a total of ' '%s steps. We cannot draw that many steps from this ' 'dataset. We suggest to set `%s=%s`.' % (size, epochs, steps_name, steps, steps * epochs, steps_name, size // epochs)) else: raise ValueError('The dataset you passed contains %s batches, but you ' 'passed `%s=%s`. We cannot draw that many steps from ' 'this dataset. We suggest to set `%s=%s`.' % (size, steps_name, steps, steps_name, size)) if steps is None: if size >= 0: return size return None return steps class ModelInputs: """Encapsulates model inputs. Allows for transforming model inputs while keeping the same structure. """ def __init__(self, inputs): self._inputs = inputs self._is_dict = isinstance(self._inputs, dict) self._is_single_input = not isinstance(self._inputs, (list, tuple, dict)) self._flattened_inputs = [] self._input_names = [] if self._is_dict: for k in sorted(self._inputs.keys()): self._flattened_inputs.append(self._inputs[k]) self._input_names.append(k) else: self._flattened_inputs = tf.nest.flatten(self._inputs) self._input_names = [ 'input_%d' % (i + 1) for i in range(len(self._flattened_inputs)) ] def get_input_names(self): """Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. For dictionary case, we return a sorted list of keys. 
""" return self._input_names def get_symbolic_inputs(self, return_single_as_list=False): """Returns inputs to be set as self.inputs for a model.""" # TODO(karmel): There is a side-effect here where what you get # with as_list and as_dict depends on whether you have called this # method first, since it modifies in place. for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)): if isinstance(v, (list, float, int)): v = np.asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, np.ndarray): # We fix the placeholder shape except the batch size. # This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. shape = (None,) + tuple(v.shape[1:]) if shape == (None,): shape = (None, 1) dtype = tf.as_dtype(v.dtype) if dtype.is_floating: dtype = backend.floatx() v = backend.placeholder(shape=shape, name=k, dtype=dtype) elif isinstance(v, tf.TensorSpec): shape = (None,) + tuple(v.shape.as_list()[1:]) if shape == (None,): shape = (None, 1) v = backend.placeholder(shape=shape, name=k, dtype=v.dtype) self._flattened_inputs[i] = v if self._is_dict: return dict(zip(self._input_names, self._flattened_inputs)) if self._is_single_input and not return_single_as_list: return self._flattened_inputs[0] return self._flattened_inputs def as_dict(self): """An iterable over a dictionary version of inputs.""" for k, v in zip(self._input_names, self._flattened_inputs): yield k, v def as_list(self): """Returning the inputs as a list.""" return self._flattened_inputs # Allow use of methods not exposed to the user. # pylint: disable=protected-access # pylint: enable=protected-access def generic_output_names(outputs_list): return ['output_%d' % (i + 1) for i in range(len(outputs_list))] def should_run_validation(validation_freq, epoch): """Checks if validation should be run this epoch. Args: validation_freq: Integer or list. 
If an integer, specifies how many training epochs to run before a new validation run is performed. If a list, specifies the epochs on which to run validation. epoch: Integer, the number of the training epoch just completed. Returns: Bool, True if validation should be run. Raises: ValueError: if `validation_freq` is an Integer and less than 1, or if it is neither an Integer nor a Sequence. """ # `epoch` is 0-indexed internally but 1-indexed in the public API. one_indexed_epoch = epoch + 1 if isinstance(validation_freq, int): if validation_freq < 1: raise ValueError('`validation_freq` can not be less than 1.') return one_indexed_epoch % validation_freq == 0 if not isinstance(validation_freq, collections.abc.Container): raise ValueError('`validation_freq` must be an Integer or ' '`collections.abc.Container` (e.g. list, tuple, etc.)') return one_indexed_epoch in validation_freq def split_training_and_validation_data(x, y, sample_weights, validation_split): """Split input data into train/eval section based on validation_split.""" if has_symbolic_tensors(x): raise ValueError('If your data is in the form of symbolic tensors, ' 'you cannot use `validation_split`.') if hasattr(x[0], 'shape'): split_at = int(x[0].shape[0] * (1. - validation_split)) else: split_at = int(len(x[0]) * (1. - validation_split)) x, val_x = (generic_utils.slice_arrays(x, 0, split_at), generic_utils.slice_arrays(x, split_at)) y, val_y = (generic_utils.slice_arrays(y, 0, split_at), generic_utils.slice_arrays(y, split_at)) if sample_weights: sample_weights, val_sample_weights = ( generic_utils.slice_arrays(sample_weights, 0, split_at), generic_utils.slice_arrays(sample_weights, split_at), ) else: val_sample_weights = None return x, y, sample_weights, val_x, val_y, val_sample_weights def unpack_validation_data(validation_data, raise_if_ambiguous=True): """Unpack validation data based input type. The validation data is not touched if its dataset or dataset iterator. 
For other type of input (Numpy or tensor), it will be unpacked into tuple of 3 which is x, y and sample weights. Args: validation_data: dataset, dataset iterator, or numpy, tensor tuple. raise_if_ambiguous: boolean on whether to fail if validation_data cannot be parsed. Otherwise simply return validation_data, None, None and defer the decision to the caller. Returns: tuple of 3, (x, y, sample_weights) for numpy and tensor input. """ if (isinstance(validation_data, (tf.compat.v1.data.Iterator, tf.data.Iterator, tf.data.Dataset, data_utils.Sequence)) or not hasattr(validation_data, '__len__')): val_x = validation_data val_y = None val_sample_weight = None elif len(validation_data) == 2: try: val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence val_sample_weight = None except ValueError: val_x, val_y, val_sample_weight = validation_data, None, None elif len(validation_data) == 3: try: val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence except ValueError: val_x, val_y, val_sample_weight = validation_data, None, None else: if raise_if_ambiguous: raise ValueError( 'When passing a `validation_data` argument, ' 'it must contain either 2 items (x_val, y_val), ' 'or 3 items (x_val, y_val, val_sample_weights), ' 'or alternatively it could be a dataset or a ' 'dataset or a dataset iterator. ' 'However we received `validation_data=%s`' % validation_data) val_x, val_y, val_sample_weight = validation_data, None, None return val_x, val_y, val_sample_weight class TrainingLoop: """TrainingLoop is a wrapper class around the training logic. This class is trying to encapsulate the different logic of fit/eval/predict with regard to different data input and model condition. Note that TrainingLoop is stateless, which means it doesn't contain any internal field and can be reused with different model and inputs. 
""" def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs): """Train the model with the inputs and targets.""" raise NotImplementedError() def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs): """Returns the loss value & metrics values for the model in test mode.""" raise NotImplementedError() def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, **kwargs): raise NotImplementedError()
75,891
38.099433
97
py
keras
keras-master/keras/engine/base_layer_v1.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Contains the base Layer class, from which all layers inherit.""" import tensorflow.compat.v2 as tf import collections import functools import itertools import threading import warnings import numpy as np from keras import backend from keras import constraints from keras import initializers from keras import regularizers from keras.engine import base_layer from keras.engine import base_layer_utils from keras.engine import input_spec from keras.mixed_precision import autocast_variable from keras.mixed_precision import loss_scale_optimizer from keras.mixed_precision import policy from keras.saving.saved_model import layer_serialization from keras.utils import generic_utils from keras.utils import layer_utils from keras.utils import object_identity from keras.utils import tf_inspect from keras.utils import tf_utils # A module that only depends on `keras.layers` import these from here. from keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import from keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import from tensorflow.python.platform import tf_logging from tensorflow.tools.docs import doc_controls # pylint: disable=g-classes-have-attributes class Layer(base_layer.Layer): """Base layer class. 
This is the class from which all layers inherit. A layer is a class implementing common neural networks operations, such as convolution, batch norm, etc. These operations require managing weights, losses, updates, and inter-layer connectivity. Users will just instantiate a layer and then treat it as a callable. We recommend that descendants of `Layer` implement the following methods: * `__init__()`: Save configuration in member variables * `build()`: Called once from `__call__`, when we know the shapes of inputs and `dtype`. Should have the calls to `add_weight()`, and then call the super's `build()` (which sets `self.built = True`, which is nice in case the user wants to call `build()` manually before the first `__call__`). * `call()`: Called in `__call__` after making sure `build()` has been called once. Should actually perform the logic of applying the layer to the input tensors (which should be passed in as the first argument). Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type of the first input in TensorFlow 1). dynamic: Set this to `True` if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If `False`, we assume that the layer can safely be used to generate a static computation graph. Attributes: name: The name of the layer (string). dtype: The dtype of the layer's computations and weights. If mixed precision is used with a `tf.keras.mixed_precision.Policy`, this is instead just the dtype of the layer's weights, as the computations are done in a different dtype. updates: List of update ops of this layer. losses: List of losses added by this layer. 
  trainable_weights: List of variables to be included in backprop.
  non_trainable_weights: List of variables that should not be
    included in backprop.
  weights: The concatenation of the lists trainable_weights and
    non_trainable_weights (in this order).
  trainable: Whether the layer should be trained (boolean).
  input_spec: Optional (list of) `InputSpec` object(s) specifying the
    constraints on inputs that can be accepted by the layer.

  Each layer has a dtype, which is typically the dtype of the layer's
  computations and variables. A layer's dtype can be queried via the
  `Layer.dtype` property. The dtype is specified with the `dtype` constructor
  argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`
  if no dtype is passed. `floatx()` itself defaults to "float32". Additionally,
  layers will cast their inputs to the layer's dtype in TensorFlow 2. When
  mixed precision is used, layers may have different computation and variable
  dtypes. See `tf.keras.mixed_precision.Policy` for details on layer dtypes.
  """

  # See tf.Module for the usage of this property.
  # The key for _obj_reference_counts_dict is a Trackable, which could be a
  # variable or layer etc. tf.Module._flatten will fail to flatten the key
  # since it is trying to convert Trackable to a string. This attribute can be
  # ignored even after the fix of nest lib, since the trackable object should
  # already been available as individual attributes. _obj_reference_counts_dict
  # just contains a copy of them.
  _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
      ('_obj_reference_counts_dict',),
      tf.Module._TF_MODULE_IGNORED_PROPERTIES
  ))

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
               **kwargs):
    self._instrument_layer_creation()

    # These properties should be set by the user via keyword arguments.
    # note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_dim',
        'input_shape',
        'batch_input_shape',
        'batch_size',
        'weights',
        'activity_regularizer',
        'autocast',
        'implementation'
    }
    # Validate optional keyword arguments.
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)

    # Mutable properties
    # Indicates whether the layer's weights are updated during training
    # and whether the layer's updates are run during training.
    self._trainable = trainable
    # A stateful layer is a layer whose updates are run during inference too,
    # for instance stateful RNNs.
    self._stateful = False
    # Indicates whether `build` needs to be called upon layer call, to create
    # the layer's weights.
    self.built = False
    self._build_input_shape = None
    # Provides information about which inputs are compatible with the layer.
    self._input_spec = None
    self.supports_masking = False

    self._init_set_name(name)
    self._activity_regularizer = regularizers.get(
        kwargs.pop('activity_regularizer', None))
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    self._updates = []
    # Object to store all thread local layer properties.
    self._thread_local = threading.local()
    # A list of zero-argument lambdas which return Tensors, used for variable
    # regularizers.
    self._callable_losses = []
    # A list of symbolic Tensors containing activity regularizers and losses
    # manually added through `add_loss` in graph-building mode.
    self._losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []

    # Both graph and subclassed networks have a dtype policy. For graph
    # networks, the policy's compute and variable dtypes are ignored. Such
    # networks only use the policy if it is a PolicyV1, in which case it uses
    # the PolicyV1's loss_scale (Policy does not have a loss_scale). For
    # subclassed networks, the compute and variable dtypes are used as like any
    # ordinary layer.
    self._set_dtype_policy(dtype)
    # Boolean indicating whether the layer automatically casts its inputs to
    # the layer's compute_dtype.
    self._autocast = kwargs.get('autocast',
                                base_layer_utils.v2_dtype_behavior_enabled())

    # Dependencies tracked via attribute assignment.
    # All layers in order of horizontal graph traversal.
    # Entries are unique. For models includes input and output layers.
    self._maybe_create_attribute('_self_tracked_trackables', [])

    # These lists will be filled via successive calls
    # to self._add_inbound_node().
    # Used in symbolic mode only, only in conjunction with graph-networks
    self._inbound_nodes_value = []
    self._outbound_nodes_value = []

    self._init_call_fn_args()

    # Whether the `call` method can be used to build a TF graph without issues.
    # This attribute has no effect if the model is created using the Functional
    # API. Instead, `model.dynamic` is determined based on the internal layers.
    self._dynamic = dynamic

    # Manage input shape information if passed.
    if 'input_dim' in kwargs and 'input_shape' not in kwargs:
      # Backwards compatibility: alias 'input_dim' to 'input_shape'.
      kwargs['input_shape'] = (kwargs['input_dim'],)
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self._batch_input_shape = batch_input_shape

    # Manage initial weight values if passed.
    self._initial_weights = kwargs.get('weights', None)

    # Whether the layer will track any layers that is set as attribute on itself
    # as sub-layers, the weights from the sub-layers will be included in the
    # parent layer's variables() as well.
    # Default to True, which means auto tracking is turned on. Certain subclass
    # might want to turn it off, like Sequential model.
    self._auto_track_sub_layers = True

    # Mark this layer as having been originally built as a tf1 layer/model
    self._originally_built_as_v1 = True

    # For backwards compat reasons, most built-in layers do not guarantee
    # that they will 100% preserve the structure of input args when saving
    # / loading configs. E.g. they may un-nest an arg that is
    # a list with one element.
    self._preserve_input_structure_in_config = False

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  @generic_utils.default
  def build(self, input_shape):
    """Creates the variables of the layer (optional, for subclass implementers).

    This is a method that implementers of subclasses of `Layer` or `Model`
    can override if they need a state-creation step in-between
    layer instantiation and layer call.

    This is typically used to create the weights of `Layer` subclasses.

    Args:
      input_shape: Instance of `TensorShape`, or list of instances of
        `TensorShape` if the layer expects a list of inputs
        (one instance per input).
    """
    # Only record the build shape when `build` has been overridden; the
    # default implementation is tagged `_is_default` by `@generic_utils.default`
    # above, so for an overridden `build` the attribute is absent.
    if not hasattr(self.build, '_is_default'):
      self._build_input_shape = input_shape
    self.built = True

  @doc_controls.for_subclass_implementers
  def call(self, inputs, **kwargs):  # pylint: disable=unused-argument
    """This is where the layer's logic lives.

    Args:
      inputs: Input tensor, or list/tuple of input tensors.
      **kwargs: Additional keyword arguments.

    Returns:
      A tensor or list/tuple of tensors.
    """
    # Identity by default; subclasses override this with the actual layer
    # computation.
    return inputs

  @doc_controls.for_subclass_implementers
  def _add_trackable(self, trackable_object, trainable):
    """Adds a Trackable object to this layer's state.

    Args:
      trackable_object: The tf.tracking.Trackable object to add.
      trainable: Boolean, whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases) or
        "non_trainable_variables" (e.g. BatchNorm mean and variance).

    Returns:
      The TrackableWeightHandler used to track this object.
    """
    # Reuse an existing handler if one was passed in; otherwise wrap the
    # trackable in a new TrackableWeightHandler.
    if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):
      handler = trackable_object
    else:
      handler = base_layer_utils.TrackableWeightHandler(trackable_object)
    if trainable:
      self._trainable_weights.append(handler)
    else:
      self._non_trainable_weights.append(handler)
    return handler

  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name=None,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 partitioner=None,
                 use_resource=None,
                 synchronization=tf.VariableSynchronization.AUTO,
                 aggregation=tf.compat.v1.VariableAggregation.NONE,
                 **kwargs):
    """Adds a new variable to the layer.

    Args:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: Initializer instance (callable).
      regularizer: Regularizer instance (callable).
      trainable: Boolean, whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean and variance).
        Note that `trainable` cannot be `True` if `synchronization`
        is set to `ON_READ`.
      constraint: Constraint instance (callable).
      partitioner: Partitioner to be passed to the `Trackable` API.
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      **kwargs: Additional keyword arguments. Accepted values are `getter`,
        `collections`, `experimental_autocast` and `caching_device`.

    Returns:
      The created variable.
      Usually either a `Variable` or `ResourceVariable` instance. If
      `partitioner` is not `None`, a `PartitionedVariable` instance is
      returned.

    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When giving unsupported dtype and no initializer or when
        trainable has been set to True with synchronization set as `ON_READ`.
    """
    if shape is None:
      shape = ()
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in ['getter', 'collections', 'experimental_autocast',
                       'caching_device']:
        raise TypeError('Unknown keyword argument:', kwarg)
    has_custom_getter = 'getter' in kwargs
    getter = kwargs.pop('getter', base_layer_utils.make_variable)
    collections_arg = kwargs.pop('collections', None)
    # 'experimental_autocast' can be set to False by the caller to indicate an
    # AutoCastVariable should never be created.
    autocast = kwargs.pop('experimental_autocast', True)
    # See the docstring for tf.Variable about the details for caching_device.
    caching_device = kwargs.pop('caching_device', None)

    if dtype is None:
      dtype = self.dtype or backend.floatx()
    dtype = tf.as_dtype(dtype)
    if self._dtype_policy.variable_dtype is None:
      # The policy is "_infer", so we infer the policy from the variable dtype.
      self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))
    initializer = initializers.get(initializer)
    regularizer = regularizers.get(regularizer)
    constraint = constraints.get(constraint)

    if synchronization == tf.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True

    # Initialize variable when no initializer provided
    if initializer is None:
      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
      if dtype.is_floating:
        initializer = initializers.get('glorot_uniform')
      # If dtype is DT_INT/DT_UINT, provide a default value `zero`
      # If dtype is DT_BOOL, provide a default value `FALSE`
      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        initializer = tf.compat.v1.zeros_initializer()
      # NOTE: there is no default initializer for DT_STRING / DT_COMPLEX
      # dtypes; callers must supply one (or a custom getter that handles it).
      elif not has_custom_getter:
        # When `getter` is specified, it's possibly fine for `initializer` to be
        # None since it's up to the custom `getter` to raise error in case it
        # indeed needs `initializer`.
        raise ValueError('An initializer for variable %s of type %s is required'
                         ' for layer %s' % (name, dtype.base_dtype, self.name))

    if (autocast and
        self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype
        and dtype.is_floating):
      # Wrap 'getter' with a version that returns an AutoCastVariable.
      old_getter = getter

      def getter(*args, **kwargs):  # pylint: disable=function-redefined
        variable = old_getter(*args, **kwargs)
        return autocast_variable.create_autocast_variable(variable)

      # Also the caching_device does not work with the mixed precision API,
      # disable it if it is specified.
      # TODO(b/142020079): Reenable it once the bug is fixed.
      if caching_device is not None:
        tf_logging.warning(
            '`caching_device` does not work with mixed precision API. Ignoring '
            'user specified `caching_device`.')
        caching_device = None

    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        # TODO(allenl): a `make_variable` equivalent should be added as a
        # `Trackable` method.
        getter=getter,
        # Manage errors in Layer rather than Trackable.
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        constraint=constraint,
        trainable=trainable,
        partitioner=partitioner,
        use_resource=use_resource,
        collections=collections_arg,
        synchronization=synchronization,
        aggregation=aggregation,
        caching_device=caching_device)
    if regularizer is not None:
      # TODO(fchollet): in the future, this should be handled at the
      # level of variable creation, and weight regularization losses
      # should be variable attributes.
      name_in_scope = variable.name[:variable.name.find(':')]
      self._handle_weight_regularization(name_in_scope,
                                         variable,
                                         regularizer)
    # Track each shard of a partitioned variable individually so that
    # `trainable_weights` / `non_trainable_weights` see plain variables.
    if base_layer_utils.is_split_variable(variable):
      for v in variable:
        backend.track_variable(v)
        if trainable:
          self._trainable_weights.append(v)
        else:
          self._non_trainable_weights.append(v)
    else:
      backend.track_variable(variable)
      if trainable:
        self._trainable_weights.append(variable)
      else:
        self._non_trainable_weights.append(variable)
    return variable

  @generic_utils.default
  def get_config(self):
    """Returns the config of the layer.

    A layer config is a Python dictionary (serializable)
    containing the configuration of a layer.
    The same layer can be reinstantiated later
    (without its trained weights) from this configuration.

    The config of a layer does not include connectivity
    information, nor the layer class name. These are handled
    by `Network` (one layer of abstraction above).

    Returns:
      Python dictionary.
""" all_args = tf_inspect.getfullargspec(self.__init__).args config = {'name': self.name, 'trainable': self.trainable} if hasattr(self, '_batch_input_shape'): config['batch_input_shape'] = self._batch_input_shape config['dtype'] = policy.serialize(self._dtype_policy) if hasattr(self, 'dynamic'): # Only include `dynamic` in the `config` if it is `True` if self.dynamic: config['dynamic'] = self.dynamic elif 'dynamic' in all_args: all_args.remove('dynamic') expected_args = config.keys() # Finds all arguments in the `__init__` that are not in the config: extra_args = [arg for arg in all_args if arg not in expected_args] # Check that either the only argument in the `__init__` is `self`, # or that `get_config` has been overridden: if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'): raise NotImplementedError('Layers with arguments in `__init__` must ' 'override `get_config`.') return config @classmethod def from_config(cls, config): """Creates a layer from its config. This method is the reverse of `get_config`, capable of instantiating the same layer from the config dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by `set_weights`). Args: config: A Python dictionary, typically the output of get_config. Returns: A layer instance. """ return cls(**config) def compute_output_shape(self, input_shape): """Computes the output shape of the layer. If the layer has not been built, this method will call `build` on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: An input shape tuple. """ if tf.executing_eagerly(): # In this case we build the model first in order to do shape inference. 
# This is acceptable because the framework only calls # `compute_output_shape` on shape values that the layer would later be # built for. It would however cause issues in case a user attempts to # use `compute_output_shape` manually with shapes that are incompatible # with the shape the Layer will be called on (these users will have to # implement `compute_output_shape` themselves). self._maybe_build(input_shape) with tf.compat.v1.get_default_graph().as_default(): graph = tf.__internal__.FuncGraph('graph') with graph.as_default(): input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) inputs = tf.nest.map_structure( base_layer_utils.generate_placeholders_from_shape, input_shape) try: outputs = self(inputs, training=False) except TypeError as e: raise NotImplementedError( 'We could not automatically infer the static shape of the ' 'layer\'s output. Please implement the ' '`compute_output_shape` method on your layer (%s).' % self.__class__.__name__) from e return tf.nest.map_structure(lambda t: t.shape, outputs) raise NotImplementedError @doc_controls.for_subclass_implementers def compute_output_signature(self, input_signature): """Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use `compute_output_shape`, and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. Returns: Single TensorSpec or nested structure of TensorSpec objects, describing how the layer would transform the provided input. Raises: TypeError: If input_signature contains a non-TensorSpec object. 
""" def check_type_return_shape(s): if not isinstance(s, tf.TensorSpec): raise TypeError('Only TensorSpec signature types are supported, ' 'but saw signature entry: {}.'.format(s)) return s.shape input_shape = tf.nest.map_structure(check_type_return_shape, input_signature) output_shape = self.compute_output_shape(input_shape) dtype = self._compute_dtype if dtype is None: input_dtypes = [s.dtype for s in tf.nest.flatten(input_signature)] # Default behavior when self.dtype is None, is to use the first input's # dtype. dtype = input_dtypes[0] return tf.nest.map_structure( lambda s: tf.TensorSpec(dtype=dtype, shape=s), output_shape) @generic_utils.default def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument """Computes an output mask tensor. Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer). """ if not self.supports_masking: if any(m is not None for m in tf.nest.flatten(mask)): raise TypeError('Layer ' + self.name + ' does not support masking, ' 'but was passed an input_mask: ' + str(mask)) # masking not explicitly supported: return None as mask. return None # if masking is explicitly supported, by default # carry over the input mask return mask def __call__(self, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. Args: *args: Positional arguments to be passed to `self.call`. **kwargs: Keyword arguments to be passed to `self.call`. Returns: Output tensor(s). Note: - The following optional keyword arguments are reserved for specific uses: * `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. * `mask`: Boolean input mask. 
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `inputs` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support.

    Raises:
      ValueError: if the layer's `call` method returns None (an invalid value).
      RuntimeError: if `super().__init__()` was not called in the constructor.
    """
    self._assert_built_as_v1()

    if not hasattr(self, '_thread_local'):
      raise RuntimeError(
          'You must call `super().__init__()` in the layer constructor.')

    # Grab the first positional or keyword argument.
    if args:
      inputs = args[0]
      args = args[1:]
    elif self._call_fn_args[0] in kwargs:
      inputs = kwargs.pop(self._call_fn_args[0])
    else:
      raise ValueError(
          'The first argument to `Layer.call` must always be passed.')

    call_context = base_layer_utils.call_context()
    input_list = tf.nest.flatten(inputs)

    # We will attempt to build a TF graph if & only if all inputs are symbolic.
    # This is always the case in graph mode. It can also be the case in eager
    # mode when all inputs can be traced back to `keras.Input()` (when building
    # models using the functional API).
    build_graph = tf_utils.are_all_symbolic_tensors(input_list)

    # Accept NumPy and scalar inputs by converting to Tensors.
    if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):

      def _convert_non_tensor(x):
        # Don't call `ops.convert_to_tensor` on all `inputs` because
        # `SparseTensors` can't be converted to `Tensor`.
        if isinstance(x, (np.ndarray, float, int)):
          return tf.convert_to_tensor(x)
        return x

      inputs = tf.nest.map_structure(_convert_non_tensor, inputs)
      input_list = tf.nest.flatten(inputs)

    # Handle `mask` propagation from previous layer to current layer. Masks can
    # be propagated explicitly via the `mask` argument, or implicitly via
    # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
    # explicitly take priority.
    mask_arg_passed_by_framework = False
    input_masks = self._collect_input_masks(inputs, args, kwargs)
    if (self._expects_mask_arg and input_masks is not None and
        not self._call_arg_was_passed('mask', args, kwargs)):
      mask_arg_passed_by_framework = True
      kwargs['mask'] = input_masks

    # If `training` argument is None or not explicitly passed,
    # propagate `training` value from this layer's calling layer.
    training_value = None
    training_arg_passed_by_framework = False
    # Priority 1: `training` was explicitly passed.
    if self._call_arg_was_passed('training', args, kwargs):
      training_value = self._get_call_arg_value('training', args, kwargs)
      if not self._expects_training_arg:
        kwargs.pop('training')

    if training_value is None:
      # Priority 2: `training` was passed to a parent layer.
      if call_context.training is not None:
        training_value = call_context.training
      # Priority 3a: `learning_phase()` has been set.
      elif backend.global_learning_phase_is_set():
        training_value = backend.learning_phase()
      # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
      elif build_graph:
        with backend.get_graph().as_default():
          if base_layer_utils.is_in_keras_graph():
            training_value = backend.learning_phase()

      if self._expects_training_arg and training_value is not None:
        # Force the training_value to be bool type which matches to the contract
        # for layer/model call args.
        if tf.is_tensor(training_value):
          training_value = tf.cast(training_value, tf.bool)
        else:
          training_value = bool(training_value)
        args, kwargs = self._set_call_arg_value(
            'training', training_value, args, kwargs)
        training_arg_passed_by_framework = True

    # Only create Keras history if at least one tensor originates from a
    # `keras.Input`. Otherwise this Layer may be being used outside the Keras
    # framework.
    if build_graph and base_layer_utils.needs_keras_history(inputs):
      base_layer_utils.create_keras_history(inputs)

    with call_context.enter(self, inputs, build_graph, training_value):
      # Check input assumptions set after layer building, e.g. input shape.
      if build_graph:
        # Symbolic execution on symbolic tensors. We will attempt to build
        # the corresponding TF subgraph inside `backend.get_graph()`
        input_spec.assert_input_compatibility(self.input_spec, inputs,
                                              self.name)
        graph = backend.get_graph()
        with graph.as_default(), backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
          # Build layer if applicable (if the `build` method has been
          # overridden).
          self._maybe_build(inputs)
          cast_inputs = self._maybe_cast_inputs(inputs)

          # Wrapping `call` function in autograph to allow for dynamic control
          # flow and control dependencies in call. We are limiting this to
          # subclassed layers as autograph is strictly needed only for
          # subclassed layers and models.
          # tf_convert will respect the value of autograph setting in the
          # enclosing tf.function, if any.
          if (base_layer_utils.is_subclassed(self) and
              not base_layer_utils.from_saved_model(self)):
            call_fn = tf.__internal__.autograph.tf_convert(
                self.call, tf.__internal__.autograph.control_status_ctx())
          else:
            call_fn = self.call

          if not self.dynamic:
            try:
              with autocast_variable.enable_auto_cast_variables(
                  self._compute_dtype_object):
                outputs = call_fn(cast_inputs, *args, **kwargs)

            except tf.errors.OperatorNotAllowedInGraphError as e:
              raise TypeError('You are attempting to use Python control '
                              'flow in a layer that was not declared to be '
                              'dynamic. Pass `dynamic=True` to the class '
                              'constructor.\nEncountered error:\n"""\n' +
                              str(e) + '\n"""')
          else:
            # We will use static shape inference to return symbolic tensors
            # matching the specifications of the layer outputs.
            # Since `self.dynamic` is True, we will never attempt to
            # run the underlying TF graph (which is disconnected).
            # TODO(fchollet): consider py_func as an alternative, which
            # would enable us to run the underlying graph if needed.
            outputs = self._symbolic_call(inputs)

          if outputs is None:
            raise ValueError('A layer\'s `call` method should return a '
                             'Tensor or a list of Tensors, not None '
                             '(layer: ' + self.name + ').')
          if base_layer_utils.have_all_keras_metadata(inputs):
            if training_arg_passed_by_framework:
              args, kwargs = self._set_call_arg_value(
                  'training', None, args, kwargs, pop_kwarg_if_none=True)
            if mask_arg_passed_by_framework:
              kwargs.pop('mask')
            outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
                                                      outputs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, input_masks)
          if hasattr(self, '_set_inputs') and not self.inputs:
            # Subclassed network: explicitly set metadata normally set by
            # a call to self._set_inputs().
            # TODO(b/120997007): This should be done in Eager as well, but
            # causes garbage collection issues because of the placeholders
            # created on the default Keras graph.
            self._set_save_spec(inputs, args, kwargs)
            self._set_inputs(inputs, outputs)
      else:
        # Eager execution on data tensors.
        with backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
          self._maybe_build(inputs)
          cast_inputs = self._maybe_cast_inputs(inputs)
          with autocast_variable.enable_auto_cast_variables(
              self._compute_dtype_object):
            outputs = self.call(cast_inputs, *args, **kwargs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, input_masks)

    return outputs

  def _assert_built_as_v1(self):
    # `_originally_built_as_v1` is set in this class's `__init__`; its absence
    # means the layer was constructed outside the v1 code path.
    if not hasattr(self, '_originally_built_as_v1'):
      raise ValueError(
          'Your Layer or Model is in an invalid state. '
          'This can happen for the following cases:\n '
          '1. You might be interleaving estimator/non-estimator models or '
          'interleaving models/layers made in tf.compat.v1.Graph.as_default() '
          'with models/layers created outside of it. '
          'Converting a model to an estimator (via model_to_estimator) '
          'invalidates all models/layers made before the conversion (even '
          'if they were not the model converted to an estimator). '
          'Similarly, making a layer or a model inside a '
          'a tf.compat.v1.Graph invalidates all layers/models you previously '
          'made outside of the graph.\n'
          '2. You might be using a custom keras layer implementation with '
          ' custom __init__ which didn\'t call super().__init__. '
          ' Please check the implementation of %s and its bases.' %
          (type(self),))

  @property
  def dtype(self):
    return self._dtype_policy.variable_dtype

  @property
  def name(self):
    return self._name

  @property
  def dynamic(self):
    return any(layer._dynamic for layer in self._flatten_layers())

  @property
  @doc_controls.do_not_generate_docs
  def stateful(self):
    return any(layer._stateful for layer in self._flatten_layers())

  @stateful.setter
  def stateful(self, value):
    self._stateful = value

  @property
  def trainable(self):
    return self._trainable

  @trainable.setter
  def trainable(self, value):
    # Propagate the flag to all tracked sub-layers.
    self._trainable = value
    for layer in getattr(self, '_self_tracked_trackables', []):
      layer.trainable = value

  @property
  def activity_regularizer(self):
    """Optional regularizer function for the output of this layer."""
    return self._activity_regularizer

  @activity_regularizer.setter
  def activity_regularizer(self, regularizer):
    """Optional regularizer function for the output of this layer."""
    self._activity_regularizer = regularizer

  @property
  def input_spec(self):
    return self._input_spec

  @input_spec.setter
  # Must be decorated to prevent tracking, since the input_spec can be nested
  # InputSpec objects.
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def input_spec(self, value):
    for v in tf.nest.flatten(value):
      if v is not None and not isinstance(v, base_layer.InputSpec):
        raise TypeError('Layer input_spec must be an instance of InputSpec. '
                        'Got: {}'.format(v))
    self._input_spec = value

  @property
  def updates(self):
    collected_updates = []
    all_layers = self._flatten_layers()
    with backend.get_graph().as_default():
      for layer in all_layers:
        # Skip layers whose updates would never run (frozen and stateless).
        if not layer.trainable and not layer.stateful:
          continue
        for u in layer._updates:
          if callable(u):
            try:
              u = u()
            except ValueError as e:
              if 'InaccessibleTensorError' in type(e).__name__:
                # For one specific case of error we try to raise
                # a more meaningful error message about the graph if we can.
                # This error is an internal TF symbol that is not
                # publicly exposed, so we check the name directly rather
                # than using a direct import.
                base_layer_utils.check_graph_consistency(
                    method='add_update', force_raise=True)
              raise  # check_graph_consistency may not always raise.
          base_layer_utils.check_graph_consistency(u, method='add_update')
          collected_updates.append(u)
    return collected_updates

  @property
  def losses(self):
    """Losses which are associated with this `Layer`.

    Variable regularization tensors are created when this property is accessed,
    so it is eager safe: accessing `losses` under a `tf.GradientTape` will
    propagate gradients back to the corresponding variables.

    Returns:
      A list of tensors.
    """
    collected_losses = []
    all_layers = self._flatten_layers()
    for layer in all_layers:
      # If any eager losses are present, we assume the model to be part of an
      # eager training loop (either a custom one or the one used when
      # `run_eagerly=True`) and so we always return just the eager losses.
      collected_losses.extend(layer._losses)
      for regularizer in layer._callable_losses:
        loss_tensor = regularizer()
        if loss_tensor is not None:
          collected_losses.append(loss_tensor)
    return collected_losses

  @doc_controls.for_subclass_implementers
  def add_loss(self, losses, inputs=None):
    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be dependent
    on the inputs passed when calling a layer.
    Hence, when reusing the same layer on different inputs `a` and `b`,
    some entries in `layer.losses` may be dependent on `a` and some on `b`.
    This method automatically keeps track of dependencies.

    This method can be used inside a subclassed layer or model's `call`
    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python
    class MyLayer(tf.keras.layers.Layer):
      def call(inputs, self):
        self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
        return inputs
    ```

    This method can also be called directly on a Functional Model during
    construction. In this case, any loss Tensors passed to this Model must
    be symbolic and be able to be traced back to the model's `Input`s. These
    losses become part of the model's topology and are tracked in
    `get_config`.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Activity regularization.
    model.add_loss(tf.abs(tf.reduce_mean(x)))
    ```

    If this is not the case for your loss (if, for example, your loss
    references a `Variable` of one of the model's layers), you can wrap your
    loss in a zero-argument lambda. These losses are not tracked as part of
    the model's topology since they can't be serialized.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Weight regularization.
    model.add_loss(lambda: tf.reduce_mean(x.kernel))
    ```

    The `get_losses_for` method allows to retrieve the losses relevant to a
    specific set of inputs.

    Args:
      losses: Loss tensor, or list/tuple of tensors. Rather than tensors,
        losses may also be zero-argument callables which create a loss
        tensor.
      inputs: Ignored when executing eagerly. If anything other than None is
        passed, it signals the losses are conditional on some of the layer's
        inputs, and thus they should only be run where these inputs are
        available. This is the case for activity regularization losses, for
        instance. If `None` is passed, the losses are assumed to be
        unconditional, and will apply across all dataflows of the layer
        (e.g. weight regularization losses).
    """

    def _tag_unconditional(loss):
      """Process the loss and tag it by setting loss._unconditional_loss."""
      if callable(loss):
        # We run the loss without autocasting, as regularizers are often
        # numerically unstable in float16.
        with autocast_variable.enable_auto_cast_variables(None):
          loss = loss()
      if loss is None:
        return None  # Will be filtered out when computing the .losses property
      if not tf.is_tensor(loss):
        loss = tf.convert_to_tensor(
            loss, dtype=backend.floatx())
      # A loss added with `inputs=None` is unconditional (e.g. weight
      # regularization); conditional losses depend on specific inputs.
      loss._unconditional_loss = (inputs is None)  # pylint: disable=protected-access
      return loss

    # Accept a single loss or an arbitrarily nested structure of losses.
    losses = tf.nest.flatten(losses)

    callable_losses = []
    symbolic_losses = []
    for loss in losses:
      if callable(loss):
        # Callables are stored as-is and re-evaluated every time the
        # `.losses` property is accessed.
        callable_losses.append(functools.partial(_tag_unconditional, loss))
        continue
      if loss is None:
        continue
      if not tf.is_tensor(loss):
        loss = tf.convert_to_tensor(
            loss, dtype=backend.floatx())
      # TF Functions should take the eager path.
      if (tf_utils.is_symbolic_tensor(loss) and
          not base_layer_utils.is_in_tf_function()):
        symbolic_losses.append(_tag_unconditional(loss))
        base_layer_utils.check_graph_consistency(loss, method='add_loss')

    self._callable_losses.extend(callable_losses)

    in_call_context = base_layer_utils.call_context().in_call

    if in_call_context:
      for symbolic_loss in symbolic_losses:
        self._losses.append(symbolic_loss)
    else:
      for symbolic_loss in symbolic_losses:
        if getattr(self, '_is_graph_network', False):
          self._graph_network_add_loss(symbolic_loss)
        else:
          # Possible a loss was added in a Layer's `build`.
          self._losses.append(symbolic_loss)

  @property
  def metrics(self):
    # Aggregates the metric objects of this layer and all nested sublayers.
    collected_metrics = []
    for layer in self._flatten_layers():
      collected_metrics.extend(layer._metrics)
    return collected_metrics

  @doc_controls.for_subclass_implementers
  def add_metric(self, value, aggregation=None, name=None):
    """Adds metric tensor to the layer.

    Args:
      value: Metric tensor.
      aggregation: Sample-wise metric reduction function. If
        `aggregation=None`, it indicates that the metric tensor provided has
        been aggregated already. eg, `bin_acc = BinaryAccuracy(name='acc')`
        followed by `model.add_metric(bin_acc(y_true, y_pred))`. If
        aggregation='mean', the given metric tensor will be sample-wise
        reduced using `mean` function. eg,
        `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
        aggregation='mean')`.
      name: String metric name.

    Raises:
      ValueError: If `aggregation` is anything other than None or `mean`.
    """
    if aggregation is not None and aggregation != 'mean':
      raise ValueError(
          'We currently support only `mean` sample-wise metric aggregation. '
          'You provided aggregation=`%s`' % aggregation)

    from_metric_obj = hasattr(value, '_metric_obj')
    is_symbolic = tf_utils.is_symbolic_tensor(value)
    in_call_context = base_layer_utils.call_context().in_call

    if name is None and not from_metric_obj:
      # Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
      # In eager mode, we use metric name to lookup a metric. Without a name,
      # a new Mean metric wrapper will be created on every model/layer call.
      # So, we raise an error when no name is provided.
      # We will do the same for symbolic mode for consistency although a name
      # will be generated if no name is provided.
      # We will not raise this error in the foll use case for the sake of
      # consistency as name in provided in the metric constructor.
      # mean = metrics.Mean(name='my_metric')
      # model.add_metric(mean(outputs))
      raise ValueError('Please provide a name for your metric like '
                       '`self.add_metric(tf.reduce_sum(inputs), '
                       'name=\'mean_activation\', aggregation=\'mean\')`')
    elif from_metric_obj:
      name = value._metric_obj.name

    if in_call_context:
      # TF Function path should take the eager path.
      self._symbolic_add_metric(value, aggregation, name)
    else:
      if not is_symbolic:
        raise ValueError('Expected a symbolic Tensor for the metric value, '
                         'received: ' + str(value))

      # Possible a metric was added in a Layer's `build`.
      if not getattr(self, '_is_graph_network', False):
        with backend.get_graph().as_default():
          self._symbolic_add_metric(value, aggregation, name)
        return

      if from_metric_obj:
        raise ValueError('Using the result of calling a `Metric` object '
                         'when calling `add_metric` on a Functional '
                         'Model is not supported. Please pass the '
                         'Tensor to monitor directly.')

      # Insert layers into the Keras Graph Network.
      self._graph_network_add_metric(value, aggregation, name)

  @doc_controls.for_subclass_implementers
  def add_update(self, updates, inputs=None):
    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance
    in a BatchNormalization layer) may be dependent on the inputs passed when
    calling a layer. Hence, when reusing the same layer on different inputs
    `a` and `b`, some entries in `layer.updates` may be dependent on `a` and
    some on `b`. This method automatically keeps track of dependencies.

    The `get_updates_for` method allows to retrieve the updates relevant to
    a specific set of inputs.

    This call is ignored when eager execution is enabled (in that case,
    variable updates are run on the fly and thus do not need to be tracked
    for later execution).

    Args:
      updates: Update op, or list/tuple of update ops, or zero-arg callable
        that returns an update op.
        A zero-arg callable should be passed in
        order to disable running the updates by setting `trainable=False`
        on this Layer, when executing in Eager mode.
      inputs: Deprecated, will be automatically inferred.
    """
    if inputs is not None:
      tf_logging.warning(
          '`add_update` `inputs` kwarg has been deprecated. You no longer need '
          'to pass a value to `inputs` as it is being automatically inferred.')
    call_context = base_layer_utils.call_context()

    if (tf.distribute.has_strategy() and
        tf.distribute.in_cross_replica_context() and
        # When saving the model, the distribution strategy context should be
        # ignored, following the default path for adding updates.
        not call_context.saving):
      # Updates don't need to be run in a cross-replica context.
      return

    updates = generic_utils.to_list(updates)

    # Determine which inputs the updates may be conditioned on: either the
    # inputs of the current call, or the inputs recorded on inbound nodes.
    if call_context.in_call:
      relevant_inputs = call_context.inputs
    else:
      inbound_nodes = getattr(self, '_inbound_nodes', [])
      relevant_inputs = [node.input_tensors for node in inbound_nodes]

    def process_update(x):
      """Standardize update ops.

      Args:
        x: Tensor, op, or callable.

      Returns:
        An update op.
      """
      if callable(x):
        update = lambda: process_update(x())
        return update()
      elif isinstance(x, tf.Operation):
        update = x
      elif hasattr(x, 'op'):
        update = x.op
      else:
        update = tf.convert_to_tensor(x)

      # An update unreachable from the relevant inputs is unconditional.
      reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
      update._unconditional_update = update not in reachable
      return update

    updates = [process_update(x) for x in updates]
    self._updates.extend(updates)

  def set_weights(self, weights):
    """Sets the weights of the layer, from Numpy arrays.

    The weights of a layer represent the state of the layer. This function
    sets the weight values from numpy arrays. The weight values should be
    passed in the order they are created by the layer. Note that the layer's
    weights must be instantiated before calling this function by calling
    the layer.

    For example, a Dense layer returns a list of two values-- per-output
    weights and the bias value. These can be used to set the weights of
    another Dense layer:

    >>> a = tf.keras.layers.Dense(1,
    ...   kernel_initializer=tf.constant_initializer(1.))
    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
    >>> a.get_weights()
    [array([[1.],
           [1.],
           [1.]], dtype=float32), array([0.], dtype=float32)]
    >>> b = tf.keras.layers.Dense(1,
    ...   kernel_initializer=tf.constant_initializer(2.))
    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
    >>> b.get_weights()
    [array([[2.],
           [2.],
           [2.]], dtype=float32), array([0.], dtype=float32)]
    >>> b.set_weights(a.get_weights())
    >>> b.get_weights()
    [array([[1.],
           [1.],
           [1.]], dtype=float32), array([0.], dtype=float32)]

    Args:
      weights: a list of Numpy arrays. The number
        of arrays and their shape must match
        number of the dimensions of the weights
        of the layer (i.e. it should match the
        output of `get_weights`).

    Raises:
      ValueError: If the provided weights list does not match the
        layer's specifications.
    """
    params = self.weights

    # A TrackableWeightHandler may represent several backing tensors, so the
    # expected length of `weights` is not simply `len(params)`.
    expected_num_weights = 0
    for param in params:
      if isinstance(param, base_layer_utils.TrackableWeightHandler):
        expected_num_weights += param.num_tensors
      else:
        expected_num_weights += 1

    if expected_num_weights != len(weights):
      raise ValueError(
          'You called `set_weights(weights)` on layer "%s" '
          'with a weight list of length %s, but the layer was '
          'expecting %s weights. Provided weights: %s...'
          % (self.name, len(weights), expected_num_weights, str(weights)[:50]))

    weight_index = 0
    weight_value_tuples = []
    for param in params:
      if isinstance(param, base_layer_utils.TrackableWeightHandler):
        # Handlers consume their slice of `weights` and apply it themselves.
        num_tensors = param.num_tensors
        tensors = weights[weight_index:weight_index + num_tensors]
        param.set_weights(tensors)
        weight_index += num_tensors
      else:
        weight = weights[weight_index]
        weight_shape = weight.shape if hasattr(weight, 'shape') else ()
        ref_shape = param.shape
        if not ref_shape.is_compatible_with(weight_shape):
          raise ValueError(
              'Layer weight shape %s not compatible with provided weight '
              'shape %s' % (ref_shape, weight_shape))
        weight_value_tuples.append((param, weight))
        weight_index += 1

    # Assign all plain-variable values in one batched backend call.
    backend.batch_set_value(weight_value_tuples)

  def get_weights(self):
    """Returns the current weights of the layer.

    The weights of a layer represent the state of the layer. This function
    returns both trainable and non-trainable weight values associated with
    this layer as a list of Numpy arrays, which can in turn be used to load
    state into similarly parameterized layers.

    For example, a Dense layer returns a list of two values-- per-output
    weights and the bias value. These can be used to set the weights of
    another Dense layer:

    >>> a = tf.keras.layers.Dense(1,
    ...   kernel_initializer=tf.constant_initializer(1.))
    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
    >>> a.get_weights()
    [array([[1.],
           [1.],
           [1.]], dtype=float32), array([0.], dtype=float32)]
    >>> b = tf.keras.layers.Dense(1,
    ...   kernel_initializer=tf.constant_initializer(2.))
    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
    >>> b.get_weights()
    [array([[2.],
           [2.],
           [2.]], dtype=float32), array([0.], dtype=float32)]
    >>> b.set_weights(a.get_weights())
    >>> b.get_weights()
    [array([[1.],
           [1.],
           [1.]], dtype=float32), array([0.], dtype=float32)]

    Returns:
      Weights values as a list of numpy arrays.
    """
    weights = self.weights
    output_weights = []
    for weight in weights:
      if isinstance(weight, base_layer_utils.TrackableWeightHandler):
        output_weights.extend(weight.get_tensors())
      else:
        output_weights.append(weight)
    return backend.batch_get_value(output_weights)

  def get_updates_for(self, inputs):
    """Retrieves updates relevant to a specific set of inputs.

    Args:
      inputs: Input tensor or list/tuple of input tensors.

    Returns:
      List of update ops of the layer that depend on `inputs`.
    """
    if inputs is None:
      # Requesting unconditional updates.
      return [u for u in self.updates if u._unconditional_update]

    # Requesting input-conditional updates.
    updates = [u for u in self.updates if not u._unconditional_update]
    inputs = tf.nest.flatten(inputs)
    reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
    return [u for u in updates if u in reachable]

  def get_losses_for(self, inputs):
    """Retrieves losses relevant to a specific set of inputs.

    Args:
      inputs: Input tensor or list/tuple of input tensors.

    Returns:
      List of loss tensors of the layer that depend on `inputs`.
    """
    if inputs is None:
      # Requesting unconditional losses.
      return [l for l in self.losses if l._unconditional_loss]

    # Requesting input-conditional losses.
    losses = [l for l in self.losses if not l._unconditional_loss]
    inputs = tf.nest.flatten(inputs)
    reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
    return [l for l in losses if l in reachable]

  def get_input_mask_at(self, node_index):
    """Retrieves the input mask tensor(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first time the layer was called.

    Returns:
      A mask tensor
      (or list of tensors if the layer has multiple inputs).
""" inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, '_keras_mask', None) for x in inputs] else: return getattr(inputs, '_keras_mask', None) def get_output_mask_at(self, node_index): """Retrieves the output mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple outputs). """ output = self.get_output_at(node_index) if isinstance(output, list): return [getattr(x, '_keras_mask', None) for x in output] else: return getattr(output, '_keras_mask', None) @property def input_mask(self): """Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Input mask tensor (potentially None) or list of input mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. """ inputs = self.input if isinstance(inputs, list): return [getattr(x, '_keras_mask', None) for x in inputs] else: return getattr(inputs, '_keras_mask', None) @property def output_mask(self): """Retrieves the output mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Output mask tensor (potentially None) or list of output mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. """ output = self.output if isinstance(output, list): return [getattr(x, '_keras_mask', None) for x in output] else: return getattr(output, '_keras_mask', None) def get_input_shape_at(self, node_index): """Retrieves the input shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. 
Returns: A shape tuple (or list of shape tuples if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape') def get_output_shape_at(self, node_index): """Retrieves the output shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape') def get_input_at(self, node_index): """Retrieves the input tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input') def get_output_at(self, node_index): """Retrieves the output tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first output node of the layer. Returns: A tensor (or list of tensors if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode. """ return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output') @property def input(self): """Retrieves the input tensor(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer. Returns: Input tensor or list of input tensors. Raises: RuntimeError: If called in Eager mode. AttributeError: If no inbound nodes are found. 
""" if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.') return self._get_node_attribute_at_index(0, 'input_tensors', 'input') @property def output(self): """Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('Layer ' + self.name + ' has no inbound nodes.') return self._get_node_attribute_at_index(0, 'output_tensors', 'output') @property def input_shape(self): """Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('The layer has never been called ' 'and thus has no defined input shape.') all_input_shapes = set( [str(node.input_shapes) for node in self._inbound_nodes]) if len(all_input_shapes) == 1: return self._inbound_nodes[0].input_shapes else: raise AttributeError('The layer "' + str(self.name) + ' has multiple inbound nodes, ' 'with different input shapes. Hence ' 'the notion of "input shape" is ' 'ill-defined for the layer. ' 'Use `get_input_shape_at(node_index)` ' 'instead.') def count_params(self): """Count the total number of scalars composing the weights. Returns: An integer count. Raises: ValueError: if the layer isn't yet built (in which case its weights aren't yet defined). 
""" if not self.built: if getattr(self, '_is_graph_network', False): with tf_utils.maybe_init_scope(self): self._maybe_build(self.inputs) else: raise ValueError('You tried to call `count_params` on ' + self.name + ', but the layer isn\'t built. ' 'You can build it manually via: `' + self.name + '.build(batch_input_shape)`.') return layer_utils.count_params(self.weights) @property def output_shape(self): """Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode. """ if not self._inbound_nodes: raise AttributeError('The layer has never been called ' 'and thus has no defined output shape.') all_output_shapes = set( [str(node.output_shapes) for node in self._inbound_nodes]) if len(all_output_shapes) == 1: return self._inbound_nodes[0].output_shapes else: raise AttributeError('The layer "%s"' ' has multiple inbound nodes, ' 'with different output shapes. Hence ' 'the notion of "output shape" is ' 'ill-defined for the layer. ' 'Use `get_output_shape_at(node_index)` ' 'instead.' % self.name) @property @doc_controls.do_not_doc_inheritable def inbound_nodes(self): """Deprecated, do NOT use! Only for compatibility with external Keras.""" return self._inbound_nodes @property @doc_controls.do_not_doc_inheritable def outbound_nodes(self): """Deprecated, do NOT use! Only for compatibility with external Keras.""" return self._outbound_nodes ############################################################################## # Methods & attributes below are public aliases of other methods. # ############################################################################## @doc_controls.do_not_doc_inheritable def apply(self, inputs, *args, **kwargs): """Deprecated, do NOT use! This is an alias of `self.__call__`. 
Args: inputs: Input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. Returns: Output tensor(s). """ warnings.warn('`layer.apply` is deprecated and ' 'will be removed in a future version. ' 'Please use `layer.__call__` method instead.') return self.__call__(inputs, *args, **kwargs) @doc_controls.do_not_doc_inheritable def add_variable(self, *args, **kwargs): """Deprecated, do NOT use! Alias for `add_weight`.""" warnings.warn('`layer.add_variable` is deprecated and ' 'will be removed in a future version. ' 'Please use `layer.add_weight` method instead.') return self.add_weight(*args, **kwargs) @property def variables(self): """Returns the list of all layer variables/weights. Alias of `self.weights`. Returns: A list of variables. """ return self.weights @property def trainable_variables(self): return self.trainable_weights @property def non_trainable_variables(self): return self.non_trainable_weights ############################################################################## # Methods & attributes below are all private and only used by the framework. 
  #
  ##############################################################################

  @property
  def _inbound_nodes(self):
    return self._inbound_nodes_value

  @_inbound_nodes.setter
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _inbound_nodes(self, value):
    self._inbound_nodes_value = value

  @property
  def _outbound_nodes(self):
    return self._outbound_nodes_value

  @_outbound_nodes.setter
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _outbound_nodes(self, value):
    self._outbound_nodes_value = value

  def _set_dtype_policy(self, dtype):
    """Sets self._dtype_policy.

    Accepts a `Policy`, a serialized policy dict, a policy name string, a
    plain dtype, or falsy (which falls back to the global policy).
    """
    if isinstance(dtype, policy.Policy):
      self._dtype_policy = dtype
    elif isinstance(dtype, dict):
      self._dtype_policy = policy.deserialize(dtype)
    elif isinstance(dtype, str) and dtype in ('mixed_float16',
                                              'mixed_bfloat16'):
      # The isinstance check is required since np.dtype raises an error if
      # compared to a non-dtype string.
      self._dtype_policy = policy.Policy(dtype)
    elif dtype:
      self._dtype_policy = policy.Policy(tf.as_dtype(dtype).name)
    else:
      self._dtype_policy = policy.global_policy()
    if (self._dtype_policy.name == 'mixed_float16' and
        not loss_scale_optimizer.strategy_supports_loss_scaling()):
      # Although only loss scaling doesn't support certain strategies, to avoid
      # confusion, we disallow the 'mixed_float16' policy with unsupported
      # strategies. This is because 'mixed_float16' requires loss scaling for
      # numeric stability.
      strategy = tf.distribute.get_strategy()
      raise ValueError('Mixed precision is not supported with the '
                       'tf.distribute.Strategy: %s. Either stop using mixed '
                       'precision by removing the use of the "%s" policy or '
                       'use a different Strategy, e.g. a MirroredStrategy.' %
                       (strategy.__class__.__name__, self._dtype_policy.name))

    # Performance optimization: cache the compute dtype as a Dtype object or
    # None, so that str to Dtype conversion doesn't happen in Layer.__call__.
    if self._dtype_policy.compute_dtype:
      self._compute_dtype_object = tf.as_dtype(
          self._dtype_policy.compute_dtype)
    else:
      self._compute_dtype_object = None

  # TODO(reedwm): Expose this property?
  @property
  def _compute_dtype(self):
    """The layer's compute dtype.

    Unless mixed-precision is used, this is the same as `Layer.dtype`.

    If self._autocast is True, layer's will cast floating-point inputs to this.

    Returns:
      The layer's compute dtype.
    """
    return self._dtype_policy.compute_dtype

  def _maybe_cast_inputs(self, inputs):
    """Maybe casts the inputs to the compute dtype.

    If self._compute_dtype is floating-point, and self_autocast is True,
    floating-point inputs are casted to self._compute_dtype.

    Args:
      inputs: Input tensor, or structure of input tensors.

    Returns:
      `inputs`, but tensors may have been casted to self._compute_dtype
    """
    compute_dtype = self._compute_dtype
    if (self._autocast and compute_dtype and
        tf.as_dtype(compute_dtype).is_floating):

      def f(x):
        """Cast a single Tensor or TensorSpec to the compute dtype."""
        cast_types = (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)
        if (isinstance(x, cast_types) and x.dtype.is_floating and
            x.dtype.base_dtype.name != compute_dtype):
          return tf.cast(x, compute_dtype)
        elif isinstance(x, tf.TensorSpec) and x.dtype.is_floating:
          # Inputs may be TensorSpecs when this function is called from
          # model._set_inputs.
          return tf.TensorSpec(x.shape, compute_dtype, x.name)
        else:
          return x

      return tf.nest.map_structure(f, inputs)
    else:
      return inputs

  # _dtype used to be an attribute set in the constructor. We still expose it
  # because some clients still use it.
  # TODO(reedwm): Deprecate, then remove the _dtype property.
  @property
  def _dtype(self):
    # This is equivalent to returning self.dtype . We do not return self.dtype
    # as it would cause infinite recursion in a few subclasses, which override
    # "dtype" to return self._dtype.
    return self._dtype_policy.variable_dtype

  @_dtype.setter
  def _dtype(self, value):
    value = tf.as_dtype(value).name
    self._set_dtype_policy(policy.Policy(value))

  def _name_scope(self):  # pylint: disable=method-hidden
    return self.name

  def _init_set_name(self, name, zero_based=True):
    # When no name is given, derive a unique snake_case name from the class.
    if not name:
      self._name = backend.unique_object_name(
          generic_utils.to_snake_case(self.__class__.__name__),
          zero_based=zero_based)
    else:
      self._name = name

  def _get_existing_metric(self, name=None):
    """Returns the tracked metric with `name`, or None if not found."""
    match = [m for m in self._metrics if m.name == name]
    if not match:
      return
    if len(match) > 1:
      raise ValueError(
          'Please provide different names for the metrics you have added. '
          'We found {} metrics with the name: "{}"'.format(len(match), name))
    return match[0]

  def _symbolic_add_metric(self, value, aggregation=None, name=None):
    # Graph-mode counterpart of `add_metric`: wires the metric tensor into
    # the layer's tracked `_metrics` list.
    base_layer_utils.check_graph_consistency(value, method='add_metric')
    match = self._get_existing_metric(name)
    if aggregation is None:
      # Iterate over the metrics and check if the given metric exists already.
      # This can happen when a metric instance is created in subclassed model
      # layer `__init__` and we have tracked that instance already in
      # model.__setattr__.
      if match:
        result_tensor = value
        metric_obj = match
      elif hasattr(value, '_metric_obj'):
        # We track the instance using the metadata on the result tensor.
        result_tensor = value
        metric_obj = result_tensor._metric_obj
        self._metrics.append(metric_obj)
      else:
        raise ValueError(
            'We do not support adding an aggregated metric result tensor that '
            'is not the output of a `tf.keras.metrics.Metric` metric instance. '
            'Without having access to the metric instance we cannot reset the '
            'state of a metric after every epoch during training. You can '
            'create a `tf.keras.metrics.Metric` instance and pass the result '
            'here or pass an un-aggregated result with `aggregation` parameter '
            'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
            ', name=\'mean_activation\', aggregation=\'mean\')`')
    else:
      # If a non-aggregated tensor is given as input (ie. `aggregation` is
      # explicitly set to `mean`), we wrap the tensor in `Mean` metric.
      if match:
        result_tensor = match(value)
        metric_obj = match
      else:
        metric_obj, result_tensor = base_layer_utils.create_mean_metric(
            value, name)
        self._metrics.append(metric_obj)

  def _handle_weight_regularization(self, name, variable, regularizer):
    """Create lambdas which compute regularization losses."""

    def _loss_for_variable(v):
      """Creates a regularization loss `Tensor` for variable `v`."""
      with backend.name_scope(name + '/Regularizer'):
        regularization = regularizer(v)
      return regularization

    if base_layer_utils.is_split_variable(variable):
      # Split variables get one regularization loss per shard.
      for v in variable:
        self.add_loss(functools.partial(_loss_for_variable, v))
    else:
      self.add_loss(functools.partial(_loss_for_variable, variable))

  def _handle_activity_regularization(self, inputs, outputs):
    # Apply activity regularization.
    # Note that it should be applied every time the layer creates a new
    # output, since it is output-specific.
    if self._activity_regularizer:
      output_list = tf.nest.flatten(outputs)
      with backend.name_scope('ActivityRegularizer'):
        for output in output_list:
          activity_loss = self._activity_regularizer(output)
          batch_size = tf.cast(
              tf.compat.v1.shape(output)[0], activity_loss.dtype)
          # Make activity regularization strength batch-agnostic.
          mean_activity_loss = activity_loss / batch_size
          base_layer_utils.check_graph_consistency(
              mean_activity_loss, method='activity_regularizer')
          self.add_loss(mean_activity_loss, inputs=inputs)

  def _set_mask_metadata(self, inputs, outputs, previous_mask):
    """Attaches `_keras_mask` metadata to each output tensor."""
    flat_outputs = tf.nest.flatten(outputs)

    mask_already_computed = (
        getattr(self, '_compute_output_and_mask_jointly', False) or
        all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))

    # Only compute the mask if the Layer explicitly supports masking or has
    # overridden `compute_mask`.
    should_compute_mask = (
        hasattr(self, 'compute_mask') and
        (self.supports_masking or
         not getattr(self.compute_mask, '_is_default', False)))

    if mask_already_computed:
      flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]
    elif not should_compute_mask:
      flat_masks = [None for _ in flat_outputs]
    else:
      output_masks = self.compute_mask(inputs, previous_mask)
      # `compute_mask` can return a single `None` even when a Layer
      # has multiple outputs.
      if output_masks is None:
        flat_masks = [None for _ in flat_outputs]
      else:
        flat_masks = tf.nest.flatten(output_masks)

    for output, mask in zip(flat_outputs, flat_masks):
      try:
        output._keras_mask = mask
      except AttributeError:
        # C Type such as np.ndarray.
        pass

    if tf_utils.are_all_symbolic_tensors(flat_outputs):
      for output in flat_outputs:
        if getattr(output, '_keras_mask', None) is not None:
          # Do not track masks for `TensorFlowOpLayer` construction.
          output._keras_mask._keras_history_checked = True

  def _collect_input_masks(self, inputs, args, kwargs):
    """Checks if `mask` argument was passed, else gathers mask from inputs."""
    if self._call_arg_was_passed('mask', args, kwargs):
      return self._get_call_arg_value('mask', args, kwargs)

    if not self._should_compute_mask:
      return None

    input_masks = tf.nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
                                        inputs)
    if generic_utils.is_all_none(input_masks):
      return None
    return input_masks

  def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
    """Returns True if `arg_name` was given positionally or as a keyword."""
    if arg_name in kwargs:
      return True
    call_fn_args = self._call_fn_args
    if not inputs_in_args:
      # Ignore `inputs` arg.
      call_fn_args = call_fn_args[1:]
    if arg_name in dict(zip(call_fn_args, args)):
      return True
    return False

  def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
    """Returns the value passed for `arg_name` (keyword or positional)."""
    if arg_name in kwargs:
      return kwargs[arg_name]
    call_fn_args = self._call_fn_args
    if not inputs_in_args:
      # Ignore `inputs` arg.
      call_fn_args = call_fn_args[1:]
    args_dict = dict(zip(call_fn_args, args))
    return args_dict[arg_name]

  def _set_call_arg_value(
      self, arg_name, new_value, args,
      kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
    # Overwrites `arg_name` in-place in `args`/`kwargs`, returning the
    # (possibly copied) updated pair.
    arg_pos = self._call_fn_arg_positions.get(arg_name, None)
    if arg_pos is not None:
      if not inputs_in_args:
        # Ignore `inputs` arg.
        arg_pos = arg_pos - 1
      if len(args) > arg_pos:
        args = list(args)
        args[arg_pos] = new_value
        return args, kwargs
    if new_value is None and pop_kwarg_if_none:
      kwargs.pop(arg_name, None)
    else:
      kwargs[arg_name] = new_value
    return args, kwargs

  def _get_node_attribute_at_index(self, node_index, attr, attr_name):
    """Private utility to retrieves an attribute (e.g. inputs) from a node.

    This is used to implement the methods:
        - get_input_shape_at
        - get_output_shape_at
        - get_input_at
        etc...

    Args:
        node_index: Integer index of the node from which
            to retrieve the attribute.
        attr: Exact node attribute name.
        attr_name: Human-readable attribute name, for error messages.
    Returns:
        The layer's attribute `attr` at the node of index `node_index`.

    Raises:
        RuntimeError: If the layer has no inbound nodes, or if called in Eager
        mode.
        ValueError: If the index provided does not match any node.
    """
    if not self._inbound_nodes:
      raise RuntimeError('The layer has never been called '
                         'and thus has no defined ' + attr_name + '.')
    if not len(self._inbound_nodes) > node_index:
      raise ValueError('Asked to get ' + attr_name + ' at node ' +
                       str(node_index) + ', but the layer has only ' +
                       str(len(self._inbound_nodes)) + ' inbound nodes.')
    values = getattr(self._inbound_nodes[node_index], attr)
    if isinstance(values, list) and len(values) == 1:
      # Single-element lists are unwrapped for convenience.
      return values[0]
    else:
      return values

  def _maybe_build(self, inputs):
    # Check input assumptions set before layer building, e.g. input rank.
    if not self.built:
      input_spec.assert_input_compatibility(
          self.input_spec, inputs, self.name)
      input_list = tf.nest.flatten(inputs)
      if input_list and self._dtype_policy.compute_dtype is None:
        # If the dtype was not set yet, infer it from the first input.
        try:
          dtype = input_list[0].dtype.base_dtype.name
        except AttributeError:
          pass
        else:
          self._set_dtype_policy(policy.Policy(dtype))

      input_shapes = None
      if all(hasattr(x, 'shape') for x in input_list):
        input_shapes = tf.nest.map_structure(lambda x: x.shape, inputs)
      # Only call `build` if the user has manually overridden the build method.
      if not hasattr(self.build, '_is_default'):
        # Any setup work performed only once should happen in an `init_scope`
        # to avoid creating symbolic Tensors that will later pollute any eager
        # operations.
        with tf_utils.maybe_init_scope(self):
          self.build(input_shapes)
      # We must set also ensure that the layer is marked as built, and the build
      # shape is stored since user defined build functions may not be calling
      # `super.build()`
      Layer.build(self, input_shapes)

    # Optionally load weight values specified at layer instantiation.
    if self._initial_weights is not None:
      self.set_weights(self._initial_weights)
      self._initial_weights = None

  def _symbolic_call(self, inputs):
    # Builds placeholder outputs matching `compute_output_shape`, used when
    # the layer is called symbolically without executing `call`.
    input_shapes = tf.nest.map_structure(lambda x: x.shape, inputs)
    output_shapes = self.compute_output_shape(input_shapes)

    def _make_placeholder_like(shape):
      ph = backend.placeholder(shape=shape, dtype=self.dtype)
      ph._keras_mask = None
      return ph

    return tf.nest.map_structure(_make_placeholder_like, output_shapes)

  def _get_trainable_state(self):
    """Get the `trainable` state of each sublayer.

    Returns:
      A dict mapping all sublayers to their `trainable` value.
    """
    layers = self._flatten_layers(include_self=False, recursive=False)
    trainable_state = {self: self.trainable}
    for l in layers:
      trainable_state.update(l._get_trainable_state())
    return trainable_state

  def _set_trainable_state(self, trainable_state):
    """Set `trainable` state for each sublayer."""
    if self in trainable_state:
      self.trainable = trainable_state[self]
    layers = self._flatten_layers(include_self=False, recursive=False)
    for l in layers:
      if l in trainable_state:
        l._set_trainable_state(trainable_state)

  @property
  def _obj_reference_counts(self):
    """A dictionary counting the number of attributes referencing an object."""
    self._maybe_create_attribute('_obj_reference_counts_dict',
                                 object_identity.ObjectIdentityDictionary())
    return self._obj_reference_counts_dict

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _maybe_create_attribute(self, name, default_value):
    """Create the attribute with the default value if it hasn't been created.

    This is useful for fields that is used for tracking purpose,
    _trainable_weights, or _layers. Note that user could create a layer
    subclass and assign an internal field before invoking the
    Layer.__init__(), the __setattr__() need to create the tracking fields
    and __init__() need to not override them.

    Args:
      name: String, the name of the attribute.
      default_value: Object, the default value of the attribute.
""" if not hasattr(self, name): self.__setattr__(name, default_value) def __delattr__(self, name): # For any super.__delattr__() call, we will directly use the implementation # in Trackable and skip the behavior in AutoTrackable. The Layer was # originally use Trackable as base class, the change of using Module as base # class forced us to have AutoTrackable in the class hierarchy. # # TODO(b/180760306) Keeping the status quo of skipping _delattr__ and # __setattr__ in AutoTrackable may be unsustainable. existing_value = getattr(self, name, None) # If this value is replacing an existing object assigned to an attribute, we # should clean it out to avoid leaking memory. First we check if there are # other attributes referencing it. reference_counts = self._obj_reference_counts if existing_value not in reference_counts: super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call return reference_count = reference_counts[existing_value] if reference_count > 1: # There are other remaining references. We can't remove this object from # _layers etc. reference_counts[existing_value] = reference_count - 1 super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call return else: # This is the last remaining reference. 
del reference_counts[existing_value] super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call if (isinstance(existing_value, Layer) or base_layer_utils.has_weights(existing_value)): super(tf.__internal__.tracking.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call '_self_tracked_trackables', [l for l in self._self_tracked_trackables if l is not existing_value]) if isinstance(existing_value, tf.Variable): super(tf.__internal__.tracking.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call '_trainable_weights', [w for w in self._trainable_weights if w is not existing_value]) super(tf.__internal__.tracking.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call '_non_trainable_weights', [w for w in self._non_trainable_weights if w is not existing_value]) def __setattr__(self, name, value): if (name == '_self_setattr_tracking' or not getattr(self, '_self_setattr_tracking', True) or # Exclude @property.setters from tracking hasattr(self.__class__, name)): try: super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value) # pylint: disable=bad-super-call except AttributeError: raise AttributeError( ('Can\'t set the attribute "{}", likely because it conflicts with ' 'an existing read-only @property of the object. Please choose a ' 'different name.').format(name)) return # Keep track of trackable objects, for the needs of `Network.save_weights`. value = tf.__internal__.tracking.sticky_attribute_assignment( trackable=self, value=value, name=name) reference_counts = self._obj_reference_counts reference_counts[value] = reference_counts.get(value, 0) + 1 # Clean out the old attribute, which clears _layers and _trainable_weights # if necessary. try: self.__delattr__(name) except AttributeError: pass # Keep track of metric instance created in subclassed layer. 
from keras import metrics as metrics_module # pylint: disable=g-import-not-at-top for val in tf.nest.flatten(value): if isinstance(val, metrics_module.Metric) and hasattr(self, '_metrics'): self._metrics.append(val) # TODO(scottzhu): Need to track Module object as well for weight tracking. # Be careful about metric if it becomes a Module in future. # Append value to self._layers if relevant if (getattr(self, '_auto_track_sub_layers', True) and (isinstance(value, Layer) or base_layer_utils.has_weights(value))): self._maybe_create_attribute('_self_tracked_trackables', []) # We need to check object identity to avoid de-duplicating empty # container types which compare equal. if not any((layer is value for layer in self._self_tracked_trackables)): self._self_tracked_trackables.append(value) if hasattr(value, '_use_resource_variables'): # Legacy layers (V1 tf.layers) must always use # resource variables. value._use_resource_variables = True # Append value to list of trainable / non-trainable weights if relevant # TODO(b/125122625): This won't pick up on any variables added to a # list/dict after creation. for val in tf.nest.flatten(value): if not isinstance(val, tf.Variable): continue # Users may add extra weights/variables # simply by assigning them to attributes (invalid for graph networks) self._maybe_create_attribute('_trainable_weights', []) self._maybe_create_attribute('_non_trainable_weights', []) if val.trainable: if any(val is w for w in self._trainable_weights): continue self._trainable_weights.append(val) else: if any(val is w for w in self._non_trainable_weights): continue self._non_trainable_weights.append(val) backend.track_variable(val) # TODO(b/180760306) Skip the auto trackable from tf.Module to keep status # quo. See the comment at __delattr__. 
super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value) # pylint: disable=bad-super-call # This is a hack so that the is_layer (within # training/trackable/layer_utils.py) check doesn't get the weights attr. # TODO(b/110718070): Remove when fixed. def _is_layer(self): return True def _init_call_fn_args(self, expects_training_arg=None): # Clear cached call function arguments. self.__class__._call_full_argspec.fget.cache.pop(self, None) self.__class__._call_fn_args.fget.cache.pop(self, None) self.__class__._call_accepts_kwargs.fget.cache.pop(self, None) call_fn_args = self._call_fn_args if expects_training_arg is None: self._expects_training_arg = ('training' in call_fn_args or self._call_accepts_kwargs) else: # Use value encoded into the metadata when loading from the SavedModel. self._expects_training_arg = expects_training_arg self._expects_mask_arg = ('mask' in call_fn_args or self._call_accepts_kwargs) @property @layer_utils.cached_per_instance def _call_full_argspec(self): # Argspec inspection is expensive and the call spec is used often, so it # makes sense to cache the result. return tf_inspect.getfullargspec(self.call) @property @layer_utils.cached_per_instance def _call_fn_args(self): all_args = self._call_full_argspec.args # Scrub `self` that appears if a decorator was applied. 
if all_args and all_args[0] == 'self': return all_args[1:] return all_args @property @layer_utils.cached_per_instance def _call_fn_arg_positions(self): call_fn_arg_positions = dict() for pos, arg in enumerate(self._call_fn_args): call_fn_arg_positions[arg] = pos return call_fn_arg_positions @property @layer_utils.cached_per_instance def _call_accepts_kwargs(self): return self._call_full_argspec.varkw is not None @property @layer_utils.cached_per_instance def _should_compute_mask(self): return ('mask' in self._call_fn_args or getattr(self, 'compute_mask', None) is not None) def _dedup_weights(self, weights): """Dedupe weights while maintaining order as much as possible.""" output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for details. @property def _trackable_saved_model_saver(self): return layer_serialization.LayerSavedModelSaver(self) @property def _object_identifier(self): return self._trackable_saved_model_saver.object_identifier @property def _tracking_metadata(self): return self._trackable_saved_model_saver.tracking_metadata def _list_extra_dependencies_for_serialization(self, serialization_cache): return (self._trackable_saved_model_saver .list_extra_dependencies_for_serialization(serialization_cache)) def _list_functions_for_serialization(self, serialization_cache): return (self._trackable_saved_model_saver .list_functions_for_serialization(serialization_cache)) def __getstate__(self): # Override to support `copy.deepcopy` and pickling. # Thread-local objects cannot be copied in Python 3, so pop these. # Thread-local objects are used to cache losses in MirroredStrategy, and # so shouldn't be copied. 
state = self.__dict__.copy() state.pop('_thread_local', None) return state def __setstate__(self, state): state['_thread_local'] = threading.local() # Bypass Trackable logic as `__dict__` already contains this info. object.__setattr__(self, '__dict__', state) class KerasHistory( collections.namedtuple('KerasHistory', ['layer', 'node_index', 'tensor_index'])): """Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an `InputLayer`. This allows Keras to track how each Tensor was produced, and this information is later retraced by the `keras.engine.Network` class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Tensor is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via `nest.flatten`. """ # Added to maintain memory and performance characteristics of `namedtuple` # while subclassing. __slots__ = () # Avoid breaking users who directly import this symbol from this file. # TODO(fchollet): remove this. InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
96,525
39.438207
118
py
keras
keras-master/keras/engine/node_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for layer graphs construction & handling."""

import tensorflow.compat.v2 as tf

from keras import keras_parameterized
from keras.engine import base_layer
from keras.engine import node as node_module


class DummyTensor:
  """Stand-in for a Keras tensor: only carries an optional `shape`."""

  def __init__(self, shape=None):
    self.shape = shape


class DummyLayer(base_layer.Layer):
  """Minimal concrete Layer used purely as a node endpoint in these tests."""
  pass


class NetworkConstructionTest(keras_parameterized.TestCase):

  def test_chained_node_construction(self):
    """Nodes created for chained layer calls record the correct wiring."""
    # test basics
    a = DummyTensor(shape=(None, 32))
    b = DummyTensor(shape=(None, 32))

    a_layer = DummyLayer()
    node = node_module.Node(a_layer, outputs=a)
    # A Node with no call_args is an input node.
    self.assertEqual(node.outbound_layer, a_layer)
    self.assertTrue(node.is_input)
    self.assertListEqual(node.inbound_layers, [])
    self.assertListEqual(node.input_tensors, [a])
    self.assertListEqual(node.input_shapes, [(None, 32)])
    self.assertListEqual(node.output_tensors, [a])
    self.assertListEqual(node.output_shapes, [(None, 32)])

    b_layer = DummyLayer()
    node_module.Node(b_layer, outputs=b)

    dense = DummyLayer()
    a_2 = DummyTensor()
    node_a = node_module.Node(layer=dense, call_args=(a,), outputs=a_2)
    b_2 = DummyTensor()
    node_b = node_module.Node(layer=dense, call_args=(b,), outputs=b_2)

    # test the node attributes
    self.assertFalse(node_a.is_input)
    self.assertFalse(node_b.is_input)
    self.assertEqual(node_a.call_args, (a,))
    self.assertEqual(node_a.call_kwargs, {})
    self.assertEqual(node_a.outputs, a_2)

    # Test the layer wiring
    self.assertLen(dense._inbound_nodes, 2)
    self.assertLen(dense._outbound_nodes, 0)
    self.assertEqual(dense._inbound_nodes, [node_a, node_b])
    self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer)
    self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
    self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer)
    self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
    self.assertIs(dense._inbound_nodes[0].input_tensors, a)
    self.assertIs(dense._inbound_nodes[1].input_tensors, b)

  def test_multi_input_node(self):
    """A node whose call takes a list of tensors tracks all of them."""
    # test multi-input layer
    a = DummyTensor()
    b = DummyTensor()

    dense = DummyLayer()
    a_2 = DummyTensor()
    node_module.Node(layer=dense, call_args=(a,), outputs=a_2)
    b_2 = DummyTensor()
    node_module.Node(layer=dense, call_args=(b,), outputs=b_2)

    concat_layer = DummyLayer()
    merged = DummyTensor()
    node_module.Node(layer=concat_layer, call_args=([a_2, b_2],),
                     outputs=merged)

    merge_layer, merge_node_index, merge_tensor_index = merged._keras_history

    self.assertEqual(merge_node_index, 0)
    self.assertEqual(merge_tensor_index, 0)

    self.assertLen(merge_layer._inbound_nodes, 1)
    self.assertLen(merge_layer._outbound_nodes, 0)

    self.assertLen(merge_layer._inbound_nodes[0].input_tensors, 2)
    self.assertEqual(merge_layer._inbound_nodes[0].input_tensors, [a_2, b_2])
    self.assertLen(merge_layer._inbound_nodes[0].inbound_layers, 2)

  def test_arg_and_kwarg_mix(self):
    """Tensors passed as extra positional args and kwargs are tracked too."""
    input_layer = DummyLayer()
    input_layer_2 = DummyLayer()
    a = DummyTensor()
    node_a = node_module.Node(layer=input_layer, outputs=a)
    b = DummyTensor()
    node_b = node_module.Node(layer=input_layer_2, outputs=b)

    arg_2 = DummyTensor()
    arg_3 = DummyTensor()
    node_c = node_module.Node(layer=input_layer, outputs=arg_3)

    kwarg_x = DummyTensor()
    kwarg_y = DummyTensor()
    node_d = node_module.Node(layer=input_layer, outputs=kwarg_y)

    merge_layer = DummyLayer()
    merged = DummyTensor()
    node = node_module.Node(layer=merge_layer,
                            call_args=([a, b], arg_2, arg_3),
                            call_kwargs={'x': kwarg_x, 'y': kwarg_y},
                            outputs=merged)

    merge_layer, merge_node_index, merge_tensor_index = merged._keras_history

    # Check the saved call args/kwargs
    self.assertEqual(([a, b], arg_2, arg_3), node.call_args)
    self.assertEqual({'x': kwarg_x, 'y': kwarg_y}, node.call_kwargs)

    # Only the inputs that were produced by input nodes should appear in
    # keras_tensors
    self.assertEqual({a, b, arg_3, kwarg_y}, set(node.keras_inputs))
    self.assertEqual(set(node.parent_nodes), {node_a, node_b, node_c, node_d})

    # Check the layer wirings
    self.assertEqual(merge_node_index, 0)
    self.assertEqual(merge_tensor_index, 0)
    self.assertLen(merge_layer._inbound_nodes, 1)
    self.assertLen(merge_layer._outbound_nodes, 0)
    self.assertLen(input_layer._outbound_nodes, 3)
    self.assertLen(input_layer_2._outbound_nodes, 1)

    # The 'backwards compatibility' attributes should only check the
    # first call argument
    self.assertLen(merge_layer._inbound_nodes[0].input_tensors, 2)
    self.assertEqual(merge_layer._inbound_nodes[0].input_tensors, [a, b])
    self.assertLen(merge_layer._inbound_nodes[0].inbound_layers, 2)


if __name__ == '__main__':
  tf.test.main()
5,662
34.841772
78
py
keras
keras-master/keras/engine/training_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""

import tensorflow.compat.v2 as tf

import numpy as np
from keras.utils import generic_utils


def slice_arrays(arrays, indices, contiguous=True):
  """Slices batches out of provided arrays (workaround for eager tensors).

  Eager tensors follow symbolic-TF slicing semantics rather than Numpy's,
  so `generic_utils.slice_arrays` cannot be used directly on them; for the
  non-contiguous case we fall back to slicing single rows and `concat`-ing
  them, which has a performance cost.

  Args:
    arrays: Single array or list of arrays.
    indices: List of indices in the array that should be included in the
      output batch.
    contiguous: Boolean flag indicating whether the indices are contiguous.

  Returns:
    Slice of data (either single array or list of arrays).
  """
  single_input = not isinstance(arrays, list)
  if single_input:
    arrays = [arrays]

  if any(tf.is_tensor(x) for x in arrays):
    if contiguous:
      # Contiguous indices allow one cheap range slice per array.
      start, stop = indices[0], indices[-1] + 1
      sliced = [x[start:stop] for x in arrays]
    else:
      # Gather one-row slices and stitch them together along the batch axis.
      sliced = [
          tf.concat([x[i:i + 1] for i in indices], axis=0) for x in arrays
      ]
  else:
    sliced = generic_utils.slice_arrays(arrays, indices)

  return sliced[0] if single_input else sliced


def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes,
                                  check_all_flat=False):
  """Adds 1.0 as sample weights for the outputs for which there is no weight.

  Args:
    outputs: List of model outputs.
    sample_weights: List of sample weight inputs.
    sample_weight_modes: List of sample weight modes or None.
    check_all_flat: Ensure that inputs are not nested structures. This is not
      a free check, so we may not want to run it eagerly every iteration.

  Returns:
    Tuple of sample weights, one sample weight for every output, and booleans
    describing the raw sample weights.
  """
  if sample_weights is None:
    any_sample_weight = False
  else:
    any_sample_weight = any(w is not None for w in sample_weights)
  partial_sample_weight = any_sample_weight and any(
      w is None for w in sample_weights)

  if not any_sample_weight:
    return None, any_sample_weight, partial_sample_weight

  if not partial_sample_weight:
    return sample_weights, any_sample_weight, partial_sample_weight

  if check_all_flat:
    # Verify that neither the weights, the outputs, nor the modes are nested.
    tf.nest.assert_same_structure(
        list_to_tuple(sample_weights),
        list_to_tuple(tf.nest.flatten(sample_weights)))
    tf.nest.assert_same_structure(
        list_to_tuple(outputs), list_to_tuple(tf.nest.flatten(outputs)))
    if sample_weight_modes is not None:
      tf.nest.assert_same_structure(sample_weight_modes,
                                    tf.nest.flatten(sample_weight_modes))

  filled_weights = []
  for index, weight in enumerate(sample_weights):
    if weight is not None:
      filled_weights.append(weight)
      continue
    # Missing weight: synthesize all-ones of the matching batch (and, for
    # 'temporal' mode, timestep) dimension, keeping numpy vs tensor parity
    # with the corresponding output.
    output = outputs[index]
    use_numpy = isinstance(output, np.ndarray)
    output_shape = output.shape if use_numpy else tf.shape(output)
    temporal = (
        sample_weight_modes is not None and
        sample_weight_modes[index] == 'temporal')
    weight_shape = ((output_shape[0], output_shape[1])
                    if temporal else (output_shape[0],))
    filled_weights.append(
        np.ones(weight_shape) if use_numpy else tf.ones(weight_shape))

  return (list_to_tuple(filled_weights),
          any_sample_weight, partial_sample_weight)


class RespectCompiledTrainableState:
  """Set and restore trainable state if it has changed since compile.

  The keras API guarantees that the value of each Layer's `trainable` property
  at `Model.compile` time will be used when training that model. To honor
  that, this scope compares the current per-layer `trainable` values against
  the ones captured at compile time, flips the model back to its compiled
  state on entry when (and only when) something diverged, and restores the
  pre-entry state on exit. Because updating every layer is expensive and
  trainable flags rarely change, the set/unset work is skipped entirely when
  nothing diverged.
  """

  def __init__(self, model):
    self._model = model
    self._current_trainable_state = None
    self._compiled_trainable_state = None
    self._should_set_trainable = False

  def __enter__(self):
    self._current_trainable_state = self._model._get_trainable_state()  # pylint: disable=protected-access
    self._compiled_trainable_state = self._model._compiled_trainable_state  # pylint: disable=protected-access

    # Only pay the cost of resetting layers when at least one layer's
    # trainable flag diverged from its compile-time value.
    self._should_set_trainable = any(
        layer in self._current_trainable_state and
        self._current_trainable_state[layer] != trainable
        for layer, trainable in self._compiled_trainable_state.items())
    if self._should_set_trainable:
      self._model._set_trainable_state(self._compiled_trainable_state)  # pylint: disable=protected-access

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Undo the __enter__ changes, if any were made.
    if self._should_set_trainable:
      self._model._set_trainable_state(self._current_trainable_state)  # pylint: disable=protected-access
    return False  # False values do not suppress exceptions


# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
  """Retrieves input shape and input dtype of layer if applicable.

  Args:
    layer: Layer (or model) instance.

  Returns:
    Tuple (input_shape, input_dtype). Both could be None if the layer
      does not have a defined input shape.

  Raises:
    ValueError: in case an empty Sequential or Functional model is passed.
  """

  def _is_graph_model(candidate):
    # Functional models set `_is_graph_network`; Sequential is matched by
    # class name so subclassed models are excluded.
    if getattr(candidate, '_is_graph_network', False):
      return True
    return candidate.__class__.__name__ == 'Sequential'

  # In case of nested models: descend into the first layer of the deepest
  # graph model to infer input shape and dtype. Subclassed Models may not
  # have been built, so they can't be inspected this way.
  while _is_graph_model(layer):
    if not layer.layers:
      raise ValueError('An empty Model cannot be used as a Layer.')
    layer = layer.layers[0]

  batch_input_shape = getattr(layer, '_batch_input_shape', None)
  if batch_input_shape:
    return batch_input_shape, layer.dtype
  return None, None
# pylint: enable=protected-access


def get_static_batch_size(layer):
  """Gets the static batch size of a Layer.

  Args:
    layer: a `Layer` instance.

  Returns:
    The static batch size of a Layer.
  """
  batch_input_shape, _ = get_input_shape_and_dtype(layer)
  if batch_input_shape is None:
    return None
  return tf.compat.v1.Dimension(batch_input_shape[0]).value


def list_to_tuple(maybe_list):
  """Datasets will stack the list of tensor, so switch them to tuples."""
  return tuple(maybe_list) if isinstance(maybe_list, list) else maybe_list
8,117
35.9
110
py
keras
keras-master/keras/engine/training_integration_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end tests for a variety of small models."""

import tensorflow.compat.v2 as tf

import collections
import itertools

from absl.testing import parameterized
import numpy as np

import keras
from keras import keras_parameterized
from keras import testing_utils


def _conv2d_filter(**kwargs):
  """Convolution with non-default strides and dilation rate is not supported."""
  # Valid iff at most one of strides / dilation_rate exceeds 1.
  return kwargs['strides'] <= 1 or kwargs['dilation_rate'] <= 1


# Scheme: (layer_class, data_shape, fuzz_dims, constructor_args, filter_fn)
#   layer_class:
#     A keras Layer class to be tested.
#   data_shape:
#     The shape of the input data. (not including batch dim)
#   fuzz_dims:
#     Dimensions which can be unspecified during model construction. For
#     instance, if data_shape is (2, 5) and fuzz_dims is (False, True), a pass
#     with model input shape of (2, None) will also be performed.
#   constructor_args:
#     An OrderedDict (to ensure consistent test names) with a key and a list
#     of values to test. Test cases will be generated for the Cartesian product
#     of all constructor args, so adding more fields can cause the drastically
#     increase the testing load.
#   filter_fn:
#     If not None, this function will be called on each set of generated
#     constructor args, and prevents generation of contradictory combinations.
#     A True return value indicates a valid test.
_LAYERS_TO_TEST = [
    (keras.layers.Dense, (1,), (False,), collections.OrderedDict([
        ('units', [1])]), None),
    (keras.layers.Activation, (2, 2), (True, True),
     collections.OrderedDict([('activation', ['relu'])]), None),
    (keras.layers.Dropout, (16,), (False,), collections.OrderedDict([
        ('rate', [0.25])]), None),
    (keras.layers.BatchNormalization, (8, 8, 3), (True, True, False),
     collections.OrderedDict([
         ('axis', [3]),
         ('center', [True, False]),
         ('scale', [True, False])
     ]), None),
    (keras.layers.Conv1D, (8, 8), (False, False), collections.OrderedDict([
        ('filters', [1]),
        ('kernel_size', [1, 3]),
        ('strides', [1, 2]),
        ('padding', ['valid', 'same']),
        ('use_bias', [True]),
        ('kernel_regularizer', ['l2']),
        ('data_format', ['channels_last'])
    ]), None),
    (keras.layers.Conv2D, (8, 8, 3), (True, True, False),
     collections.OrderedDict([
         ('filters', [1]),
         ('kernel_size', [1, 3]),
         ('strides', [1, 2]),
         ('padding', ['valid', 'same']),
         ('use_bias', [True, False]),
         ('kernel_regularizer', ['l2']),
         ('dilation_rate', [1, 2]),
         ('data_format', ['channels_last'])
     ]), _conv2d_filter),
    (keras.layers.LSTM, (4, 4), (False, False), collections.OrderedDict([
        ('units', [1]),
        ('kernel_regularizer', ['l2']),
        ('dropout', [0, 0.5]),
        ('stateful', [True, False]),
        ('unroll', [True, False]),
        ('return_sequences', [True, False])
    ]), None),
]


def _gather_test_cases():
  """Expands _LAYERS_TO_TEST into named parameterized test cases."""
  cases = []
  for layer_type, inp_shape, fuzz_dims, arg_dict, filter_fn in _LAYERS_TO_TEST:
    arg_combinations = [[(k, i) for i in v] for k, v in arg_dict.items()]  # pylint: disable=g-complex-comprehension
    for arguments in itertools.product(*arg_combinations):
      layer_kwargs = {k: v for k, v in arguments}
      # Skip combinations that the layer rejects (e.g. Conv2D with both
      # strides > 1 and dilation_rate > 1).
      if filter_fn is not None and not filter_fn(**layer_kwargs):
        continue

      name = '_{}_{}'.format(layer_type.__name__,
                             '_'.join('{}_{}'.format(*i) for i in arguments))
      cases.append((name, layer_type, inp_shape, fuzz_dims, layer_kwargs))
  return cases


OUTPUT_TEST_CASES = _gather_test_cases()


class CoreLayerIntegrationTest(keras_parameterized.TestCase):
  """Test that layers and models produce the correct tensor types."""

  # In v1 graph there are only symbolic tensors.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @parameterized.named_parameters(*OUTPUT_TEST_CASES)
  def test_layer_output_type(self, layer_to_test, input_shape, _,
                             layer_kwargs):
    """Calls the layer directly and through a Model; checks output types."""
    layer = layer_to_test(**layer_kwargs)

    input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)
    layer_result = layer(input_data)

    inp = keras.layers.Input(shape=input_shape, batch_size=2)
    model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))
    model_result = model(input_data)

    for x in [layer_result, model_result]:
      if not isinstance(x, tf.Tensor):
        raise ValueError('Tensor or EagerTensor expected, got type {}'
                         .format(type(x)))

      # Eager execution must produce EagerTensors, graph mode must not.
      if isinstance(x, tf.__internal__.EagerTensor) != tf.executing_eagerly():
        expected_type = (tf.__internal__.EagerTensor if tf.executing_eagerly()
                         else tf.Tensor)
        raise ValueError('Expected type {}, got type {}'
                         .format(expected_type, type(x)))

  def _run_fit_eval_predict(self, layer_to_test, input_shape, data_shape,
                            layer_kwargs):
    """Builds a tiny model around the layer and runs fit/evaluate/predict."""
    batch_size = 2
    run_eagerly = testing_utils.should_run_eagerly()

    def map_fn(_):
      x = keras.backend.random_uniform(shape=data_shape)
      y = keras.backend.random_uniform(shape=(1,))
      return x, y

    dataset = tf.data.Dataset.range(4).map(map_fn).batch(batch_size)

    inp = keras.layers.Input(shape=input_shape, batch_size=batch_size)
    layer = layer_to_test(**layer_kwargs)(inp)

    # Condense the output down to a single scalar.
    layer = keras.layers.Flatten()(layer)
    layer = keras.layers.Lambda(
        lambda x: tf.reduce_mean(x, keepdims=True))(layer)
    layer = keras.layers.Dense(1, activation=None)(layer)
    model = keras.models.Model(inp, layer)

    model.compile(loss='mse', optimizer='sgd', run_eagerly=run_eagerly)
    model.fit(dataset, verbose=2, epochs=2)

    # Re-compile and fit with an explicit steps_per_epoch as well.
    model.compile(loss='mse', optimizer='sgd', run_eagerly=run_eagerly)
    model.fit(dataset.repeat(2), verbose=2, epochs=2, steps_per_epoch=2)

    eval_dataset = tf.data.Dataset.range(4).map(map_fn).batch(batch_size)
    model.evaluate(eval_dataset, verbose=2)

    def pred_map_fn(_):
      return keras.backend.random_uniform(shape=data_shape)

    pred_dataset = tf.data.Dataset.range(4)
    pred_dataset = pred_dataset.map(pred_map_fn).batch(batch_size)
    model.predict(pred_dataset, verbose=2)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=False)
  @parameterized.named_parameters(*OUTPUT_TEST_CASES)
  def test_model_loops(self, layer_to_test, input_shape, fuzz_dims,
                       layer_kwargs):
    """Runs the fit/eval/predict loops with static and fuzzed input shapes."""
    self._run_fit_eval_predict(layer_to_test, input_shape, input_shape,
                               layer_kwargs)

    if any(fuzz_dims):
      # Re-run with the fuzzable dimensions left unspecified (None) in the
      # model's input shape while feeding fully-specified data.
      fuzzed_shape = []
      for dim, should_fuzz in zip(input_shape, fuzz_dims):
        fuzzed_shape.append(None if should_fuzz else dim)

      self._run_fit_eval_predict(layer_to_test, fuzzed_shape, input_shape,
                                 layer_kwargs)


if __name__ == '__main__':
  tf.test.main()
7,704
37.914141
116
py
keras
keras-master/keras/engine/correctness_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numerical correctness."""

import tensorflow.compat.v2 as tf

from absl.testing import parameterized
import numpy as np

import keras
from keras import keras_parameterized
from keras import testing_utils


class MultiInputSubclassed(keras.Model):
  """Subclassed Model that adds its inputs and then adds a bias."""

  def __init__(self):
    super(MultiInputSubclassed, self).__init__()
    self.add = keras.layers.Add()
    self.bias = testing_utils.Bias()

  def call(self, inputs):
    """Sums the input tensors element-wise and applies the bias layer."""
    added = self.add(inputs)
    return self.bias(added)


def multi_input_functional():
  """Functional Model that adds its inputs and then adds a bias."""
  input_1 = keras.Input(shape=(1,))
  input_2 = keras.Input(shape=(1,))
  input_3 = keras.Input(shape=(1,))
  added = keras.layers.Add()([input_1, input_2, input_3])
  output = testing_utils.Bias()(added)
  return keras.Model([input_1, input_2, input_3], output)


@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class SimpleBiasTest(keras_parameterized.TestCase):
  """Checks exact loss/prediction values for a single-bias model."""

  def _get_simple_bias_model(self):
    # Model consisting of a single Bias layer, trained with SGD(0.1) on MAE.
    model = testing_utils.get_model_from_layers([testing_utils.Bias()],
                                                input_shape=(1,))
    model.compile(
        keras.optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def test_simple_bias_fit(self):
    x = np.array([[0.], [1.], [2.]])
    y = np.array([[0.5], [2.], [3.5]])
    model = self._get_simple_bias_model()

    history = model.fit(x, y, batch_size=3, epochs=5)
    # MAE decreases by exactly 0.1 per epoch given the fixed SGD step.
    self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6])

  def test_simple_bias_evaluate(self):
    x = np.array([[0.], [1.], [2.]])
    y = np.array([[1.], [3.], [5.]])
    model = self._get_simple_bias_model()

    loss = model.evaluate(x, y, batch_size=1)
    self.assertAlmostEqual(loss, 2.)

  def test_simple_bias_predict(self):
    x = np.array([[0.], [1.], [2.]])
    model = self._get_simple_bias_model()

    pred = model.predict(x, batch_size=1)
    # With a zero-initialized bias, prediction is the identity.
    self.assertAllClose(x, pred)


@keras_parameterized.run_all_keras_modes
class MultipleInputTest(keras_parameterized.TestCase):
  """Same exact-value checks for multi-input (subclassed/functional) models."""

  def _get_multiple_input_model(self, subclassed=True):
    if subclassed:
      model = MultiInputSubclassed()
    else:
      model = multi_input_functional()

    model.compile(
        keras.optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  @parameterized.named_parameters(('subclassed', True), ('functional', False))
  def test_multiple_input_fit(self, subclassed):
    x = [
        np.array([[1.], [2.], [3.]]),
        np.array([[4.], [5.], [6.]]),
        np.array([[7.], [8.], [9.]])
    ]
    y = np.array([[12.5], [16.], [19.5]])

    model = self._get_multiple_input_model(subclassed)
    history = model.fit(x, y, batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6])

  @parameterized.named_parameters(('subclassed', True), ('functional', False))
  def test_multiple_input_evaluate(self, subclassed):
    x = [
        np.array([[1.], [2.], [3.]]),
        np.array([[4.], [5.], [6.]]),
        np.array([[7.], [8.], [9.]])
    ]
    y = np.array([[13.], [17.], [21.]])

    model = self._get_multiple_input_model(subclassed)
    loss = model.evaluate(x, y, batch_size=3)
    self.assertAlmostEqual(loss, 2.)

  @parameterized.named_parameters(('subclassed', True), ('functional', False))
  def test_multiple_input_predict(self, subclassed):
    x = [
        np.array([[1.], [2.], [3.]]),
        np.array([[4.], [5.], [6.]]),
        np.array([[7.], [8.], [9.]])
    ]

    model = self._get_multiple_input_model(subclassed)
    pred = model.predict(x, batch_size=1)
    # Element-wise sum of the three inputs (bias is zero-initialized).
    self.assertAllClose(pred, [[12.], [15.], [18.]])


if __name__ == '__main__':
  tf.test.main()
4,608
31.457746
80
py
keras
keras-master/keras/engine/training_arrays_v1.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Part of the Keras training engine related to plain array data.""" import tensorflow.compat.v2 as tf # pylint: disable=protected-access import functools import numpy as np from keras import backend from keras import callbacks as cbks from keras.distribute import distributed_training_utils_v1 from keras.engine import training_utils_v1 from keras.utils.generic_utils import make_batches from keras.utils.generic_utils import slice_arrays from keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as logging try: from scipy.sparse import issparse # pylint: disable=g-import-not-at-top except ImportError: issparse = None def model_iteration(model, inputs, targets=None, sample_weights=None, batch_size=None, epochs=1, verbose=1, callbacks=None, val_inputs=None, val_targets=None, val_sample_weights=None, shuffle=True, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, mode=ModeKeys.TRAIN, validation_in_fit=False, prepared_feed_values_from_dataset=False, steps_name='steps', **kwargs): """Loop function for arrays of data with modes TRAIN/TEST/PREDICT. Args: model: Keras Model instance. inputs: Either a list or dictionary of arrays, or a dataset instance. targets: List/dictionary of input arrays. 
sample_weights: Optional list of sample weight arrays. batch_size: Integer batch size or None if unknown. epochs: Number of times to iterate over the data verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (eg, in a production environment). callbacks: List of callbacks to be called during training val_inputs: Either a list or dictionary of arrays, or a dataset instance. val_targets: List/dictionary of target arrays. val_sample_weights: Optional list of sample weight arrays. shuffle: Whether to shuffle the data at the beginning of each epoch concatenation of list the display names of the outputs of `f` and the list of display names of the outputs of `f_val`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with the default value of `None`. validation_freq: Only relevant if validation data is provided. Integer or `collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. validation_in_fit: if true, then this method is invoked from within training iteration (for validation). 
In the case where `val_inputs` is a dataset, this flag indicates that its iterator and feed values are already created so should properly reuse resources. prepared_feed_values_from_dataset: if True, `inputs` is a list of feed tensors returned from `_prepare_feed_values` call on the validation dataset, so do not call it again on `inputs`. Should only be used for inline validation (i.e., only if `validation_in_fit` is also True). steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. **kwargs: Additional arguments for backwards compatibility. Returns: - In TRAIN mode: `History` object. - In TEST mode: Evaluation metrics. - In PREDICT mode: Outputs of the Model called on inputs. Raises: ValueError: in case of invalid arguments. """ # Backwards compatibility. if 'steps' in kwargs: steps_per_epoch = kwargs.pop('steps') if kwargs: raise TypeError('Unknown arguments: %s' % (kwargs,)) # In case we were passed a dataset, we extract symbolic tensors from it. reset_dataset_after_each_epoch = False input_iterator = None is_dataset = isinstance(inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset)) # TODO(fchollet): consider moving `steps_per_epoch` inference to # _standardize_user_data and set reset_dataset_after_each_epoch as an # attribute on the dataset instance. if is_dataset: if steps_per_epoch is None: reset_dataset_after_each_epoch = True steps_per_epoch = training_utils_v1.infer_steps_for_dataset( model, inputs, steps_per_epoch, epochs=epochs, steps_name=steps_name) input_iterator = _get_iterator(inputs, model._distribution_strategy) # Enter tf.distribute.Strategy scope. 
if model._distribution_strategy: scope = distributed_training_utils_v1.distributed_scope( strategy=model._distribution_strategy, learning_phase=(1 if mode == ModeKeys.TRAIN else 0)) scope.__enter__() use_steps = is_dataset or steps_per_epoch is not None do_validation = val_inputs is not None # Prepare input data. inputs = input_iterator or inputs if validation_in_fit and prepared_feed_values_from_dataset: # When invoking validation in training loop, avoid creating iterator and # list of feed values for the same validation dataset multiple times (which # essentially would call `iterator.get_next()` that slows down execution and # leads to OOM errors eventually. ins = inputs else: ins = _prepare_feed_values(model, inputs, targets, sample_weights, mode) # `ins` is a function when a distribute strategy is used in Eager mode. In # that case `is_dataset` is True. The code branches that have requirements # about the type of `ins` do not trigger in the distributed case. if not is_dataset: num_samples_or_steps = _get_num_samples_or_steps(ins, batch_size, steps_per_epoch) else: num_samples_or_steps = steps_per_epoch # Update sample_weight_mode of the model if sample_weights is specified by the # user. We need to call this function after we have a handle on the inputs # (both numpy arrays and datasets) in order to determine if the user has # specified sample_weights. _update_sample_weight_mode(model, mode, ins) # Get step function and loop type. As part of building the execution # function we recompile the metrics based on the updated # sample_weight_mode value. f = _make_execution_function(model, mode) # Prepare validation data. Hold references to the iterator and the input list # to properly reinitialize and reuse in multiple validation passes. 
val_iterator = None if isinstance(val_inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset)): if validation_steps is None: # Because we pass an iterator feed instead of a Dataset to the eval # model_iteration() call, it will not trigger the dataset-input path # that determines the number of steps required. To avoid this issue, # set validation_steps here if validation_steps is None. validation_steps = training_utils_v1.infer_steps_for_dataset( model, val_inputs, validation_steps, epochs=epochs, steps_name='validation_steps') val_iterator = _get_iterator(val_inputs, model._distribution_strategy) val_inputs = _prepare_feed_values( model, val_iterator, val_targets, val_sample_weights, ModeKeys.TEST) # Get num steps for printing. val_samples_or_steps = validation_steps else: # Get num samples for printing. val_samples_or_steps = val_inputs and tf.nest.flatten( val_inputs)[0].shape[0] or None if mode == ModeKeys.TRAIN and verbose: _print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset) # Configure callbacks. count_mode = 'steps' if use_steps else 'samples' callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=num_samples_or_steps, count_mode=count_mode, verbose=verbose, mode=mode) # Find beforehand arrays that need sparse-to-dense conversion. if issparse is not None and not use_steps: indices_for_conversion_to_dense = [] feed = _get_model_feed(model, mode) for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)): if issparse(input_data) and not backend.is_sparse(feed_tensor): indices_for_conversion_to_dense.append(i) # Select aggregation method. 
if mode == ModeKeys.PREDICT: aggregator = training_utils_v1.OutputsAggregator( use_steps, num_samples=None if steps_per_epoch else num_samples_or_steps, steps=steps_per_epoch) else: aggregator = training_utils_v1.MetricsAggregator( use_steps, num_samples=None if steps_per_epoch else num_samples_or_steps, steps=steps_per_epoch) if model._compile_distribution: distributed_training_utils_v1._copy_weights_to_distributed_model( model, mode) callbacks.model.stop_training = False callbacks._call_begin_hook(mode) initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode) for epoch in range(initial_epoch, epochs): if callbacks.model.stop_training: break # Setup work for each epoch epoch_logs = {} if mode != ModeKeys.PREDICT: # Collecting and resetting metrics has non-zero cost and will needlessly # slow down model.predict. model.reset_metrics() if mode == ModeKeys.TRAIN: callbacks.on_epoch_begin(epoch, epoch_logs) if use_steps: # Step-wise loop. if steps_per_epoch is None: # Loop over dataset until `OutOfRangeError` is raised. target_steps = np.inf else: # Loop over dataset for the specified number of steps. target_steps = steps_per_epoch step = 0 while step < target_steps: batch_logs = {'batch': step, 'size': 1} callbacks._call_batch_hook(mode, 'begin', step, batch_logs) # Get outputs. try: # `ins` can be callable in tf.distribute.Strategy + eager case. if not callable(ins) or (model._distribution_strategy and not distributed_training_utils_v1 .is_distributing_by_cloning(model)): actual_inputs = ins else: actual_inputs = ins() batch_outs = f(actual_inputs) except tf.errors.OutOfRangeError: if is_dataset: # The dataset passed by the user ran out of batches. # Now we know the cardinality of the dataset. # If steps_per_epoch was specified, then running out of data is # unexpected, so we stop training and inform the user. if steps_per_epoch: callbacks.model.stop_training = True logging.warning( 'Your dataset ran out of data; interrupting training. 
' 'Make sure that your dataset can generate at least ' '`%s * epochs` batches (in this case, %d batches). ' 'You may need to use the repeat() function when ' 'building your dataset.' % (steps_name, steps_per_epoch * epochs)) elif step > 0: steps_per_epoch = step aggregator.steps = steps_per_epoch else: # We ran out of batches while the user passed an iterator (legacy). callbacks.model.stop_training = True logging.warning( 'Your dataset iterator ran out of data; ' 'interrupting training. Make sure that your iterator ' 'can generate at least `%s * epochs` ' 'batches (in this case, %d batches). You may need to' 'use the repeat() function when building your ' 'dataset.' % (steps_name, steps_per_epoch * epochs)) break if not isinstance(batch_outs, list): batch_outs = [batch_outs] if model._distribution_strategy: batch_outs = ( distributed_training_utils_v1._per_replica_aggregate_batch( model._distribution_strategy, batch_outs, model, mode)) # Aggregate results. if step == 0: aggregator.create(batch_outs) aggregator.aggregate(batch_outs) # Callbacks batch end. batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode) callbacks._call_batch_hook(mode, 'end', step, batch_logs) step += 1 if callbacks.model.stop_training: break else: # Sample-wise loop. index_array = np.arange(num_samples_or_steps) if shuffle == 'batch': index_array = training_utils_v1.batch_shuffle(index_array, batch_size) elif shuffle: np.random.shuffle(index_array) batches = make_batches(num_samples_or_steps, batch_size) for batch_index, (batch_start, batch_end) in enumerate(batches): batch_ids = index_array[batch_start:batch_end] # Slice into a batch. if len(batches) == 1: # If we only have one batch, do not slice. This takes care of # composite tensors in non-Dataset modes; we currently don't support # slicing them. # TODO(b/133517906): Add slicing support. ins_batch = ins else: try: if ins and isinstance(ins[-1], int): # Do not slice the training phase flag. 
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]] else: ins_batch = slice_arrays(ins, batch_ids) except TypeError: raise TypeError('TypeError while preparing batch. ' 'If using HDF5 input data, ' 'pass shuffle="batch".') # Sparse to dense conversion. if issparse is not None: for i in indices_for_conversion_to_dense: ins_batch[i] = ins_batch[i].toarray() # Callbacks batch_begin. batch_logs = {'batch': batch_index, 'size': len(batch_ids)} callbacks._call_batch_hook(mode, 'begin', batch_index, batch_logs) # Get outputs. batch_outs = f(ins_batch) if not isinstance(batch_outs, list): batch_outs = [batch_outs] # Aggregate results. if batch_index == 0: aggregator.create(batch_outs) aggregator.aggregate(batch_outs, batch_start, batch_end) # Callbacks batch end. batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode) callbacks._call_batch_hook(mode, 'end', batch_index, batch_logs) if callbacks.model.stop_training: break aggregator.finalize() results = aggregator.results epoch_logs = cbks.make_logs(model, epoch_logs, results, mode) if len(results) == 1: results = results[0] # Run the test loop every `validation_freq` epochs during training. if (do_validation and training_utils_v1.should_run_validation(validation_freq, epoch) and not callbacks.model.stop_training): if model._compile_distribution: # Since we create a new clone from the original model we need to copy # the weights back to the original model before we can run validation. 
distributed_training_utils_v1._copy_weights_to_original_model( model, ModeKeys.TRAIN) val_results = model_iteration( model, val_inputs, targets=val_targets, sample_weights=val_sample_weights, batch_size=batch_size, steps_per_epoch=validation_steps, callbacks=callbacks, verbose=0, mode=ModeKeys.TEST, validation_in_fit=True, prepared_feed_values_from_dataset=(val_iterator is not None), steps_name='validation_steps') if not isinstance(val_results, list): val_results = [val_results] epoch_logs = cbks.make_logs( model, epoch_logs, val_results, mode, prefix='val_') if val_iterator and epoch < epochs - 1: _reinitialize_iterator(val_iterator, model._distribution_strategy) if mode == ModeKeys.TRAIN: # Epochs only apply to `fit`. callbacks.on_epoch_end(epoch, epoch_logs) # Reinitialize dataset iterator for the next epoch. if reset_dataset_after_each_epoch and epoch < epochs - 1: _reinitialize_iterator(input_iterator, model._distribution_strategy) model._successful_loop_finish = True callbacks._call_end_hook(mode) if model._distribution_strategy: if model._compile_distribution: # TODO(priyag, psv): Copy back metrics to the original model as well? 
distributed_training_utils_v1._copy_weights_to_original_model(model, mode) scope.__exit__(None, None, None) if mode == ModeKeys.TRAIN: return model.history return results def _get_model_feed(model, mode): if mode == ModeKeys.PREDICT: feed = model._feed_inputs else: feed = ( model._feed_inputs + model._feed_targets + model._feed_sample_weights) return feed def _print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset): increment = 'steps' if is_dataset else 'samples' msg = 'Train on {0} {increment}'.format( num_samples_or_steps, increment=increment) if val_samples_or_steps: msg += ', validate on {0} {increment}'.format( val_samples_or_steps, increment=increment) print(msg) def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch): """Returns total number of samples (when training in batch mode) or steps.""" if steps_per_epoch: return steps_per_epoch return training_utils_v1.check_num_samples(ins, batch_size, steps_per_epoch, 'steps_per_epoch') def _prepare_feed_values(model, inputs, targets, sample_weights, mode): """Prepare feed values to the model execution function. Args: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode. """ if model._distribution_strategy: if isinstance(inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset)): inputs = distributed_training_utils_v1.get_iterator( inputs, model._distribution_strategy) def get_distributed_inputs(): return distributed_training_utils_v1._prepare_feed_values( model, inputs, targets, sample_weights, mode) # In the eager case, we want to call the input method per step, so return # a lambda from here that can be called. Note that this is applicable only # in Distribution Strategy case as it follows the same code path for both # eager and graph modes. 
# TODO(priyag,omalleyt): Either we should move the training DS with # IteratorBase to use training_generator code path, or figure out how to # set a symbolic Iterator out of a Dataset when in eager mode. if tf.executing_eagerly(): return get_distributed_inputs else: return get_distributed_inputs() if isinstance(inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset, tf.compat.v1.data.Iterator)): inputs, targets, sample_weights = model._standardize_user_data( inputs, extract_tensors_from_dataset=True) inputs = training_utils_v1.ModelInputs(inputs).as_list() targets = list(targets or []) sample_weights = list(sample_weights or []) ins = inputs + targets + sample_weights if mode == ModeKeys.TRAIN and not isinstance( backend.symbolic_learning_phase(), int): ins += [True] # Add learning phase value. return ins def _get_iterator(inputs, distribution_strategy=None): if distribution_strategy: return distributed_training_utils_v1.get_iterator( inputs, distribution_strategy) return training_utils_v1.get_iterator(inputs) def _reinitialize_iterator(iterator, distribution_strategy=None): if distribution_strategy: distributed_training_utils_v1.initialize_iterator( iterator, distribution_strategy) else: training_utils_v1.initialize_iterator(iterator) def _make_execution_function(model, mode): """Makes function to run one step of model execution.""" if model._distribution_strategy: return distributed_training_utils_v1._make_execution_function(model, mode) return model._make_execution_function(mode) def _update_sample_weight_mode(model, mode, inputs): """Updates the sample_weight_mode of a given model.""" # Add a quick return to prevent us from calling model._feed_targets that # accesses certain model properties that may not be set in the `PREDICT` mode. if mode == ModeKeys.PREDICT: return sample_weights = None # `inputs` is the model's inputs + targets + sample_weights + # learning phase placeholder if specified. 
To update the sample_weight_mode # we need to determine if the user has passed sample weights as part of the # input. if not callable(inputs): sample_weights = inputs[len(model._feed_inputs) + len(model._feed_targets):] has_learning_phase_pl = (mode == ModeKeys.TRAIN and not isinstance(backend.symbolic_learning_phase(), int)) if has_learning_phase_pl: sample_weights = sample_weights[:-1] model._update_sample_weight_modes(sample_weights=sample_weights) # Call the DistributionStrategy specific function to update the # sample_weight_mode on the model. if model._distribution_strategy: distributed_training_utils_v1._update_sample_weight_modes(model, mode, sample_weights) # For backwards compatibility for internal users of these loops. fit_loop = functools.partial(model_iteration, mode=ModeKeys.TRAIN) test_loop = functools.partial( model_iteration, mode=ModeKeys.TEST, shuffle=False) predict_loop = functools.partial( model_iteration, mode=ModeKeys.PREDICT, shuffle=False) class ArrayLikeTrainingLoop(training_utils_v1.TrainingLoop): """TrainingLoop that handle inputs like array. This is the default handler for most of the input data types, includes symbolic tensors or Numpy array-like, Datasets and iterators in graph mode (since they generate symbolic tensors). This Function is used to handle model with `run_eagerly` = False. 
""" def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs): batch_size = model._validate_or_infer_batch_size(batch_size, steps_per_epoch, x) x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, check_steps=True, steps_name='steps_per_epoch', steps=steps_per_epoch, validation_split=validation_split, shuffle=shuffle) if validation_data: val_x, val_y, val_sample_weights = model._prepare_validation_data( validation_data, batch_size, validation_steps) elif validation_split and 0. < validation_split < 1.: (x, y, sample_weights, val_x, val_y, val_sample_weights ) = training_utils_v1.split_training_and_validation_data( x, y, sample_weights, validation_split) else: if validation_steps: raise ValueError('`validation_steps` should not be specified if ' '`validation_data` is None.') val_x, val_y, val_sample_weights = None, None, None return fit_loop( model, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq, steps_name='steps_per_epoch') def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs): batch_size = model._validate_or_infer_batch_size(batch_size, steps, x) x, y, sample_weights = model._standardize_user_data( x, y, sample_weight=sample_weight, batch_size=batch_size, check_steps=True, steps_name='steps', steps=steps) return test_loop( model, inputs=x, targets=y, sample_weights=sample_weights, 
batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks) def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, **kwargs): batch_size = model._validate_or_infer_batch_size(batch_size, steps, x) x, _, _ = model._standardize_user_data( x, check_steps=True, steps_name='steps', steps=steps) return predict_loop( model, x, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks)
27,795
38.371105
80
py
keras
keras-master/keras/engine/base_preprocessing_layer.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the base ProcessingLayer and a subclass that uses Combiners.""" import abc from keras.engine import data_adapter from keras.engine.base_layer import Layer from keras.utils import version_utils import tensorflow.compat.v2 as tf # pylint: disable=g-direct-tensorflow-import from tensorflow.python.eager import context from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls keras_kpl_gauge = tf.__internal__.monitoring.BoolGauge( '/tensorflow/api/keras/layers/preprocessing', 'keras preprocessing layers usage', 'method') @keras_export('keras.layers.experimental.preprocessing.PreprocessingLayer') class PreprocessingLayer(Layer, metaclass=abc.ABCMeta): """Base class for Preprocessing Layers. **Don't use this class directly: it's an abstract base class!** You may be looking for one of the many built-in [preprocessing layers](https://keras.io/guides/preprocessing_layers/) instead. Preprocessing layers are layers whose state gets computed before model training starts. They do not get updated during training. Most preprocessing layers implement an `adapt()` method for state computation. The `PreprocessingLayer` class is the base class you would subclass to implement your own preprocessing layers. 
""" _must_restore_from_config = True def __init__(self, **kwargs): super(PreprocessingLayer, self).__init__(**kwargs) self._is_compiled = False self._is_adapted = False # Sets `is_adapted=False` when `reset_state` is called. self._reset_state_impl = self.reset_state self.reset_state = self._reset_state_wrapper self._adapt_function = None @property def is_adapted(self): """Whether the layer has been fit to data already.""" return self._is_adapted @doc_controls.do_not_generate_docs def update_state(self, data): """Accumulates statistics for the preprocessing layer. Arguments: data: A mini-batch of inputs to the layer. """ raise NotImplementedError @doc_controls.do_not_generate_docs def reset_state(self): # pylint: disable=method-hidden """Resets the statistics of the preprocessing layer.""" raise NotImplementedError @doc_controls.do_not_generate_docs def finalize_state(self): """Finalize the statistics for the preprocessing layer. This method is called at the end of `adapt` or after restoring a serialized preprocessing layer's state. This method handles any one-time operations that should occur on the layer's state before `Layer.__call__`. """ pass @doc_controls.do_not_generate_docs def make_adapt_function(self): """Creates a function to execute one step of `adapt`. This method can be overridden to support custom adapt logic. This method is called by `PreprocessingLayer.adapt`. Typically, this method directly controls `tf.function` settings, and delegates the actual state update logic to `PreprocessingLayer.update_state`. This function is cached the first time `PreprocessingLayer.adapt` is called. The cache is cleared whenever `PreprocessingLayer.compile` is called. Returns: Function. The function created by this method should accept a `tf.data.Iterator`, retrieve a batch, and update the state of the layer. 
""" if self._adapt_function is not None: return self._adapt_function def adapt_step(iterator): data = next(iterator) self._adapt_maybe_build(data) self.update_state(data) if self._steps_per_execution.numpy().item() == 1: adapt_fn = adapt_step else: def adapt_fn(iterator): for _ in tf.range(self._steps_per_execution): adapt_step(iterator) if not self._run_eagerly: adapt_fn = tf.function(adapt_fn) self._adapt_function = adapt_fn return self._adapt_function def compile(self, run_eagerly=None, steps_per_execution=None): """Configures the layer for `adapt`. Arguments: run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s logic will not be wrapped in a `tf.function`. Recommended to leave this as `None` unless your `Model` cannot be run inside a `tf.function`. steps_per_execution: Int. Defaults to 1. The number of batches to run during each `tf.function` call. Running multiple batches inside a single `tf.function` call can greatly improve performance on TPUs or small models with a large Python overhead. """ if steps_per_execution is None: steps_per_execution = 1 self._configure_steps_per_execution(steps_per_execution) if run_eagerly is None: run_eagerly = self.dynamic self._run_eagerly = run_eagerly self._is_compiled = True def adapt(self, data, batch_size=None, steps=None): """Fits the state of the preprocessing layer to the data being passed. After calling `adapt` on a layer, a preprocessing layer's state will not update during training. In order to make preprocessing layers efficient in any distribution context, they are kept constant with respect to any compiled `tf.Graph`s that call the layer. This does not affect the layer use when adapting each layer only once, but if you adapt a layer multiple times you will need to take care to re-compile any compiled functions as follows: * If you are adding a preprocessing layer to a `keras.Model`, you need to call `model.compile` after each subsequent call to `adapt`. 
* If you are calling a preprocessing layer inside `tf.data.Dataset.map`, you should call `map` again on the input `tf.data.Dataset` after each `adapt`. * If you are using a `tf.function` directly which calls a preprocessing layer, you need to call `tf.function` again on your callable after each subsequent call to `adapt`. `tf.keras.Model` example with multiple adapts: >>> layer = tf.keras.layers.experimental.preprocessing.Normalization( ... axis=None) >>> layer.adapt([0, 2]) >>> model = tf.keras.Sequential(layer) >>> model.predict([0, 1, 2]) array([-1., 0., 1.], dtype=float32) >>> layer.adapt([-1, 1]) >>> model.compile() # This is needed to re-compile model.predict! >>> model.predict([0, 1, 2]) array([0., 1., 2.], dtype=float32) `tf.data.Dataset` example with multiple adapts: >>> layer = tf.keras.layers.experimental.preprocessing.Normalization( ... axis=None) >>> layer.adapt([0, 2]) >>> input_ds = tf.data.Dataset.range(3) >>> normalized_ds = input_ds.map(layer) >>> list(normalized_ds.as_numpy_iterator()) [array([-1.], dtype=float32), array([0.], dtype=float32), array([1.], dtype=float32)] >>> layer.adapt([-1, 1]) >>> normalized_ds = input_ds.map(layer) # Re-map over the input dataset. >>> list(normalized_ds.as_numpy_iterator()) [array([0.], dtype=float32), array([1.], dtype=float32), array([2.], dtype=float32)] Arguments: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. batch_size: Integer or `None`. Number of samples per state update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of datasets, generators, or `keras.utils.Sequence` instances (since they generate batches). steps: Integer or `None`. Total number of steps (batches of samples) When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined. 
If x is a `tf.data` dataset, and 'steps' is None, the epoch will run until the input dataset is exhausted. When passing an infinitely repeating dataset, you must specify the `steps` argument. This argument is not supported with array inputs. """ _disallow_inside_tf_function('adapt') if not version_utils.should_use_v2(): raise RuntimeError('`adapt` is only supported in tensorflow v2.') # pylint: disable=g-doc-exception if not self._is_compiled: self.compile() # Compile with defaults. if self.built: self.reset_state() data_handler = data_adapter.DataHandler( data, batch_size=batch_size, steps_per_epoch=steps, epochs=1, steps_per_execution=self._steps_per_execution, distribute=False) self._adapt_function = self.make_adapt_function() for _, iterator in data_handler.enumerate_epochs(): with data_handler.catch_stop_iteration(): for _ in data_handler.steps(): self._adapt_function(iterator) if data_handler.should_sync: context.async_wait() self.finalize_state() self._is_adapted = True def _reset_state_wrapper(self): """Calls `reset_state` and sets `adapted` to `False`.""" self._reset_state_impl() self._is_adapted = False @tf.__internal__.tracking.no_automatic_dependency_tracking def _configure_steps_per_execution(self, steps_per_execution): self._steps_per_execution = tf.Variable( steps_per_execution, dtype='int64', aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) # TODO(omalleyt): Unify this logic with `Layer._maybe_build`. def _adapt_maybe_build(self, data): if not self.built: try: # If this is a Numpy array or tensor, we can get shape from .shape. # If not, an attribute error will be thrown. data_shape = data.shape data_shape_nones = tuple([None] * len(data.shape)) except AttributeError: # The input has an unknown number of dimensions. data_shape = None data_shape_nones = None # TODO (b/159261555): move this to base layer build. batch_input_shape = getattr(self, '_batch_input_shape', None) if batch_input_shape is None: # Set the number of dimensions. 
self._batch_input_shape = data_shape_nones self.build(data_shape) self.built = True def _disallow_inside_tf_function(method_name): """Disallow calling a method inside a `tf.function`.""" if tf.inside_function(): error_msg = ( 'Detected a call to `PreprocessingLayer.{method_name}` inside a ' '`tf.function`. `PreprocessingLayer.{method_name} is a high-level ' 'endpoint that manages its own `tf.function`. Please move the call ' 'to `PreprocessingLayer.{method_name}` outside of all enclosing ' '`tf.function`s. Note that you can call a `PreprocessingLayer` ' 'directly on `Tensor`s inside a `tf.function` like: `layer(x)`, ' 'or update its state like: `layer.update_state(x)`.').format( method_name=method_name) raise RuntimeError(error_msg)
11,701
38.400673
106
py
keras
keras-master/keras/engine/base_layer_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TensorFlow 2.0 layer behavior.""" # pylint: disable=g-bad-import-order import tensorflow.compat.v2 as tf import copy import os import numpy as np from keras import backend from keras import combinations from keras import keras_parameterized from keras import layers from keras import regularizers from keras import testing_utils from keras.engine import base_layer from keras.engine import input_layer from keras.engine import sequential from keras.engine import training as training_lib from keras.legacy_tf_layers import core as legacy_core from keras.optimizer_v2 import rmsprop from keras.utils import control_flow_util class DynamicLayer(base_layer.Layer): def __init__(self, dynamic=False, **kwargs): super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs) def call(self, inputs): samples = tf.TensorArray( dtype=tf.float32, size=tf.shape(inputs)[0]) for idx, sample in enumerate(inputs): samples = samples.write(idx, tf.square(sample)) return samples.stack() def compute_output_shape(self, input_shape): return input_shape class InvalidLayer(base_layer.Layer): def call(self, inputs): raise ValueError('You did something wrong!') class BaseLayerTest(keras_parameterized.TestCase): @combinations.generate(combinations.keras_mode_combinations()) def test_layer_instrumentation(self): layer 
= layers.Add() self.assertTrue(layer._instrumented_keras_api) self.assertTrue(layer._instrumented_keras_layer_class) self.assertFalse(layer._instrumented_keras_model_class) self.assertTrue(base_layer.keras_api_gauge.get_cell('tf.keras.layers.Add')) # Verify this was not instrumented as a legacy layer self.assertFalse( base_layer.keras_api_gauge.get_cell('legacy_layer').value()) base_layer.keras_api_gauge.get_cell('tf.keras.layers.Add').set(False) @combinations.generate(combinations.keras_model_type_combinations()) def test_dynamic_layer(self): model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)], input_shape=(3,)) self.assertEqual(model.dynamic, True) model.compile(rmsprop.RMSprop(0.001), loss='mse') self.assertEqual(model.run_eagerly, True) model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) @combinations.generate(combinations.keras_model_type_combinations()) def test_dynamic_layer_error(self): # Functional Models hit the `dyanamic=True` error during construction. # Subclass Models should just throw the original autograph error during # execution. raised_error = False try: model = testing_utils.get_model_from_layers([DynamicLayer()], input_shape=(3,)) model.compile(rmsprop.RMSprop(0.001), loss='mse') model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) except tf.errors.OperatorNotAllowedInGraphError as e: if 'iterating over `tf.Tensor` is not allowed' in str(e): raised_error = True except TypeError as e: if 'attempting to use Python control flow' in str(e): raised_error = True self.assertTrue(raised_error) @combinations.generate(combinations.keras_model_type_combinations()) def test_dynamic_layer_error_running_in_graph_mode(self): with tf.compat.v1.get_default_graph().as_default(): model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)], input_shape=(3,)) self.assertEqual(model.dynamic, True) # But then you cannot run the model since you're in a graph scope. 
with self.assertRaisesRegex(ValueError, 'You must enable eager execution'): model.compile(rmsprop.RMSprop(0.001), loss='mse') def test_manual_compute_output_shape(self): class BuildCounter(base_layer.Layer): def __init__(self, *args, **kwargs): # pylint: disable=redefined-outer-name super(BuildCounter, self).__init__(*args, **kwargs) self.build_counter = 0 def build(self, input_shape): self.build_counter += 1 self.build_shape = input_shape def call(self, inputs): return inputs layer = BuildCounter(dtype=tf.float64) output_shape = layer.compute_output_shape((None, 10)) self.assertEqual(layer.build_counter, 1) self.assertEqual(layer.build_shape.as_list(), [None, 10]) self.assertEqual(output_shape.as_list(), [None, 10]) output_signature = layer.compute_output_signature( tf.TensorSpec(dtype=tf.float64, shape=[None, 10])) self.assertEqual(layer.build_counter, 1) self.assertEqual(layer.build_shape.as_list(), [None, 10]) self.assertEqual(output_signature.dtype, tf.float64) self.assertEqual(output_signature.shape.as_list(), [None, 10]) layer(np.ones((5, 10))) self.assertEqual(layer.build_counter, 1) self.assertEqual(layer.build_shape.as_list(), [None, 10]) def test_dynamic_layer_with_deferred_sequential_model(self): model = sequential.Sequential([DynamicLayer(dynamic=True), layers.Dense(3)]) self.assertEqual(model.dynamic, True) model.compile(rmsprop.RMSprop(0.001), loss='mse') self.assertEqual(model.run_eagerly, True) model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) def test_nested_dynamic_layers_in_eager_mode(self): inputs = input_layer.Input((3,)) outputs = DynamicLayer(dynamic=True)(inputs) inner_model = training_lib.Model(inputs, outputs) self.assertEqual(inner_model.dynamic, True) inputs = input_layer.Input((3,)) x = DynamicLayer(dynamic=True)(inputs) outputs = inner_model(x) model = training_lib.Model(inputs, outputs) self.assertEqual(model.dynamic, True) model.compile(rmsprop.RMSprop(0.001), loss='mse') self.assertEqual(model.run_eagerly, 
True) model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) def test_dynamic_subclassed_model_no_shape_inference(self): class MyModel(training_lib.Model): def __init__(self): super(MyModel, self).__init__(dynamic=True) self.layer1 = layers.Dense(3) self.layer2 = layers.Dense(3) def call(self, inputs): if tf.reduce_sum(inputs) > 0: return self.layer1(inputs) else: return self.layer2(inputs) model = MyModel() self.assertEqual(model.dynamic, True) model.compile(rmsprop.RMSprop(0.001), loss='mse') self.assertEqual(model.run_eagerly, True) model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3))) self.assertEqual(model.outputs, None) def test_dynamic_subclassed_model_with_shape_inference(self): class MyModel(training_lib.Model): def __init__(self): super(MyModel, self).__init__(dynamic=True) self.layer1 = layers.Dense(3) self.layer2 = layers.Dense(3) def call(self, inputs): if tf.reduce_sum(inputs) > 0: return self.layer1(inputs) else: return self.layer2(inputs) def compute_output_shape(self, input_shape): return tuple(input_shape[:-1].as_list()) + (3,) model = MyModel() self.assertEqual(model.dynamic, True) model.compile(rmsprop.RMSprop(0.001), loss='mse') x, y = np.random.random((2, 3)), np.random.random((2, 3)) model.train_on_batch(x, y) outputs = model(x) self.assertEqual(outputs.shape.as_list(), [2, 3]) def test_deepcopy(self): bias_reg = lambda x: 1e-3 * tf.reduce_sum(x) layer = layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg) # Call the Layer on data to generate regularize losses. 
layer(tf.ones((1, 10, 10, 3))) self.assertLen(layer.losses, 1) new_layer = copy.deepcopy(layer) self.assertEqual(new_layer.bias_regularizer, bias_reg) self.assertEqual(layer.get_config(), new_layer.get_config()) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_invalid_forward_pass(self): inputs = input_layer.Input((3,)) with self.assertRaisesRegex(ValueError, 'You did something wrong!'): _ = InvalidLayer()(inputs) def test_no_legacy_model(self): inputs = input_layer.Input((1,)) legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0') legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1') layer = legacy_dense_0(inputs) layer = layers.Dense(1)(layer) layer = legacy_dense_1(layer) expected_regex = (r'The following are legacy tf\.layers\.Layers:\n ' '{}\n {}'.format(legacy_dense_0, legacy_dense_1)) with self.assertRaisesRegex(TypeError, expected_regex): _ = training_lib.Model(inputs=[inputs], outputs=[layer]) model = training_lib.Model(inputs=[inputs], outputs=[inputs]) with self.assertRaisesRegex(TypeError, expected_regex): model._insert_layers([legacy_dense_0, legacy_dense_1]) def test_no_legacy_sequential(self): layer = [layers.Dense(1), legacy_core.Dense(1, name='legacy_dense_0')] expected_regex = r'legacy tf\.layers\.Layers:\n {}'.format(layer[1]) with self.assertRaisesRegex(TypeError, expected_regex): _ = sequential.Sequential(layer) with self.assertRaisesRegex(TypeError, expected_regex): _ = sequential.Sequential([input_layer.Input(shape=(4,))] + layer) model = sequential.Sequential() with self.assertRaisesRegex(TypeError, expected_regex): for l in layer: model.add(l) @combinations.generate( combinations.times( combinations.keras_model_type_combinations(), combinations.combine(mode=['graph', 'eager']))) def test_build_with_numpy_data(self): model_layers = [ layers.Dense(3, activation='relu', kernel_initializer='ones'), layers.Dense(1, activation='sigmoid', kernel_initializer='ones') ] model = 
testing_utils.get_model_from_layers(model_layers, input_shape=(4,)) model(np.zeros((2, 4), dtype='float32')) self.assertTrue(model.built) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_default_add_weight(self): class TestLayer(base_layer.Layer): def __init__(self): super(TestLayer, self).__init__() self.default_weight = self.add_weight() self.weight_without_name = self.add_weight(shape=(3, 4)) self.regularized_weight_without_name = self.add_weight( shape=(3, 4), regularizer='l2') layer = TestLayer() self.assertEqual(layer.default_weight.shape.as_list(), []) self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4]) self.assertEqual(layer.default_weight.dtype.name, 'float32') self.assertEqual(layer.weight_without_name.dtype.name, 'float32') self.assertEqual(len(layer.losses), 1) if not tf.executing_eagerly(): # Cannot access tensor.name in eager execution. self.assertIn('Variable_2/Regularizer', layer.losses[0].name) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_add_weight_by_getter(self): layer = base_layer.Layer() variable = tf.Variable('abc') added = layer.add_weight( dtype=tf.string, getter=lambda *_, **__: variable) self.assertIs(variable, added) @combinations.generate(combinations.keras_mode_combinations(mode=['eager'])) def test_learning_phase_freezing_for_layers(self): class LearningPhaseLayer(base_layer.Layer): def call(self, inputs): return backend.in_train_phase(lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs)) def get_learning_phase_value(): model = sequential.Sequential([LearningPhaseLayer(input_shape=(1,))]) model._run_eagerly = testing_utils.should_run_eagerly() return np.sum(model(np.ones((1, 1)))) self.assertEqual(get_learning_phase_value(), 0) # Test scope. with backend.learning_phase_scope(1): self.assertEqual(get_learning_phase_value(), 1) # The effects of the scope end after exiting it. self.assertEqual(get_learning_phase_value(), 0) # Test setting. 
backend.set_learning_phase(1) self.assertEqual(get_learning_phase_value(), 1) backend.set_learning_phase(0) self.assertEqual(get_learning_phase_value(), 0) # Cannot be enabled with `run_eagerly=True`, see b/123904578 @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_layer_can_return_variable(self): class ComputeSum(base_layer.Layer): def __init__(self): super(ComputeSum, self).__init__() self.total = tf.Variable( initial_value=tf.zeros((1, 1)), trainable=False) if not tf.executing_eagerly(): backend.get_session().run(self.total.initializer) def call(self, inputs): self.total.assign_add(inputs) return self.total inputs = input_layer.Input(shape=(1,)) model = training_lib.Model(inputs, ComputeSum()(inputs)) model.predict(np.ones((1, 1))) def _get_layer_with_training_arg(self): class TrainingLayer(base_layer.Layer): """A layer with a `training` argument in a defuned `call`.""" @tf.function def call(self, inputs, training=None): if training is None: training = backend.learning_phase() return control_flow_util.smart_cond( training, lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs)) return TrainingLayer() # b/124459427: can't test with `run_eagerly=True` for now. @combinations.generate( combinations.times(combinations.keras_mode_combinations(), combinations.keras_model_type_combinations())) def test_training_arg_in_defun(self): layer = self._get_layer_with_training_arg() model = testing_utils.get_model_from_layers([layer], input_shape=(1,)) model.compile(rmsprop.RMSprop(0.), loss='mae') history = model.fit(np.zeros((1, 1)), np.zeros((1, 1))) self.assertEqual(history.history['loss'][0], 1.) loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1))) self.assertEqual(loss, 0.) # Test that the argument injection performed in `call` is not active # when the argument is passed explicitly. 
layer = self._get_layer_with_training_arg() inputs = input_layer.Input(shape=(1,)) # Pass `training` by name outputs = layer(inputs, training=False) model = training_lib.Model(inputs, outputs) model.compile(rmsprop.RMSprop(0.), loss='mae') history = model.fit(np.zeros((1, 1)), np.zeros((1, 1))) self.assertEqual(history.history['loss'][0], 0.) @combinations.generate( combinations.times(combinations.keras_mode_combinations(), combinations.keras_model_type_combinations())) def test_raw_variable_assignment(self): class RawVariableLayer(base_layer.Layer): def __init__(self, **kwargs): super(RawVariableLayer, self).__init__(**kwargs) # Test variables in nested structure. self.var_list = [tf.Variable(1.), {'a': tf.Variable(2.)}] def call(self, inputs): return inputs * self.var_list[0] * self.var_list[1]['a'] model = testing_utils.get_model_from_layers([RawVariableLayer()], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) x, y = np.ones((10, 10)), np.ones((10, 10)) # Checks that variables get initialized. 
model.fit(x, y, batch_size=2, epochs=2) @combinations.generate(combinations.combine(mode=['eager'])) def test_composite_variable_assignment(self): class Spec(tf.TypeSpec): value_type = property(lambda self: CompositeVariable) def _component_specs(self): pass def _serialize(self): pass def _to_components(self, value): return value._variables def _from_components(self, variable_list): return CompositeVariable(variable_list) class CompositeVariable(tf.__internal__.CompositeTensor): def __init__(self, variable_list): self._variables = variable_list @property def _type_spec(self): return Spec() class CompositeVariableLayer(base_layer.Layer): def __init__(self): super().__init__() self.composite_var = CompositeVariable( [tf.Variable(1.), tf.Variable(2.)]) layer = CompositeVariableLayer() self.assertLen(layer.weights, 2) self.assertIsInstance(layer.weights[0], tf.Variable) self.assertIsInstance(layer.weights[1], tf.Variable) self.assertEqual(self.evaluate(layer.weights[0]), 1.) self.assertEqual(self.evaluate(layer.weights[1]), 2.) 
def test_exception_if_trainable_not_boolean(self): base_layer.Layer(trainable=True) base_layer.Layer(trainable=tf.constant(True)) base_layer.Layer(trainable=tf.Variable(tf.constant(True))) with self.assertRaisesRegex( TypeError, 'Expected `trainable` argument to be a boolean'): base_layer.Layer(trainable=0) def test_exception_if_dynamic_not_boolean(self): base_layer.Layer(dynamic=True) with self.assertRaisesRegex(TypeError, 'Expected `dynamic` argument to be a boolean'): base_layer.Layer(dynamic=0) def test_exception_if_name_not_string_or_none(self): base_layer.Layer(name=None) base_layer.Layer(name='layer_name') with self.assertRaisesRegex(TypeError, 'Expected `name` argument to be a string'): base_layer.Layer(name=0) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_layer_names(self): inputs = input_layer.Input(shape=[2]) add1 = inputs + inputs add2 = layers.Add()([inputs, inputs]) add3 = inputs + inputs add4 = layers.Add()([inputs, inputs]) model = training_lib.Model(inputs=[inputs], outputs=[add1, add2, add3, add4]) actual_names = [l.name for l in model.layers] graph_names = [ 'input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1' ] eager_names = [ 'input_1', 'tf.__operators__.add', 'add', 'tf.__operators__.add_1', 'add_1' ] for actual, eager, graph in zip(actual_names, graph_names, eager_names): self.assertIn(actual, {eager, graph}) @combinations.generate(combinations.combine(mode=['eager'])) def test_layer_names_after_loading(self): backend.clear_session() # Mimic loading a model that already contained add layers with # name = 'add_1' and 'tf.__operators__.add' layers.Add(name='add_1') layers.Add(name='tf.__operators__.add') inputs = input_layer.Input(shape=[2]) add1 = inputs + inputs add2 = layers.Add()([inputs, inputs]) add3 = inputs + inputs add4 = layers.Add()([inputs, inputs]) model = training_lib.Model( inputs=[inputs], outputs=[add1, add2, add3, add4]) actual_names = [l.name for l in model.layers] # The generated 
op layer names should have avoided layer names seen in # the loaded model. (This avoiance should not apply to non-op-layers) expected_names = [ 'input_1', 'tf.__operators__.add_1', 'add', 'tf.__operators__.add_2', 'add_1' ] self.assertAllEqual(actual_names, expected_names) def test_add_trainable_weight_on_frozen_layer(self): class TestLayer(base_layer.Layer): def build(self, input_shape): self.w = self.add_weight(shape=(), trainable=True) def call(self, inputs): return self.w * inputs layer = TestLayer() layer.trainable = False layer.build(None) layer.trainable = True self.assertListEqual(layer.trainable_weights, [layer.w]) @combinations.generate( combinations.times(combinations.keras_mode_combinations(), combinations.keras_model_type_combinations())) def test_passing_initial_weights_values(self): kernel_value = np.random.random((10, 2)) layer_with_weights = layers.Dense(2, use_bias=False, weights=[kernel_value]) model = testing_utils.get_model_from_layers([layer_with_weights], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.random.random((3, 10)) out = model.predict(inputs) self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value) self.assertAllClose(out, np.dot(inputs, kernel_value)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_set_weights_and_get_weights(self): layer = layers.Dense(2) layer.build((None, 10)) kernel = np.random.random((10, 2)) bias = np.random.random((2,)) layer.set_weights([kernel, bias]) weights = layer.get_weights() self.assertEqual(len(weights), 2) self.assertAllClose(weights[0], kernel) self.assertAllClose(weights[1], bias) with self.assertRaisesRegex(ValueError, 'but the layer was expecting 2 weights'): layer.set_weights([1, 2, 3]) with self.assertRaisesRegex(ValueError, 'not compatible with provided weight shape'): layer.set_weights([kernel.T, bias]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def 
test_set_weights_accepts_output_of_get_weights(self): layer = layers.Layer() layer.add_weight(name='scalar_float', shape=(), dtype=tf.float32) layer.add_weight(name='scalar_string', shape=(), dtype=tf.string, initializer=lambda *a, **k: 'abc') layer.add_weight(name='vector_float', shape=(3,), dtype=tf.float32) layer.add_weight(name='vector_string', shape=(2,), dtype=tf.string, initializer=lambda *a, **k: 2 * ['abc']) layer.set_weights(layer.get_weights()) def test_get_config_error(self): class MyLayer(base_layer.Layer): def __init__(self, my_kwarg='default', **kwargs): super(MyLayer, self).__init__(**kwargs) self.my_kwarg = my_kwarg # `__init__` includes kwargs but `get_config` is not overridden, so # an error should be thrown: with self.assertRaisesRegex(NotImplementedError, 'Layer MyLayer has'): MyLayer('custom').get_config() class MyLayerNew(base_layer.Layer): def __init__(self, my_kwarg='default', **kwargs): super(MyLayerNew, self).__init__(**kwargs) self.my_kwarg = my_kwarg def get_config(self): config = super(MyLayerNew, self).get_config() config['my_kwarg'] = self.my_kwarg return config # Test to make sure that error is not raised if the method call is # from an overridden `get_config`: self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom') class MyLayerNew2(base_layer.Layer): def __init__(self, name='MyLayerName', dtype=None, **kwargs): # pylint:disable=redefined-outer-name super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs) # Check that if the kwargs in `__init__` are base layer constructor # arguments, no error is thrown: self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_count_params(self): dense = layers.Dense(16) dense.build((None, 4)) self.assertEqual(dense.count_params(), 16 * 4 + 16) dense = layers.Dense(16) with self.assertRaisesRegex(ValueError, 'call `count_params`'): dense.count_params() model = 
sequential.Sequential(layers.Dense(16)) with self.assertRaisesRegex(ValueError, 'call `count_params`'): model.count_params() dense = layers.Dense(16, input_dim=4) model = sequential.Sequential(dense) self.assertEqual(model.count_params(), 16 * 4 + 16) def test_super_not_called(self): class CustomLayerNotCallingSuper(base_layer.Layer): def __init__(self): pass layer = CustomLayerNotCallingSuper() with self.assertRaisesRegex(RuntimeError, 'You must call `super()'): layer(np.random.random((10, 2))) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_first_arg_not_called_inputs(self): x, y = tf.ones((10, 1)), tf.ones((10, 1)) class ArgLayer(base_layer.Layer): def call(self, x, y): return x + y layer = ArgLayer() out = self.evaluate(layer(x=x, y=y)) self.assertAllClose(out, 2 * np.ones((10, 1))) class KwargLayer(base_layer.Layer): def call(self, x=None, y=None): return x + y layer = KwargLayer() out = self.evaluate(layer(x=x, y=y)) self.assertAllClose(out, 2 * np.ones((10, 1))) with self.assertRaisesRegex(ValueError, 'must always be passed'): layer(y=y) class TFFunctionLayer(base_layer.Layer): @tf.function def call(self, x, y=None): if y is None: return x return x + y layer = TFFunctionLayer() out = self.evaluate(layer(x=x, y=y)) self.assertAllClose(out, 2 * np.ones((10, 1))) def test_build_input_shape(self): class CustomLayer(base_layer.Layer): def build(self, input_shape): self.add_weight('w', shape=input_shape[1:]) super(CustomLayer, self).build(input_shape) layer = CustomLayer() self.assertFalse(layer.built) layer.build([None, 1, 2, 3]) self.assertTrue(layer.built) self.assertEqual([None, 1, 2, 3], layer._build_input_shape) layer = CustomLayer() layer(input_layer.Input((3,))) self.assertTrue(layer.built) self.assertEqual([None, 3], layer._build_input_shape.as_list()) @combinations.generate(combinations.combine(mode=['eager'])) def test_custom_layer_training_arg(self): class CustomLayerNoTrainingArg(base_layer.Layer): def __init__(self, 
nested_layer=None): super(CustomLayerNoTrainingArg, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs): return self._nested_layer(inputs) class CustomLayerDefaultTrainingMissing(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingMissing, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, training): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 class CustomLayerDefaultTrainingNone(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingNone, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, training=None): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 class CustomLayerDefaultTrainingFalse(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingFalse, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, training=False): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 class CustomLayerDefaultTrainingTrue(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingTrue, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, training=True): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 self._test_custom_layer_training_arg( CustomLayerNoTrainingArg=CustomLayerNoTrainingArg, CustomLayerDefaultTrainingMissing=CustomLayerDefaultTrainingMissing, CustomLayerDefaultTrainingNone=CustomLayerDefaultTrainingNone, CustomLayerDefaultTrainingFalse=CustomLayerDefaultTrainingFalse, CustomLayerDefaultTrainingTrue=CustomLayerDefaultTrainingTrue) @combinations.generate(combinations.combine(mode=['eager'])) def test_custom_layer_training_arg_kwargonly(self): class 
CustomLayerNoTrainingArg(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerNoTrainingArg, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs): return self._nested_layer(inputs) class CustomLayerDefaultTrainingMissing(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingMissing, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, *, training): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 class CustomLayerDefaultTrainingNone(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingNone, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, *, training=None): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 class CustomLayerDefaultTrainingFalse(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingFalse, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, *, training=False): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 class CustomLayerDefaultTrainingTrue(base_layer.Layer): def __init__(self, nested_layer=None): super(CustomLayerDefaultTrainingTrue, self).__init__() self._nested_layer = nested_layer or tf.identity def call(self, inputs, *, training=True): if training: return self._nested_layer(inputs) else: return self._nested_layer(inputs) * 0.5 self._test_custom_layer_training_arg( CustomLayerNoTrainingArg=CustomLayerNoTrainingArg, CustomLayerDefaultTrainingMissing=CustomLayerDefaultTrainingMissing, CustomLayerDefaultTrainingNone=CustomLayerDefaultTrainingNone, CustomLayerDefaultTrainingFalse=CustomLayerDefaultTrainingFalse, CustomLayerDefaultTrainingTrue=CustomLayerDefaultTrainingTrue) def _test_custom_layer_training_arg(self, # pylint: 
disable=invalid-name CustomLayerNoTrainingArg, CustomLayerDefaultTrainingMissing, CustomLayerDefaultTrainingNone, CustomLayerDefaultTrainingFalse, CustomLayerDefaultTrainingTrue, # pylint: enable=invalid-name ): x = tf.ones(shape=(1, 1)) # If the layer signature doesn't specify a default training arg, # run it in inference mode when to training arg is passed # to __call__ layer = CustomLayerDefaultTrainingMissing() self.assertAllEqual(layer(x), x * 0.5) self.assertAllEqual(layer(x, training=False), x * 0.5) self.assertAllEqual(layer(x, training=True), x) # If the layer signature specifies `False` as the default training arg, # run it in inference mode when no training arg is passed # to __call__ layer = CustomLayerDefaultTrainingFalse() self.assertAllEqual(layer(x), x * 0.5) self.assertAllEqual(layer(x, training=False), x * 0.5) self.assertAllEqual(layer(x, training=True), x) # If the layer signature specifies `True` as the default training arg, # explicitly run it in training mode when no training arg is passed # to __call__ layer = CustomLayerDefaultTrainingTrue() self.assertAllEqual(layer(x), x) self.assertAllEqual(layer(x, training=False), x * 0.5) self.assertAllEqual(layer(x, training=True), x) # Outer layers/models should set the training context implicitly for all # nested layers, respecting whatever mode the outer layer was run with. 
layer = CustomLayerDefaultTrainingTrue(CustomLayerDefaultTrainingFalse()) # No outer value passed: use local defaults self.assertAllEqual(layer(x), x) # Use outer default True # Outer value passed: override local defaults self.assertAllEqual(layer(x, training=False), x * 0.25) self.assertAllEqual(layer(x, training=True), x) layer = CustomLayerDefaultTrainingFalse(CustomLayerDefaultTrainingTrue()) # No outer value passed: use local defaults self.assertAllEqual(layer(x), x * 0.25) # Use outer default False # Outer value passed: override local defaults self.assertAllEqual(layer(x, training=False), x * 0.25) self.assertAllEqual(layer(x, training=True), x) # If the outer layer `call` doesn't take a training argument at all, # it'll set the nested scope as None when no training arg is passed in. # If a training arg is passed in it won't use it directly in `call`, but # it will set the nested training mode. layer = CustomLayerNoTrainingArg(CustomLayerDefaultTrainingTrue()) self.assertAllEqual(layer(x), x) # Use local default True self.assertAllEqual(layer(x, training=False), x * 0.5) self.assertAllEqual(layer(x, training=True), x) layer = CustomLayerDefaultTrainingNone(CustomLayerDefaultTrainingTrue()) self.assertAllEqual(layer(x), x * 0.5) # Nested use local default True self.assertAllEqual(layer(x, training=False), x * 0.25) self.assertAllEqual(layer(x, training=True), x) def test_activity_regularizer_string(self): class MyLayer(base_layer.Layer): pass layer = MyLayer(activity_regularizer='l2') self.assertIsInstance(layer.activity_regularizer, regularizers.L2) def test_tf_module_tracking(self): class MyModule(tf.Module): def __init__(self): super(MyModule, self).__init__() self.v1 = tf.Variable(1., trainable=True, name='v1') self.v2 = tf.Variable(2., trainable=False, name='v2') def __call__(self, x): return x * self.v1 * self.v2 class MyLayer(base_layer.Layer): def __init__(self, **kwargs): super(MyLayer, self).__init__(**kwargs) self.my_modules = {} 
self.my_modules['a'] = MyModule() def call(self, x): return self.my_modules['a'](x) layer = MyLayer() self.assertLen(layer.variables, 2) self.assertLen(layer.trainable_variables, 1) self.assertLen(layer.non_trainable_variables, 1) layer.trainable = False self.assertLen(layer.variables, 2) self.assertLen(layer.trainable_variables, 0) self.assertLen(layer.non_trainable_variables, 2) class MyModel(training_lib.Model): def __init__(self): super(MyModel, self).__init__() self.my_modules = [] self.my_modules.append(MyModule()) def call(self, x): return self.my_modules[0](x) model = MyModel() self.assertLen(model.variables, 2) self.assertLen(model.trainable_variables, 1) self.assertLen(model.non_trainable_variables, 1) model.trainable = False self.assertLen(model.variables, 2) self.assertLen(model.trainable_variables, 0) self.assertLen(model.non_trainable_variables, 2) class SymbolicSupportTest(keras_parameterized.TestCase): def test_using_symbolic_tensors_with_tf_ops(self): # Single-input. x = input_layer.Input((3,)) tf.square(x) # Multi-inputs. x1, x2 = input_layer.Input((3,)), input_layer.Input((3,)) tf.concat([x1, x2], axis=1) # Mixing Keras symbolic tensors and graph tensors from the same graph works. with backend.get_graph().as_default(): x1 = input_layer.Input((3,)) x2 = input_layer.Input((3,)) tf.matmul(x1, x2) # Creating same op type (matmul) multiple times in the Keras graph works. 
x1 = input_layer.Input((3,)) x2 = input_layer.Input((3,)) tf.matmul(x1, x2) def test_mixing_eager_and_graph_tensors(self): with tf.Graph().as_default(): x1 = tf.ones((3, 3)) x2 = tf.ones((3, 3)) with self.assertRaisesRegex(TypeError, 'Graph tensors'): tf.matmul(x1, x2) def test_mixing_numpy_arrays_and_graph_tensors(self): with tf.Graph().as_default(): x1 = tf.ones((3, 3)) x2 = np.ones((3, 3), dtype='float32') with self.assertRaisesRegex(TypeError, 'Graph tensors'): tf.matmul(x1, x2) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_mixing_keras_symbolic_tensors_and_eager_tensors(self): x1 = input_layer.Input((3,)) x2 = tf.ones((3, 3)) y = tf.matmul(x1, x2) fn = backend.function(inputs=[x1], outputs=[y]) x_val = np.random.random((3, 3)) y_val = np.ones((3, 3)) self.assertAllClose(fn([x_val])[0], np.matmul(x_val, y_val), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self): x1 = input_layer.Input((3,)) x2 = np.ones((3, 3), dtype='float32') y = tf.matmul(x1, x2) fn = backend.function(inputs=[x1], outputs=[y]) x_val = np.random.random((3, 3)) y_val = np.ones((3, 3)) self.assertAllClose(fn([x_val])[0], np.matmul(x_val, y_val), atol=1e-5) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_reraising_exception(self): # When layer is not dynamic, we have some pattern matching during exception # handling to detect when the user is trying to use python control flow. # When an exception is thrown but the pattern doesn't match, we want to # preserve the originating stack trace. An early implementation of this # logic lost the stack trace. We test the correct behavior here. 
class TypeErrorLayer(base_layer.Layer): def call(self, inputs): def easily_identifiable_name(): raise TypeError('Non-matching TypeError message.') easily_identifiable_name() inputs = input_layer.Input((3,)) try: _ = TypeErrorLayer()(inputs) except TypeError as e: self.assertIn('easily_identifiable_name', str(e)) # pylint: disable=g-assert-in-except @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_summaries_in_tf_function(self): if not tf.executing_eagerly(): return class MyLayer(base_layer.Layer): def call(self, inputs): tf.summary.scalar('mean', tf.reduce_mean(inputs)) return inputs tmp_dir = self.get_temp_dir() writer = tf.summary.create_file_writer(tmp_dir) with writer.as_default(step=1), tf.summary.record_if(True): my_layer = MyLayer() x = tf.ones((10, 10)) def my_fn(x): return my_layer(x) _ = my_fn(x) event_file = tf.compat.v1.gfile.Glob(os.path.join(tmp_dir, 'events*')) self.assertLen(event_file, 1) event_file = event_file[0] tags = set() for e in tf.compat.v1.train.summary_iterator(event_file): for val in e.summary.value: tags.add(val.tag) self.assertEqual(set(['my_layer/mean']), tags) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_error_when_passing_non_tensor(self): # layers that have an `input_spec` will raise an error when called on # non-tensors. This covers all built-in layers. layer = layers.Dense(3) x = object() with self.assertRaisesRegex(TypeError, r'should be tensors'): layer(x) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class NestedTrackingTest(tf.test.TestCase): def test_nested_layer_variable_tracking(self): # Test that variables from nested sublayers are # being tracked by subclassed layers. 
class MyLayer(base_layer.Layer): def __init__(self): super(MyLayer, self).__init__() self.dense1 = layers.Dense(1) self.dense2 = layers.BatchNormalization() def build(self, input_shape): self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list()) self.v2 = tf.Variable( name='v2', initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'), trainable=False) def call(self, inputs): x = self.dense1(inputs) + self.dense2(inputs) return x + self.v1 + self.v2 layer = MyLayer() inputs = input_layer.Input((1,)) _ = layer(inputs) self.assertEqual(len(layer.weights), 8) self.assertEqual(len(layer.trainable_weights), 5) self.assertEqual(len(layer.non_trainable_weights), 3) layer.dense1.trainable = False self.assertEqual(len(layer.weights), 8) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 5) layer.trainable = False self.assertEqual(len(layer.weights), 8) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.non_trainable_weights), 8) self.assertEqual( {id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]}, {id(v) for _, v in layer._checkpoint_dependencies}) def test_nested_layer_updates_losses_tracking(self): # Test that updates and losses from nested sublayers are # being tracked by subclassed layers. 
class UpdateAndLossLayer(base_layer.Layer): def build(self, _): self.v1 = self.add_weight('v1', shape=()) def call(self, inputs): self.add_loss(tf.reduce_sum(inputs)) self.add_update(tf.compat.v1.assign_add(self.v1, 1)) return inputs + 1 class MyLayer(base_layer.Layer): def build(self, _): self.v1 = self.add_weight('v1', shape=()) def __init__(self): super(MyLayer, self).__init__() self.ul1 = UpdateAndLossLayer() self.ul2 = UpdateAndLossLayer() def call(self, inputs): self.add_loss(tf.reduce_sum(inputs)) self.add_update(tf.compat.v1.assign_add(self.v1, 1)) x = self.ul1(inputs) return self.ul2(x) layer = MyLayer() if tf.executing_eagerly(): inputs = tf.ones((3, 1)) _ = layer(inputs) self.assertEqual(len(layer.losses), 3) self.assertLen(layer.get_losses_for(None), 3) else: inputs = input_layer.Input((1,)) _ = layer(inputs) self.assertEqual(len(layer.losses), 3) self.assertEqual(len(layer.updates), 3) self.assertLen(layer.get_losses_for(None), 3) def test_attribute_reassignment(self): l = base_layer.Layer() l.a = base_layer.Layer() l.a = [] l.a = tf.Variable(1.) l.a = base_layer.Layer() last_assignment = base_layer.Layer() l.a = last_assignment l.b = tf.Variable(1.) del l.b l.c = base_layer.Layer() del l.c l.d = last_assignment del l.d sublayers = list(l._flatten_layers(include_self=False, recursive=False)) self.assertEqual([last_assignment], sublayers) self.assertEqual([], l.trainable_weights) self.assertEqual([], l.non_trainable_weights) self.assertEqual([], l.weights) del l.a self.assertEqual([], l._self_tracked_trackables) def test_layer_class_not_tracked_as_sublayer(self): # See https://github.com/tensorflow/tensorflow/issues/27431 for details. 
class LayerWithClassAttribute(base_layer.Layer): def __init__(self): super(LayerWithClassAttribute, self).__init__() self.layer_fn = layers.Dense layer = LayerWithClassAttribute() self.assertEmpty(layer.variables) self.assertEmpty(layer.submodules) def test_layer_call_fn_args(self): class NonDefunLayer(base_layer.Layer): def call(self, inputs, a, mask, b=None, training=None): return inputs class DefunLayer(base_layer.Layer): @tf.function def call(self, x, mask, a, training=None, b=None): return x nondefun_layer = NonDefunLayer() self.assertEqual(nondefun_layer._call_fn_args, ['inputs', 'a', 'mask', 'b', 'training']) defun_layer = DefunLayer() self.assertEqual(defun_layer._call_fn_args, ['x', 'mask', 'a', 'training', 'b']) def test_sequential_model(self): model = sequential.Sequential( [layers.Dense(10, input_shape=(10,)), layers.Dense(5)]) self.assertLen(model.layers, 2) self.assertLen(model.weights, 4) # Make sure a subclass model also works when it is called 'Sequential'. class Sequential(training_lib.Model): def __init__(self): super(Sequential, self).__init__() self.dense_layers = [layers.Dense(10), layers.Dense(5)] def call(self, inputs): x = inputs for d in self.dense_layers: x = d(x) return x s = Sequential() self.assertLen(s.layers, 2) self.assertLen(s.weights, 0) s(input_layer.Input((10,))) self.assertLen(s.weights, 4) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class NameScopingTest(keras_parameterized.TestCase): def test_name_scope_layer(self): x = backend.placeholder(shape=(10, 10)) layer = layers.Dense(10, name='MyName') layer(x) self.assertEqual(layer.bias.name, 'MyName/bias:0') self.assertEqual(layer.kernel.name, 'MyName/kernel:0') def test_name_scope_functional_api(self): inputs = input_layer.Input((3,)) layer = layers.Dense(10, name='MyName') _ = layer(inputs) self.assertEqual(layer.bias.name, 'MyName/bias:0') self.assertEqual(layer.kernel.name, 'MyName/kernel:0') def test_name_scope_functional_api_nested(self): class 
NestedLayer(base_layer.Layer): def __init__(self, name='OuterName'): super(NestedLayer, self).__init__(name=name) self.dense = layers.Dense(10, name='InnerName') def call(self, inputs): return self.dense(inputs) inputs = input_layer.Input((3,)) layer = NestedLayer() _ = layer(inputs) self.assertEqual(layer.dense.bias.name, 'OuterName/InnerName/bias:0') self.assertEqual(layer.dense.kernel.name, 'OuterName/InnerName/kernel:0') def test_name_scope_sublayer(self): class NameScopeTracker(base_layer.Layer): def call(self, inputs): self.active_name_scope = tf.__internal__.get_name_scope() return inputs x = backend.placeholder(shape=(10, 10)) sublayer = NameScopeTracker(name='Sublayer') layer = layers.Dense(10, activation=sublayer, name='MyName2') layer(x) self.assertEqual(layer.bias.name, 'MyName2/bias:0') self.assertEqual(layer.kernel.name, 'MyName2/kernel:0') self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer') def test_name_scope_tf_tensor(self): x = tf.convert_to_tensor(np.ones((10, 10))) layer = layers.Dense( 10, activation=layers.ReLU(name='MyAct'), name='MyName3') layer(x) self.assertEqual(layer.bias.name, 'MyName3/bias:0') self.assertEqual(layer.kernel.name, 'MyName3/kernel:0') @testing_utils.run_v2_only def test_apply_name_scope_on_model_declaration(self): if not tf.executing_eagerly(): self.skipTest('`apply_name_scope_on_model_declaration` API is supported' ' only for V2 eager') base_layer._apply_name_scope_on_model_declaration(True) inputs = input_layer.Input((3,)) x = layers.Dense(10, name='Dense1')(inputs) with tf.name_scope('outer'): x = layers.Dense(10, name='Dense2')(x) with tf.name_scope('inner'): x = layers.Dense(10, name='Dense3')(x) x = layers.Dense(10, name='Dense4')(x) outputs = layers.Dense(10, name='Dense5')(x) model = training_lib.Model(inputs, outputs) node_names = self._get_model_node_names(model, np.random.random((1, 3)), 'call_scope') self.assertListEqual(node_names, [ 'call_scope/Const', 'call_scope/model/Cast', 
'call_scope/model/Dense1/MatMul/ReadVariableOp/resource', 'call_scope/model/Dense1/MatMul/ReadVariableOp', 'call_scope/model/Dense1/MatMul', 'call_scope/model/Dense1/BiasAdd/ReadVariableOp/resource', 'call_scope/model/Dense1/BiasAdd/ReadVariableOp', 'call_scope/model/Dense1/BiasAdd', 'call_scope/model/outer/Dense2/MatMul/ReadVariableOp/resource', 'call_scope/model/outer/Dense2/MatMul/ReadVariableOp', 'call_scope/model/outer/Dense2/MatMul', 'call_scope/model/outer/Dense2/BiasAdd/ReadVariableOp/resource', 'call_scope/model/outer/Dense2/BiasAdd/ReadVariableOp', 'call_scope/model/outer/Dense2/BiasAdd', 'call_scope/model/outer/inner/Dense3/MatMul/ReadVariableOp/resource', 'call_scope/model/outer/inner/Dense3/MatMul/ReadVariableOp', 'call_scope/model/outer/inner/Dense3/MatMul', 'call_scope/model/outer/inner/Dense3/BiasAdd/ReadVariableOp/resource', 'call_scope/model/outer/inner/Dense3/BiasAdd/ReadVariableOp', 'call_scope/model/outer/inner/Dense3/BiasAdd', 'call_scope/model/outer/Dense4/MatMul/ReadVariableOp/resource', 'call_scope/model/outer/Dense4/MatMul/ReadVariableOp', 'call_scope/model/outer/Dense4/MatMul', 'call_scope/model/outer/Dense4/BiasAdd/ReadVariableOp/resource', 'call_scope/model/outer/Dense4/BiasAdd/ReadVariableOp', 'call_scope/model/outer/Dense4/BiasAdd', 'call_scope/model/Dense5/MatMul/ReadVariableOp/resource', 'call_scope/model/Dense5/MatMul/ReadVariableOp', 'call_scope/model/Dense5/MatMul', 'call_scope/model/Dense5/BiasAdd/ReadVariableOp/resource', 'call_scope/model/Dense5/BiasAdd/ReadVariableOp', 'call_scope/model/Dense5/BiasAdd', 'Identity', 'NoOp' ]) base_layer._apply_name_scope_on_model_declaration(False) def _get_model_node_names(self, model, inputs, call_name_scope): """Returns a list of model's node names.""" @tf.function() def wrapper(): with tf.name_scope(call_name_scope): return model(inputs) return [ node.name for node in wrapper.get_concrete_function().graph.as_graph_def().node ] 
@combinations.generate(combinations.keras_mode_combinations(mode=['eager']))
class AutographControlFlowTest(keras_parameterized.TestCase):
  # Verifies that Python `if training:` control flow inside `Layer.call` is
  # handled (via autograph) for outputs, losses, metrics and variable
  # updates, in both eager and traced execution.

  def test_disabling_in_context_is_matched(self):
    # With autograph explicitly disabled, using a symbolic tensor as a
    # Python bool inside `call` must raise the usual
    # "Tensor ... as ... bool" TypeError rather than being converted.
    test_obj = self

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):
          if tf.constant(False):
            return inputs * 1.
        return inputs * 0.

    @tf.function(autograph=False)
    def test_fn():
      return MyLayer()(tf.constant([[1., 2., 3.]]))

    test_fn()

  def test_if_training_pattern_output(self):
    # The layer passes inputs through in training mode and zeroes them
    # otherwise; with all-ones inputs and targets, MSE is 0 when training
    # and 1 when testing.

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        if training:
          return inputs * 1.
        return inputs * 0.

    inputs = input_layer.Input((3,))
    outputs = MyLayer()(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 0.)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 1.)

  def test_if_training_pattern_loss(self):
    # `add_loss(sum(inputs))` fires only in training mode; for a (2, 3)
    # ones batch that contributes 2 * 3 = 6. The MSE term itself is 0
    # because the layer is an identity and targets equal inputs.

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        if training:
          loss = tf.reduce_sum(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs

    inputs = input_layer.Input((3,))
    outputs = MyLayer()(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 2 * 3)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 0)

  def test_if_training_pattern_metric(self):
    # The 'my_metric' mean is sum(inputs) (= 2 * 3) in training mode and 0
    # in test mode; checked over three batches.

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        if training:
          metric = tf.reduce_sum(inputs)
        else:
          metric = 0.
        self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs

    inputs = input_layer.Input((3,))
    outputs = MyLayer()(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    for _ in range(3):
      _, train_metric = model.train_on_batch(np.ones((2, 3)),
                                             np.ones((2, 3)))
      self.assertEqual(train_metric, 2 * 3)
      _, test_metric = model.test_on_batch(np.ones((2, 3)),
                                           np.ones((2, 3)))
      self.assertEqual(test_metric, 0)

  def test_if_training_pattern_update(self):
    # A non-trainable scalar counter is incremented by 1. per batch only
    # in training mode; after one train batch it must read 1.

    class MyLayer(base_layer.Layer):

      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')

      def call(self, inputs, training=None):
        if training:
          increment = 1.
        else:
          increment = 0.
        self.counter.assign_add(increment)
        return inputs

    inputs = input_layer.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(backend.get_value(layer.counter), 1.)
def test_conditional_losses_in_call(self): class MyLayer(base_layer.Layer): def __init__(self): super(MyLayer, self).__init__(dynamic=testing_utils.should_run_eagerly()) def call(self, inputs, training=None): if training: self.add_loss(tf.reduce_sum(inputs)) return inputs def compute_output_shape(self, input_shape): return input_shape inputs = input_layer.Input((3,)) layer = MyLayer() outputs = layer(inputs) model = training_lib.Model(inputs, outputs) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3))) self.assertEqual(loss, 2 * 3) def test_conditional_callable_losses(self): model = sequential.Sequential([ layers.Dense( 1, kernel_regularizer=regularizers.l2(1e-4), input_shape=(1,)) ]) model._run_eagerly = testing_utils.should_run_eagerly() def assert_graph(t): if not tf.executing_eagerly(): self.assertEqual(t.graph, tf.compat.v1.get_default_graph()) @tf.function def get_losses(t): if t < 0: return tf.reduce_sum(model.losses) * t else: return tf.reduce_sum(model.losses) assert_graph(get_losses(tf.constant(2.))) assert_graph(get_losses(tf.constant(0.5))) def test_conditional_metrics_in_call(self): class MyLayer(base_layer.Layer): def __init__(self): super(MyLayer, self).__init__(dynamic=testing_utils.should_run_eagerly()) def call(self, inputs, training=None): if training: self.add_metric(tf.reduce_sum(inputs), name='sum', aggregation='mean') return inputs def compute_output_shape(self, input_shape): return input_shape inputs = input_layer.Input((3,)) layer = MyLayer() outputs = layer(inputs) model = training_lib.Model(inputs, outputs) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit(np.ones((2, 3)), np.ones((2, 3))) self.assertEqual(history.history['sum'][-1], 2 * 3) def test_conditional_activity_regularizer_in_call(self): class TestModel(training_lib.Model): def __init__(self): super(TestModel, self).__init__( name='test_model', 
dynamic=testing_utils.should_run_eagerly()) self.layer = layers.Dense(2, activity_regularizer='l2') def call(self, x, training=None): if tf.greater(tf.reduce_sum(x), 0.0): return self.layer(x) else: return self.layer(x) model = TestModel() model.compile( loss='mse', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) if testing_utils.should_run_eagerly(): model.fit(x, y, epochs=2, batch_size=5) else: with self.assertRaisesRegex(ValueError, 'ActivityRegularizer'): model.fit(x, y, epochs=2, batch_size=5) def test_conditional_activity_regularizer_with_wrappers_in_call(self): class TestModel(training_lib.Model): def __init__(self): super(TestModel, self).__init__( name='test_model', dynamic=testing_utils.should_run_eagerly()) self.layer = layers.TimeDistributed( layers.Dense(2, activity_regularizer='l2'), input_shape=(3, 4)) def call(self, x, training=None): if tf.greater(tf.reduce_sum(x), 0.0): return self.layer(x) else: return self.layer(x) model = TestModel() model.compile( loss='mse', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 3, 4)) y = np.ones(shape=(10, 3, 2)) if testing_utils.should_run_eagerly(): model.fit(x, y, epochs=2, batch_size=5) else: with self.assertRaisesRegex(ValueError, 'ActivityRegularizer'): model.fit(x, y, epochs=2, batch_size=5) class AddLayer(base_layer.Layer): """A layer which adds its input to a variable. Useful for testing a layer with a variable """ def build(self, _): self.v = self.add_weight('v', (), initializer='ones') self.built = True def call(self, inputs): return inputs + self.v class IdentityLayer(base_layer.Layer): """A layer that returns its input. Useful for testing a layer without a variable. 
""" def call(self, inputs): return inputs @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class DTypeTest(keras_parameterized.TestCase): def _const(self, dtype): return tf.constant(1, dtype=dtype) @testing_utils.enable_v2_dtype_behavior def test_dtype_defaults_to_floatx(self): layer = AddLayer() self.assertEqual(layer.dtype, 'float32') layer(self._const('float64')) self.assertEqual(layer.dtype, 'float32') # dtype should not change try: backend.set_floatx('float64') layer = AddLayer() self.assertEqual(layer.dtype, 'float64') finally: backend.set_floatx('float32') @testing_utils.enable_v2_dtype_behavior def test_passing_dtype_to_constructor(self): layer = IdentityLayer(dtype='float64') layer(self._const('float32')) self.assertEqual(layer.dtype, 'float64') layer = IdentityLayer(dtype='int32') layer(self._const('float32')) self.assertEqual(layer.dtype, 'int32') layer = IdentityLayer(dtype=tf.float64) layer(self._const('float32')) self.assertEqual(layer.dtype, 'float64') @testing_utils.enable_v2_dtype_behavior def input_cast_to_dtype(self): layer = AddLayer() # Input should be cast to layer.dtype, so output should also be layer.dtype self.assertEqual(layer(self._const('float64')).dtype, 'float32') layer = AddLayer(dtype='float64') self.assertEqual(layer(self._const('float32')).dtype, 'float64') # Test inputs are not casted if layer.dtype is not floating-point layer = IdentityLayer(dtype='int32') self.assertEqual(layer(self._const('float64')).dtype, 'float64') # Test inputs are not casted if the inputs are not floating-point layer = IdentityLayer(dtype='float32') self.assertEqual(layer(self._const('int32')).dtype, 'int32') # Test Numpy arrays are casted layer = IdentityLayer(dtype='float64') self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64') # Test Python floats are casted layer = IdentityLayer(dtype='float64') self.assertEqual(layer(1.).dtype, 'float64') @testing_utils.enable_v2_dtype_behavior def 
multiple_inputs_cast_to_dtype(self): class MultiIdentityLayer(base_layer.Layer): def call(self, inputs): return [tf.identity(x) for x in inputs] # Testing layer with default dtype of float32 layer = MultiIdentityLayer() x, y = layer([self._const('float16'), self._const('float32')]) self.assertEqual(x.dtype, 'float32') self.assertEqual(y.dtype, 'float32') # Test passing dtype to the constructor layer = MultiIdentityLayer(dtype='float64') x, y = layer([self._const('float16'), self._const('float32')]) self.assertEqual(x.dtype, 'float64') self.assertEqual(y.dtype, 'float64') # Test several non-floating point types layer = MultiIdentityLayer(dtype='float64') x, y, z, w = layer([self._const('float16'), self._const('bool'), self._const('float64'), self._constant('complex64')]) self.assertEqual(x.dtype, 'float64') self.assertEqual(y.dtype, 'bool') self.assertEqual(z.dtype, 'float64') self.assertEqual(w.dtype, 'complex64') @testing_utils.enable_v2_dtype_behavior def test_extra_args_and_kwargs_not_casted(self): class IdentityLayerWithArgs(base_layer.Layer): def call(self, inputs, *args, **kwargs): kwargs.pop('training', None) return tf.nest.flatten([inputs, args, kwargs]) layer = IdentityLayerWithArgs(dtype='float64') x, y, z = layer(self._const('float16'), self._const('float16'), kwarg=self._const('float16')) self.assertEqual(x.dtype, 'float64') self.assertEqual(y.dtype, 'float16') self.assertEqual(z.dtype, 'float16') @testing_utils.enable_v2_dtype_behavior def test_layer_without_autocast(self): class IdentityLayerWithoutAutocast(IdentityLayer): def __init__(self, *args, **kwargs): kwargs['autocast'] = False super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs) layer = IdentityLayerWithoutAutocast(dtype='float64') self.assertEqual(layer(self._const('float32')).dtype, 'float32') @testing_utils.enable_v2_dtype_behavior def test_compute_output_signature(self): class IdentityLayerWithOutputShape(IdentityLayer): def compute_output_shape(self, input_shape): return 
input_shape layer = IdentityLayerWithOutputShape(dtype='float64') output_signature = layer.compute_output_signature( tf.TensorSpec(shape=(), dtype='float32')) self.assertEqual(output_signature.shape, ()) self.assertEqual(output_signature.dtype, 'float64') @testing_utils.enable_v2_dtype_behavior def test_composite_tensors_input_casting(self): sparse = tf.SparseTensor( indices=tf.constant([[0, 1], [2, 3]], dtype='int64'), values=tf.constant([0., 1.], dtype='float32'), dense_shape=tf.constant([4, 4], dtype='int64')) ragged = tf.RaggedTensor.from_row_splits( values=tf.constant([1., 2., 3.], dtype='float32'), row_splits=tf.constant([0, 2, 2, 3], dtype='int64')) layer = IdentityLayer(dtype='float16') for x in sparse, ragged: self.assertEqual(x.dtype, 'float32') y = layer(x) self.assertEqual(y.dtype, 'float16') self.assertEqual(type(x), type(y)) @testing_utils.enable_v2_dtype_behavior def test_passing_non_tensor(self): layer = IdentityLayer() x = object() y = layer(x) # Layer should not cast 'x', as it's not a tensor self.assertIs(x, y) @testing_utils.disable_v2_dtype_behavior def test_v1_behavior(self): # Test dtype defaults to None and inferred from input layer = IdentityLayer() self.assertIsNone(layer.dtype) layer(self._const('float64')) self.assertEqual(layer.dtype, 'float64') # Test layer does not cast to dtype self.assertEqual(layer(self._const('float32')).dtype, 'float32') if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
65,468
34.331355
106
py
keras
keras-master/keras/engine/sequential_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests specific to `Sequential` model.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from tensorflow.python.framework import test_util from keras import keras_parameterized from keras import testing_utils class TestSequential(keras_parameterized.TestCase): """Most Sequential model API tests are covered in `training_test.py`. 
""" @keras_parameterized.run_all_keras_modes def test_basic_methods(self): model = keras.models.Sequential() model.add(keras.layers.Dense(1, input_dim=2)) model.add(keras.layers.Dropout(0.3, name='dp')) model.add(keras.layers.Dense(2, kernel_regularizer='l2', kernel_constraint='max_norm')) self.assertEqual(len(model.layers), 3) self.assertEqual(len(model.weights), 2 * 2) self.assertEqual(model.get_layer(name='dp').name, 'dp') @keras_parameterized.run_all_keras_modes def test_input_defined_first_layer(self): model = keras.models.Sequential() model.add(keras.Input(shape=(2,), name='input_layer')) model.add(keras.layers.Dense(1)) model.add(keras.layers.Dropout(0.3, name='dp')) model.add(keras.layers.Dense(2, kernel_regularizer='l2', kernel_constraint='max_norm')) self.assertLen(model.layers, 3) self.assertLen(model.weights, 2 * 2) self.assertEqual(model.get_layer(name='dp').name, 'dp') @keras_parameterized.run_all_keras_modes def test_single_layer_in_init(self): model = keras.models.Sequential(keras.layers.Dense(1)) self.assertLen(model.layers, 1) @keras_parameterized.run_all_keras_modes def test_sequential_pop(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = testing_utils.get_small_sequential_mlp( num_hidden, num_classes, input_dim) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) model.pop() self.assertEqual(len(model.layers), 1) self.assertEqual(model.output_shape, (None, num_hidden)) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) y = np.random.random((batch_size, num_hidden)) model.fit(x, y, epochs=1) # Test popping single-layer model model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, input_dim=input_dim)) model.pop() self.assertEqual(model.layers, []) self.assertEqual(model.outputs, None) # Invalid use 
case model = keras.models.Sequential() with self.assertRaises(TypeError): model.pop() @keras_parameterized.run_all_keras_modes def test_sequential_deferred_build_with_np_arrays(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(len(model.layers), 2) with self.assertRaisesRegex( ValueError, 'Weights for model .* have not yet been created'): len(model.weights) self.assertFalse(model.built) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) self.assertTrue(model.built) self.assertEqual(len(model.weights), 2 * 2) @keras_parameterized.run_all_keras_modes def test_sequential_deferred_build_with_dataset_iterators(self): num_hidden = 5 input_dim = 3 num_classes = 2 num_samples = 50 steps_per_epoch = 10 model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(len(model.layers), 2) with self.assertRaisesRegex( ValueError, 'Weights for model .* have not yet been created'): len(model.weights) self.assertFalse(model.built) x = tf.ones((num_samples, input_dim)) y = tf.zeros((num_samples, num_classes)) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=steps_per_epoch) self.assertTrue(model.built) self.assertEqual(len(model.weights), 2 * 2) # TODO(kaftan) This test fails w/ run_with_all_keras_modes. 
File ticket @parameterized.parameters((True,), (False,)) def test_training_and_eval_methods_on_symbolic_tensors(self, deferred): with tf.Graph().as_default(), self.cached_session(): def get_model(): if deferred: model = testing_utils.get_small_sequential_mlp(10, 4) else: model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3) model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) return model inputs = keras.backend.zeros(shape=(10, 3)) targets = keras.backend.zeros(shape=(10, 4)) model = get_model() model.fit(inputs, targets, epochs=10, steps_per_epoch=30) model = get_model() model.evaluate(inputs, targets, steps=2, verbose=0) model = get_model() model.predict(inputs, steps=2) model = get_model() model.train_on_batch(inputs, targets) model = get_model() model.test_on_batch(inputs, targets) model = get_model() model.fit( inputs, targets, epochs=1, steps_per_epoch=2, verbose=0, validation_data=(inputs, targets), validation_steps=2) @keras_parameterized.run_all_keras_modes def test_invalid_use_cases(self): # Added objects must be layer instances with self.assertRaises(TypeError): model = keras.models.Sequential() model.add(None) @keras_parameterized.run_all_keras_modes def test_nested_sequential_trainability(self): input_dim = 20 num_units = 10 num_classes = 2 inner_model = keras.models.Sequential() inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,))) model = keras.models.Sequential() model.add(inner_model) model.add(keras.layers.Dense(num_classes)) self.assertEqual(len(model.layers), 2) self.assertEqual(len(model.trainable_weights), 4) inner_model.trainable = False self.assertEqual(len(model.trainable_weights), 2) inner_model.trainable = True self.assertEqual(len(model.trainable_weights), 4) @keras_parameterized.run_all_keras_modes def test_sequential_update_disabling(self): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) model = keras.models.Sequential() 
model.add(keras.layers.BatchNormalization(input_shape=(4,))) model.trainable = False model.compile('sgd', 'mse') x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile('sgd', 'mse') model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 @keras_parameterized.run_all_keras_modes def test_sequential_deferred_build_serialization(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly()) self.assertFalse(model.built) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.train_on_batch(x, y) self.assertTrue(model.built) config = model.get_config() new_model = keras.models.Sequential.from_config(config) new_model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly()) x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) new_model.train_on_batch(x, y) self.assertEqual(len(new_model.layers), 2) self.assertEqual(len(new_model.weights), 4) @keras_parameterized.run_all_keras_modes def test_sequential_shape_inference_deferred(self): model = testing_utils.get_small_sequential_mlp(4, 5) output_shape = model.compute_output_shape((None, 7)) self.assertEqual(tuple(output_shape.as_list()), (None, 5)) @keras_parameterized.run_all_keras_modes def test_sequential_build_deferred(self): model = testing_utils.get_small_sequential_mlp(4, 5) model.build((None, 10)) self.assertTrue(model.built) self.assertEqual(len(model.weights), 4) # Test with nested model model = testing_utils.get_small_sequential_mlp(4, 3) inner_model = 
testing_utils.get_small_sequential_mlp(4, 5) model.add(inner_model) model.build((None, 10)) self.assertTrue(model.built) self.assertEqual(len(model.weights), 8) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_sequential_deferred_manual_build(self): model = testing_utils.get_small_sequential_mlp(4, 5) self.assertFalse(model.built) model(tf.zeros([1, 2])) self.assertTrue(model.built) model.compile( 'rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5))) @keras_parameterized.run_all_keras_modes def test_sequential_nesting(self): model = testing_utils.get_small_sequential_mlp(4, 3) inner_model = testing_utils.get_small_sequential_mlp(4, 5) model.add(inner_model) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @test_util.run_v1_only('Behavior changed in V2.') def test_variable_names_deferred(self): model = keras.models.Sequential([keras.layers.Dense(3)]) model.add(keras.layers.Dense(2)) model(tf.ones([2, 4])) # Note that for regular sequential models (wrapping graph network), # the layers' weights are built # without the model name as prefix (because the Functional API __call__ # reset the name scope). This is fixable, but it would be # backwards incompatible. 
self.assertEqual( ['sequential/dense/kernel:0', 'sequential/dense/bias:0', 'sequential/dense_1/kernel:0', 'sequential/dense_1/bias:0'], [v.name for v in model.variables]) @keras_parameterized.run_all_keras_modes def test_input_assumptions_propagation(self): model = keras.models.Sequential() model.add(keras.layers.Dense(1)) if tf.executing_eagerly(): with self.assertRaisesRegex(ValueError, 'expected min_ndim=2, found ndim=0'): model(1.0) @keras_parameterized.run_all_keras_modes def test_string_input(self): seq = keras.Sequential([ keras.layers.InputLayer(input_shape=(1,), dtype=tf.string), keras.layers.Lambda(lambda x: x[0]) ]) seq.run_eagerly = testing_utils.should_run_eagerly() preds = seq.predict([['tensorflow eager']]) self.assertEqual(preds.shape, (1,)) @keras_parameterized.run_all_keras_modes def test_multi_output_layer_not_accepted(self): class MultiOutputLayer(keras.layers.Layer): def call(self, inputs): return inputs, inputs with self.assertRaisesRegex(ValueError, 'should have a single output tensor'): keras.Sequential([MultiOutputLayer(input_shape=(3,))]) with self.assertRaisesRegex(ValueError, 'should have a single output tensor'): keras.Sequential([ keras.layers.Dense(1, input_shape=(3,)), MultiOutputLayer()]) # Should also raise error in a deferred build mode with self.assertRaisesRegex(ValueError, 'should have a single output tensor'): keras.Sequential([MultiOutputLayer()])(np.zeros((10, 10))) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_layer_add_after_compile_deferred(self): model = keras.Sequential([keras.layers.Dense(3)]) self.assertFalse(model.built) model.compile('adam', loss='mse') model.fit(np.random.random((1, 3)), np.random.random((1, 3))) self.assertTrue(model.built) model.add(keras.layers.Dense(3)) model.compile('adam', loss='mse') model.fit(np.random.random((1, 3)), np.random.random((1, 3))) self.assertTrue(model.built) def test_sequential_layer_tracking(self): """Test that Sequential only tracks layers added in 
init or `.add`.""" layer = keras.layers.Dense(1) model = keras.Sequential([layer]) self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[-1], layer) model.a = [keras.layers.Dense(3)] # should not be added to the layers list. self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[-1], layer) layer2 = keras.layers.Dense(2) model.add(layer2) self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[-1], layer2) model.a = [keras.layers.Dense(3)] # should not be added to the layers list. self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[-1], layer2) model.pop() self.assertEqual( list(model._flatten_layers(include_self=False, recursive=False))[-1], layer) def test_config_preserves_input_layer(self): model = keras.Sequential([ keras.Input((None,), name='my_embedding_input', dtype='int32'), keras.layers.Embedding(32, 32), keras.layers.Dense(3), ]) config = model.get_config() new_model = keras.Sequential.from_config(config) self.assertTrue(new_model.built) layers = list( new_model._flatten_layers(include_self=False, recursive=False)) self.assertEqual(layers[0].dtype, 'int32') self.assertEqual(layers[0].name, 'my_embedding_input') def test_name_unicity(self): model = keras.Sequential() model.add(keras.layers.Dense(3, name='specific_name')) with self.assertRaisesRegex(ValueError, 'should have unique names'): model.add(keras.layers.Dense(3, name='specific_name')) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_tf_module_call(self): class MyModule(tf.Module): def __init__(self): self.v = tf.Variable(2.) 
def __call__(self, x): return self.v * x model = keras.Sequential() model.add(MyModule()) model.compile('sgd', 'mse') x, y = np.ones((10, 1)), np.ones((10, 1)) model.fit(x, y, batch_size=2) self.assertLen(model.trainable_variables, 1) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_tf_module_training(self): class MyModule(tf.Module): def __init__(self): self.v = tf.Variable(2.) def call(self, x, training=None): # training should be set by Sequential. assert training is not None return self.v * x model = keras.Sequential() model.add(MyModule()) model.compile('sgd', 'mse') x, y = np.ones((10, 1)), np.ones((10, 1)) model.fit(x, y, batch_size=2) self.assertLen(model.trainable_variables, 1) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_tf_module_error(self): class MyModule(tf.Module): def __init__(self): self.v = tf.Variable(2.) model = keras.Sequential() with self.assertRaisesRegex(ValueError, 'is not defined'): model.add(MyModule()) class TestSequentialEagerIntegration(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_defun_on_call(self): # Check that one can subclass Sequential and place the `call` in a `defun`. 
class MySequential(keras.Sequential): def __init__(self, name=None): super(MySequential, self).__init__(name=name) self.call = tf.function(self.call) model = MySequential() model.add(keras.layers.Dense(4, activation='relu')) model.add(keras.layers.Dense(5, activation='softmax')) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @keras_parameterized.run_all_keras_modes def test_build_before_fit(self): # Fix for b/112433577 model = testing_utils.get_small_sequential_mlp(4, 5) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) model.build((None, 6)) x = np.random.random((2, 6)) y = np.random.random((2, 5)) model.fit(x, y, epochs=1) @keras_parameterized.run_all_keras_modes def test_build_empty_network(self): x = np.random.random((2, 6)) y = np.random.random((2, 5)) model = keras.Sequential() # Make sure an empty sequential model can still work with build(). model.build((None, 6)) self.assertTrue(model.built) model.add(keras.layers.Dense(5, input_shape=(6,))) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y) model.pop() self.assertFalse(model.built) model.build((None, 6)) self.assertTrue(model.built) if __name__ == '__main__': tf.test.main()
18,841
32.114236
80
py
keras
keras-master/keras/engine/saving.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Model saving utilities. Everything has been moved to keras/saving/. This file will be deleted soon. """ from keras.saving import * # pylint: disable=wildcard-import
895
39.727273
80
py
keras
keras-master/keras/engine/training_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import tensorflow.compat.v2 as tf import collections import io import tempfile import sys from absl.testing import parameterized import numpy as np from tensorflow.python.framework import test_util as tf_test_util from keras import backend from keras import combinations from keras import keras_parameterized from keras import layers as layers_module from keras import losses from keras import metrics as metrics_module from keras import optimizer_v2 from keras import testing_utils from keras.callbacks import Callback from keras.engine import input_layer from keras.engine import sequential from keras.engine import training as training_module from keras.engine import training_utils_v1 from keras.utils import data_utils from keras.utils import np_utils from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.rmsprop import RMSPropOptimizer try: import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top except ImportError: scipy_sparse = None class TrainingTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_model_instrumentation(self): layers = [ layers_module.Dense(10, dtype=np.float64), layers_module.Dense(10, dtype=np.float64) ] model 
= testing_utils.get_model_from_layers(layers, input_shape=(1,)) self.assertTrue(model._instrumented_keras_api) self.assertTrue(model._instrumented_keras_model_class) self.assertFalse(model._instrumented_keras_layer_class) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_fit_training_arg(self): class ReturnTraining(layers_module.Layer): def call(self, inputs, training): if training: return inputs + tf.constant([100], 'float32') else: return inputs + tf.constant([0], 'float32') model = sequential.Sequential([ReturnTraining()]) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) hist = model.fit(x=np.array([0.]), y=np.array([0.])) self.assertAllClose(hist.history['loss'][0], 10000) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_fit_on_empty(self): model = sequential.Sequential([layers_module.Dense(1)]) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) with self.assertRaisesRegex(ValueError, 'Unexpected result of `train_function`.*'): model.fit(x=np.array([]), y=np.array([])) @keras_parameterized.run_all_keras_modes def test_run_eagerly_setting(self): model = sequential.Sequential([layers_module.Dense(1)]) run_eagerly = testing_utils.should_run_eagerly() model.compile('sgd', 'mse', run_eagerly=run_eagerly) self.assertEqual(model.run_eagerly, run_eagerly) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @parameterized.named_parameters( ('train_on_batch', 'train_on_batch'), ('test_on_batch', 'test_on_batch'), ('predict_on_batch', 'predict_on_batch'), ('fit', 'fit'), ('evaluate', 'evaluate'), ('predict', 'predict'), ) def test_disallow_methods_inside_tf_function(self, method_name): model = sequential.Sequential([layers_module.Dense(1)]) run_eagerly = testing_utils.should_run_eagerly() model.compile('sgd', 'mse', run_eagerly=run_eagerly) @tf.function def my_fn(): getattr(model, method_name)(1) error_msg = 'inside a `tf.function`' with 
self.assertRaisesRegex(RuntimeError, error_msg): my_fn() @keras_parameterized.run_all_keras_modes def test_fit_and_validate_learning_phase(self): class ReturnTraining(layers_module.Layer): def call(self, inputs): return backend.in_train_phase(lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs)) model = sequential.Sequential([ReturnTraining(input_shape=(2,))]) model.compile( 'sgd', loss='mae', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.ones((40, 2), dtype=np.float32) targets = np.ones((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. train_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) # The training loss should be 0.0 self.assertAllClose(history.history['loss'][0], 0.0) # The validation loss should be 1.0. self.assertAllClose(history.history['val_loss'][0], 1.0) @keras_parameterized.run_all_keras_modes def test_fit_and_validate_training_arg(self): class ReturnTraining(layers_module.Layer): def call(self, inputs, training=None): return backend.in_train_phase( lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs), training=training) model = sequential.Sequential([ReturnTraining(input_shape=(2,))]) model.compile( 'sgd', loss='mae', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.ones((40, 2), dtype=np.float32) targets = np.ones((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. train_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) # The training loss should be 0.0 self.assertAllClose(history.history['loss'][0], 0.0) # The validation loss should be 1.0. 
self.assertAllClose(history.history['val_loss'][0], 1.0) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_target_dtype_matches_output(self): def loss_fn(labels, preds): self.assertEqual(labels.dtype, preds.dtype) return labels - preds layers = [ layers_module.Dense(10, dtype=np.float64), layers_module.Dense(10, dtype=np.float64) ] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) inputs = np.ones(shape=(10, 1), dtype=np.float64) targets = np.ones(shape=(10, 1), dtype=np.float64) model.compile( 'sgd', loss=loss_fn, run_eagerly=testing_utils.should_run_eagerly()) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) self.assertEqual(model.predict(inputs).dtype, np.float64) @keras_parameterized.run_all_keras_modes def test_fit_and_validate_nested_training_arg(self): class NestedReturnTraining(layers_module.Layer): def call(self, inputs, training=None): return backend.in_train_phase( lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs), training=training) class ReturnTraining(layers_module.Layer): def __init__(self, input_shape=None, **kwargs): super(ReturnTraining, self).__init__(input_shape=input_shape, **kwargs) self._nested_layer = None def build(self, input_shape): self._nested_layer = NestedReturnTraining() self.built = True def call(self, inputs): return self._nested_layer(inputs) model = sequential.Sequential([ReturnTraining(input_shape=(2,))]) model.compile( 'sgd', loss='mae', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.ones((40, 2), dtype=np.float32) targets = np.ones((40, 1), dtype=np.float32) # Test correctness with `steps_per_epoch`. 
train_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices( (inputs, targets)).batch(10) history = model.fit( train_dataset, epochs=2, verbose=1, validation_data=val_dataset) # The training loss should be 0.0 self.assertAllClose(history.history['loss'][0], 0.0) # The validation loss should be 1.0. self.assertAllClose(history.history['val_loss'][0], 1.0) @keras_parameterized.run_with_all_model_types(exclude_models='sequential') @keras_parameterized.run_all_keras_modes def test_fit_on_arrays(self): input_a = layers_module.Input(shape=(3,), name='input_a') input_b = layers_module.Input(shape=(3,), name='input_b') dense = layers_module.Dense(4, name='dense') dropout = layers_module.Dropout(0.5, name='dropout') branch_a = [input_a, dense] branch_b = [input_b, dense, dropout] model = testing_utils.get_multi_io_model(branch_a, branch_b) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=[metrics_module.CategoricalAccuracy(), 'mae'], loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly()) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) # Test fit at different verbosity model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=0) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=1) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=2, batch_size=5, verbose=2) model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np]) # Test with validation data model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], validation_data=([input_a_np, input_b_np], [output_d_np, output_e_np]), epochs=1, batch_size=5, verbose=0) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], 
validation_data=([input_a_np, input_b_np], [output_d_np, output_e_np]), epochs=2, batch_size=5, verbose=1) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], validation_data=([input_a_np, input_b_np], [output_d_np, output_e_np]), epochs=2, batch_size=5, verbose=2) # Test with validation split model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=2, batch_size=5, verbose=0, validation_split=0.2) if testing_utils.get_model_type() == 'functional': # Test with dictionary inputs model.fit( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, epochs=1, batch_size=5, verbose=0) model.fit( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, epochs=1, batch_size=5, verbose=1) model.fit( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, validation_data=({ 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }), epochs=1, batch_size=5, verbose=0) model.train_on_batch({ 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }) # Test with lists for loss, metrics loss = ['mae', 'mse'] model.compile( optimizer, loss, metrics=[metrics_module.CategoricalAccuracy(), 'mae'], run_eagerly=testing_utils.should_run_eagerly()) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=0) # Test with dictionaries for loss, metrics, loss weights if testing_utils.get_model_type() == 'functional': loss = {'dense': 'mse', 'dropout': 'mae'} loss_weights = {'dense': 1., 'dropout': 0.5} metrics = { 'dense': 'mse', 'dropout': metrics_module.CategoricalAccuracy() } model.compile( optimizer, loss, metrics=metrics, loss_weights=loss_weights, run_eagerly=testing_utils.should_run_eagerly()) model.fit( [input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5, verbose=0) # Build single-input 
model x = layers_module.Input(shape=(3,), name='input_a') y = layers_module.Dense(4)(x) model = training_module.Model(x, y) model.compile( optimizer, loss='mse', run_eagerly=testing_utils.should_run_eagerly()) # This will work model.fit([input_a_np], output_d_np, epochs=1) # Test model on a list of floats input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 4)) # Test execution on inputs that are lists of scalars. # TF2 and TF1 have slightly different semantics: if tf.executing_eagerly(): # In TF2 to avoid any ambiguity when there are nested lists # the entire input gets converted to a # single numpy array (& it only works in the case of a single io model) model.fit(np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np), epochs=2, batch_size=5, verbose=2) else: # In TF1 there was logic to try disambiguating between the individual # inputs when lists are nested. This allowed multi-io functional models # to support lists of scalars as input, but it caused ambiguity issues # for subclass models & made it trickier to pass multi-dimensional inputs # as lists of scalars to single io models. This was an excessive amount # of complexity for what boiled down to a convenience method we were # mainly just using for writing tests. 
model.fit([np.ndarray.tolist(input_a_np)], [np.ndarray.tolist(input_b_np)], epochs=2, batch_size=5, verbose=2) @keras_parameterized.run_all_keras_modes def test_evaluate_predict_on_arrays(self): a = layers_module.Input(shape=(3,), name='input_a') b = layers_module.Input(shape=(3,), name='input_b') dense = layers_module.Dense(4, name='dense') c = dense(a) d = dense(b) e = layers_module.Dropout(0.5, name='dropout')(c) model = training_module.Model([a, b], [d, e]) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' loss_weights = [1., 0.5] model.compile( optimizer, loss, metrics=['mae', metrics_module.CategoricalAccuracy()], loss_weights=loss_weights, sample_weight_mode=None, run_eagerly=testing_utils.should_run_eagerly()) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) # Test evaluate at different verbosity out = model.evaluate( [input_a_np, input_b_np], [output_d_np, output_e_np], batch_size=5, verbose=0) self.assertEqual(len(out), 7) out = model.evaluate( [input_a_np, input_b_np], [output_d_np, output_e_np], batch_size=5, verbose=1) self.assertEqual(len(out), 7) out = model.evaluate( [input_a_np, input_b_np], [output_d_np, output_e_np], batch_size=5, verbose=2) self.assertEqual(len(out), 7) out = model.test_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np]) self.assertEqual(len(out), 7) # Test evaluate with dictionary inputs model.evaluate( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, batch_size=5, verbose=0) model.evaluate( { 'input_a': input_a_np, 'input_b': input_b_np }, { 'dense': output_d_np, 'dropout': output_e_np }, batch_size=5, verbose=1) # Test predict out = model.predict([input_a_np, input_b_np], batch_size=5) self.assertEqual(len(out), 2) out = model.predict({'input_a': input_a_np, 'input_b': input_b_np}) self.assertEqual(len(out), 2) out = model.predict_on_batch({ 
'input_a': input_a_np, 'input_b': input_b_np }) self.assertEqual(len(out), 2) def _make_sequence_input_functions(self, input_type): # train and test xy_namedtuple = collections.namedtuple('xy_namedtuple', ['x', 'y']) # predict x_namedtuple = collections.namedtuple('x_namedtuple', ['x']) if input_type == 'dataset': dataset = tf.data.Dataset.range(16).map( lambda _: tf.ones(shape=(1,))) xy_dataset = tf.data.Dataset.zip((dataset, dataset)).batch(4) x_dataset = dataset.batch(4) def xy_function(use_namedtuple): return xy_dataset.map(xy_namedtuple) if use_namedtuple else xy_dataset def x_function(use_namedtuple): return x_dataset.map(x_namedtuple) if use_namedtuple else x_dataset return xy_function, x_function elif input_type == 'generator': def xy_generator(use_namedtuple): x, y = np.ones((4, 1)), np.ones((4, 1)) for _ in range(4): if use_namedtuple: yield xy_namedtuple(x, y) else: yield x, y def x_generator(use_namedtuple): x = np.ones((4, 1)) for _ in range(4): if use_namedtuple: yield x_namedtuple(x) else: yield x return xy_generator, x_generator elif input_type == 'sequence': class XYSequence(data_utils.Sequence): def __init__(self, use_namedtuple): self._use_namedtuple = use_namedtuple super(XYSequence, self).__init__() def __getitem__(self, idx): x, y = np.ones((4, 1)), np.ones((4, 1)) if self._use_namedtuple: return xy_namedtuple(x, y) return x, y def __len__(self): return 4 class XSequence(data_utils.Sequence): def __init__(self, use_namedtuple): self._use_namedtuple = use_namedtuple super(XSequence, self).__init__() def __getitem__(self, idx): x = np.ones((4, 1)) if self._use_namedtuple: return x_namedtuple(x) return x def __len__(self): return 4 return XYSequence, XSequence @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @keras_parameterized.run_with_all_model_types @parameterized.named_parameters( ('dataset', 'dataset'), ('generator', 'generator'), ('sequence', 'sequence'), ) def test_sequence_input_types(self, input_type): """Ensure that 
namedtuples and tuples are plumbed identically.""" if not tf.executing_eagerly(): self.skipTest('Improved checking is only present in data_adapter.') xy_function, x_function = self._make_sequence_input_functions(input_type) fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {} if input_type == 'generator': fit_kwargs['steps_per_epoch'] = 4 evaluate_kwargs['steps'] = 4 predict_kwargs['steps'] = 4 model = testing_utils.get_small_mlp(1, 1, 1) model.compile( loss='mse', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) model.fit(xy_function(use_namedtuple=False), **fit_kwargs) model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs) model.predict(x_function(use_namedtuple=False), **predict_kwargs) @keras_parameterized.run_all_keras_modes def test_custom_mapping_in_config(self): class MyModel(training_module.Model): def call(self, inputs): return inputs def get_config(self): self.a = {} return {'a': self.a} model = MyModel() self.assertIn('{"a": {}}', model.to_json()) def test_training_on_sparse_data_with_dense_placeholders_v1(self): with tf.Graph().as_default(): if scipy_sparse is None: return test_inputs = [ scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2) ] test_outputs = [ scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5) ] in1 = layers_module.Input(shape=(3,)) in2 = layers_module.Input(shape=(3,)) out1 = layers_module.Dropout(0.5, name='dropout')(in1) out2 = layers_module.Dense(4, name='dense_1')(in2) model = training_module.Model([in1, in2], [out1, out2]) model.predict(test_inputs, batch_size=2) optimizer = 'rmsprop' model.compile( optimizer, 'mse', metrics=['mae', metrics_module.CategoricalAccuracy()]) model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5) model.evaluate(test_inputs, test_outputs, batch_size=2) @keras_parameterized.run_all_keras_modes def test_compile_with_sparse_placeholders(self): inputs = layers_module.Input(shape=(10,), sparse=True) weights = 
tf.Variable( np.ones((10, 1)).astype(np.float32), name='weights') weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights) output_layer = layers_module.Lambda(weights_mult)(inputs) model = training_module.Model([inputs], output_layer) model.compile( loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'], run_eagerly=testing_utils.should_run_eagerly()) @keras_parameterized.run_all_keras_modes def test_that_trainable_disables_updates(self): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) a = layers_module.Input(shape=(4,)) layer = layers_module.BatchNormalization(input_shape=(4,)) b = layer(a) model = training_module.Model(a, b) model.trainable = False if not tf.compat.v1.executing_eagerly_outside_functions(): self.assertEmpty(model.updates) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) if not tf.compat.v1.executing_eagerly_outside_functions(): self.assertEmpty(model.updates) x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) if not tf.compat.v1.executing_eagerly_outside_functions(): self.assertAllGreater(len(model.updates), 0) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 layer.trainable = False model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) if not tf.compat.v1.executing_eagerly_outside_functions(): self.assertEmpty(model.updates) x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) def test_weight_deduplication_in_methods(self): inp = layers_module.Input(shape=(1,)) bn = layers_module.BatchNormalization() d = layers_module.Dense(1) m0 = training_module.Model(inp, d(bn(inp))) m1 = training_module.Model(inp, d(bn(inp))) x0 = m0(inp) x1 = m1(inp) x = 
layers_module.Add()([x0, x1]) model = training_module.Model(inp, x) self.assertLen(model.trainable_weights, 4) self.assertLen(model.non_trainable_weights, 2) self.assertLen(model.weights, 6) @keras_parameterized.run_all_keras_modes def test_weight_deduplication(self): class WatchingLayer(layers_module.Layer): def __init__(self, dense_to_track): # This will cause the kernel and bias to be double counted, effectively # doubling the learning rate if weights are not deduped. self._kernel = dense_to_track.kernel self._bias = dense_to_track.bias super(WatchingLayer, self).__init__() inp = layers_module.Input(shape=(1,)) dense_layer = layers_module.Dense(1) dense_output = dense_layer(inp) # This will build the dense kernel # Deterministically set weights to make the test repeatable. dense_layer.set_weights([np.ones((1, 1)), np.zeros((1,))]) output = WatchingLayer(dense_layer)(dense_output) model = training_module.Model(inp, output) # 0.25 is the edge of the radius of convergence for the double apply case. # At lr=0.24, the double apply case will very slowly descend while the # correct case will drop very quickly. model.compile( loss='mse', optimizer=optimizer_v2.gradient_descent.SGD(0.24), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones((64 * 2,)) y = 4.5 * x - 3. history = model.fit(x, y, batch_size=64, epochs=2, verbose=2) # If the gradient apply is duplicated then the loss after 2 epochs will # be ~0.15, compared to the correct answer of O(1e-7). 
self.assertLess(history.history['loss'][-1], 1e-6) @keras_parameterized.run_all_keras_modes def test_weight_shared_across_layers(self): class AddWeightLayer(layers_module.Layer): def __init__(self, trainable_var, non_trainable_var): self.trainable_var = trainable_var self.non_trainable_var = non_trainable_var super(AddWeightLayer, self).__init__() def call(self, inputs): return inputs + self.trainable_var class LayerWithWeightSharedLayers(layers_module.Layer): def __init__(self): super(LayerWithWeightSharedLayers, self).__init__() shared_trainable_var = tf.Variable(1.) shared_non_trainable_var = tf.Variable( 1., trainable=False) self.layer1 = AddWeightLayer(shared_trainable_var, shared_non_trainable_var) self.layer2 = AddWeightLayer(shared_trainable_var, shared_non_trainable_var) def call(self, inputs): return self.layer2(self.layer1(inputs)) l = LayerWithWeightSharedLayers() layers = list(l._flatten_layers(include_self=False, recursive=False)) self.assertEqual(layers, [l.layer1, l.layer2]) self.assertEqual(l.variables, [l.layer1.trainable_var, l.layer1.non_trainable_var]) self.assertEqual(l.trainable_variables, [l.layer1.trainable_var]) self.assertEqual(l.non_trainable_variables, [l.layer1.non_trainable_var]) self.assertLen(l.get_weights(), 2) @keras_parameterized.run_all_keras_modes def test_weight_tracking_for_template(self): def variable_scoped_function(trainable=True): return tf.compat.v1.get_variable( 'dummy', shape=[1], trainable=trainable, initializer=tf.compat.v1.zeros_initializer()) def nested_template(): nested1 = tf.compat.v1.make_template('nested', variable_scoped_function) nested2 = tf.compat.v1.make_template('nested', variable_scoped_function) v1 = nested1() v2 = nested2() # nested1 and nested2 should not share variables self.assertIsNot(v1, v2) # Variables created by nested1 should be isolated from variables # created by nested2. 
self.assertEqual(1, len(nested1.variables)) self.assertEqual(1, len(nested2.variables)) self.assertIs(nested1.variables[0], v1) self.assertIs(nested2.variables[0], v2) self.assertEqual(1, len(nested1.trainable_variables)) self.assertEqual(1, len(nested2.trainable_variables)) self.assertIs(nested1.trainable_variables[0], v1) self.assertIs(nested2.trainable_variables[0], v2) self.assertEqual(len(nested1.non_trainable_variables), 0) self.assertEqual(len(nested2.non_trainable_variables), 0) return v1, v2 tmpl1 = tf.compat.v1.make_template('s1', nested_template) tmpl2 = tf.compat.v1.make_template('s1', nested_template) v1, v2 = tmpl1() v5, v6 = tmpl2() model = training_module.Model() model.template = tmpl1 self.assertEqual(2, len(model.variables)) self.assertIs(model.variables[0], v1) self.assertIs(model.variables[1], v2) self.assertEqual(2, len(model.variables)) self.assertIs(model.trainable_variables[0], v1) self.assertIs(model.trainable_variables[1], v2) self.assertEqual(len(model.non_trainable_variables), 0) model.templates = [tmpl2] for v, w in zip(model.variables, [v1, v2, v5, v6]): self.assertIs(v, w) for v, w in zip(model.trainable_variables, [v1, v2, v5, v6]): self.assertIs(v, w) self.assertEqual(len(model.non_trainable_variables), 0) # Make sure losses, layers, and updates aren't broken by having a Template # in the mix, which does not expose any updates or losses. 
    self.assertEqual([], model.layers)
    self.assertEqual([], model.updates)
    self.assertEqual([], model.losses)
    self.assertEqual([], model.templates.layers)
    self.assertEqual([], model.templates.updates)
    self.assertEqual([], model.templates.losses)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_logs_passed_to_callbacks(self):
    """Callbacks receive per-batch and per-epoch logs with metric keys."""
    input_dim = 5
    num_classes = 1

    class TestCallback(Callback):
      # Records the last logs dict and the number of invocations.

      def __init__(self):
        super(TestCallback, self).__init__()
        self.epoch_end_logs = None
        self.batch_end_logs = None
        self.epoch_end_call_count = 0
        self.batch_end_call_count = 0

      def on_epoch_end(self, epoch, logs=None):
        self.epoch_end_logs = logs
        self.epoch_end_call_count += 1

      def on_batch_end(self, batch, logs=None):
        self.batch_end_logs = logs
        self.batch_end_call_count += 1

    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=num_classes, input_dim=input_dim)
    model.compile(
        loss='binary_crossentropy',
        metrics=['acc'],
        weighted_metrics=['mae'],
        optimizer=RMSPropOptimizer(learning_rate=0.01),
        run_eagerly=testing_utils.should_run_eagerly())

    np.random.seed(1337)
    (x_train, y_train), (_, _) = testing_utils.get_test_data(
        train_samples=10,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)

    test_callback = TestCallback()
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        epochs=2,
        verbose=0,
        callbacks=[test_callback],
        validation_data=(x_train, y_train))
    # 10 samples / batch_size 2 = 5 batches per epoch, times 2 epochs.
    self.assertEqual(test_callback.batch_end_call_count, 10)
    self.assertEqual(test_callback.epoch_end_call_count, 2)

    # Batch logs carry train metrics only; epoch logs add `val_`-prefixed ones.
    self.assertSetEqual(
        set(test_callback.batch_end_logs.keys()), set(['acc', 'loss', 'mae']))
    self.assertSetEqual(
        set(test_callback.epoch_end_logs.keys()),
        set(['acc', 'loss', 'mae', 'val_acc', 'val_loss', 'val_mae']))

  @keras_parameterized.run_all_keras_modes
  def test_mismatched_output_shape_and_target_shape(self):
    # Sparse targets have one fewer dimension than the (10, 3, 5) output.
    model = sequential.Sequential([
        layers_module.Dense(2, input_shape=(3, 4)),
        layers_module.Dense(5),
    ])
    model.compile(
        RMSPropOptimizer(learning_rate=0.001),
        loss='sparse_categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    # Test with Numpy data
    x_train = np.random.random((10, 3, 4)).astype(np.float32)
    y_train = np.random.randint(0, 5, size=(10, 3)).astype(np.float32)
    model.fit(x_train, y_train, batch_size=5, epochs=1)

    # Test with iterator
    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    dataset = dataset.repeat(10)
    dataset = dataset.batch(10)
    model.fit(dataset, epochs=1, steps_per_epoch=2)

    if tf.executing_eagerly():
      # Test with eager execution
      model.compile(RMSPropOptimizer(learning_rate=0.001),
                    loss='sparse_categorical_crossentropy',
                    run_eagerly=True)
      model.fit(x_train, y_train, batch_size=5, epochs=1)

      # Test with eager execution and iterator
      model.fit(dataset, epochs=1, steps_per_epoch=2)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_losses_in_defun(self):
    """Layer regularization losses are identical inside and outside tf.function."""
    layer = layers_module.Dense(1, kernel_regularizer='l1')
    layer(tf.ones([1, 10]))

    @tf.function
    def get_losses():
      return layer.losses

    self.assertAllEqual(
        self.evaluate(layer.losses), self.evaluate(get_losses()))

  @keras_parameterized.run_all_keras_modes
  def test_logging(self):
    """fit(verbose default) writes epoch progress lines to stdout."""
    mock_stdout = io.StringIO()
    model = sequential.Sequential()
    model.add(layers_module.Dense(10, activation='relu'))
    model.add(layers_module.Dense(1, activation='sigmoid'))
    model.compile(
        RMSPropOptimizer(learning_rate=0.001),
        loss='binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    # Capture the progbar output by patching sys.stdout.
    with tf.compat.v1.test.mock.patch.object(sys, 'stdout', mock_stdout):
      model.fit(
          np.ones((10, 10), 'float32'), np.ones((10, 1), 'float32'), epochs=10)
    self.assertTrue('Epoch 5/10' in mock_stdout.getvalue())

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_training_with_loss_instance(self):
    # Multi-output model sharing one Dense layer; `e` is a Dropout of `c`.
    a = layers_module.Input(shape=(3,), name='input_a')
    b = layers_module.Input(shape=(3,), name='input_b')
    dense = layers_module.Dense(4, name='dense')
    c = dense(a)
    d = dense(b)
    e = layers_module.Dropout(0.5,
                              name='dropout')(c)
    model = training_module.Model([a, b], [d, e])
    loss_weights = [1., 0.5]
    # A single Loss instance is applied to every output, with per-output
    # weighting via loss_weights.
    model.compile(
        RMSPropOptimizer(learning_rate=0.001),
        loss=losses.MeanSquaredError(),
        metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
        loss_weights=loss_weights)

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_d_np = np.random.random((10, 4))
    output_e_np = np.random.random((10, 4))

    model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
              epochs=1,
              batch_size=5)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_static_batch_in_input_layer(self):
    """A static batch size declared on Input overrides fit's default batching."""
    if tf.executing_eagerly():
      self.skipTest('Not inferred in eager.')

    class Counter(Callback):
      # Counts executed batches per fit() call.

      def __init__(self):
        self.batches = 0

      def on_batch_end(self, batch, logs=None):
        self.batches += 1

    x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32')

    # batch_size=None -> default batch size 32 -> 2 batches of 64 samples;
    # batch_size=4 -> 16 batches.
    for batch_size, expected_batches in [(None, 2), (4, 16)]:
      inputs = input_layer.Input(batch_size=batch_size, shape=(10,))
      outputs = layers_module.Dense(1, activation='sigmoid')(inputs)
      model = training_module.Model(inputs, outputs)

      model.compile(optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
      counter = Counter()
      model.fit(x, y, callbacks=[counter])
      self.assertEqual(counter.batches, expected_batches)

      # Same expectation for a Sequential model using batch_input_shape.
      model = sequential.Sequential(
          [layers_module.Dense(1, batch_input_shape=(batch_size, 10))])
      model.compile(optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
      counter = Counter()
      model.fit(x, y, callbacks=[counter])
      self.assertEqual(counter.batches, expected_batches)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_static_batch_in_input_layer_consistency_checks(self):
    """fit() rejects a batch_size conflicting with the Input's static one."""
    if tf.executing_eagerly():
      self.skipTest('Not inferred in eager.')
    x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32')

    inputs = input_layer.Input(batch_size=2, shape=(10,))
    outputs = layers_module.Dense(1, activation='sigmoid')(inputs)
    model = training_module.Model(inputs, outputs)
    model.compile(optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
    with self.assertRaisesRegex(ValueError,
                                'incompatible with the specified batch size'):
      model.fit(x, y, batch_size=4)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_compatible_batch_size_functional_model(self):
    """Mismatched static batch sizes across Inputs only produce a warning."""

    class MyLayer(layers_module.Layer):

      def call(self, inputs):
        return tf.concat(inputs, axis=0)

    input1 = input_layer.Input(batch_size=2, shape=(10,))
    input2 = input_layer.Input(batch_size=3, shape=(10,))
    outputs = MyLayer()([input1, input2])
    with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_warn:
      training_module.Model([input1, input2], outputs)
      # NOTE(review): 'incompatiable' is a typo, but it matches the warning
      # text Keras actually emits — fixing it here alone would break the test;
      # it must be fixed in the library and the test together.
      self.assertEqual(
          mock_warn.call_args_list[0][0][0],
          'Found incompatiable static batch sizes among the inputs. '
          'Batch sizes: [2, 3]')

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_calling_subclass_model_on_different_datasets(self):
    """A subclassed model can be reused for predict() on distinct datasets."""

    class SubclassedModel(training_module.Model):

      def call(self, inputs):
        return inputs * 2

    model = SubclassedModel()
    dataset_one = tf.data.Dataset.from_tensor_slices([[0], [1]]).batch(2)
    dataset_two = tf.data.Dataset.from_tensor_slices(
        [[3], [4], [5], [6], [7], [8]]).batch(2)
    self.assertAllEqual([[0], [2]], model.predict(dataset_one, steps=1))
    self.assertAllEqual([[6], [8], [10], [12]],
                        model.predict(dataset_two, steps=2))

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_training_on_sparse_categorical_crossentropy_loss_with_softmax(self):
    """Eager (run_eagerly=True) and graph training give the same loss."""
    np.random.seed(1337)
    train_x = np.ones((100, 4))
    train_y = np.random.randint(0, 1, size=(100, 1))

    reference_model = testing_utils.get_small_sequential_mlp(16, 2,
                                                             input_dim=4)
    reference_model.compile(loss='sparse_categorical_crossentropy',
                            optimizer=RMSPropOptimizer(learning_rate=0.001),
                            run_eagerly=True)
    # Freeze the initial weights so both models start identically.
    fixed_weights = reference_model.get_weights()
    reference_model_loss = reference_model.train_on_batch(train_x, train_y)

    test_model = testing_utils.get_small_sequential_mlp(16, 2,
                                                        input_dim=4)
    test_model.compile(loss='sparse_categorical_crossentropy',
                       optimizer=RMSPropOptimizer(learning_rate=0.001),
                       run_eagerly=False)
    test_model.set_weights(fixed_weights)
    test_model_loss = test_model.train_on_batch(train_x, train_y)
    self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_training_on_categorical_crossentropy_loss_with_softmax(self):
    """Eager and graph training parity for categorical crossentropy."""
    np.random.seed(1337)
    train_x = np.ones((100, 4))
    train_y = np_utils.to_categorical(
        np.random.randint(0, 1, size=(100, 1)), 2)

    reference_model = testing_utils.get_small_sequential_mlp(16, 2,
                                                             input_dim=4)
    reference_model.compile(loss='categorical_crossentropy',
                            optimizer=RMSPropOptimizer(learning_rate=0.001),
                            run_eagerly=True)
    # Same starting weights for both models so the losses are comparable.
    fixed_weights = reference_model.get_weights()
    reference_model_loss = reference_model.train_on_batch(train_x, train_y)

    test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4)
    test_model.compile(loss='categorical_crossentropy',
                       optimizer=RMSPropOptimizer(learning_rate=0.001),
                       run_eagerly=False)
    test_model.set_weights(fixed_weights)
    test_model_loss = test_model.train_on_batch(train_x, train_y)
    self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_training_on_binary_crossentropy_loss(self):
    """Eager and graph training parity for binary crossentropy."""
    train_x = np.ones((100, 4), dtype=np.float32)
    train_y = np.ones((100, 1), dtype=np.float32)

    reference_model = testing_utils.get_small_sequential_mlp(16, 1,
                                                             input_dim=4)
    reference_model.compile(loss='binary_crossentropy',
                            optimizer=RMSPropOptimizer(learning_rate=0.001),
                            run_eagerly=True)
    fixed_weights = reference_model.get_weights()
    reference_model_loss = reference_model.train_on_batch(train_x, train_y)

    test_model = testing_utils.get_small_sequential_mlp(16, 1, input_dim=4)
    test_model.compile(loss='binary_crossentropy',
                       optimizer=RMSPropOptimizer(learning_rate=0.001),
                       run_eagerly=False)
    test_model.set_weights(fixed_weights)
    test_model_loss = test_model.train_on_batch(train_x, train_y)
    self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters(
      ('default', 1, 4), ('integer_two', 2, 2), ('integer_four', 4, 1),
      ('simple_list', [1, 3, 4], 3), ('duplicated_list', [4, 2, 2], 2))
  def test_validation_freq(self, validation_freq, expected_runs):
    """validation_freq (int or list of epochs) controls how often eval runs."""
    x, y = np.ones((10, 10)), np.ones((10, 1))
    model = testing_utils.get_small_mlp(2, 1, 10)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())

    class ValCounter(Callback):
      # Counts how many validation passes actually run.

      def __init__(self):
        self.val_runs = 0

      def on_test_begin(self, logs=None):
        self.val_runs += 1

    val_counter = ValCounter()
    model.fit(
        x,
        y,
        epochs=4,
        validation_data=(x, y),
        validation_freq=validation_freq,
        callbacks=[val_counter])
    self.assertEqual(val_counter.val_runs, expected_runs)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_validation_steps_without_data(self):
    """validation_steps without validation_data is rejected (v1 behavior)."""
    if tf.executing_eagerly():
      self.skipTest('Check removed in new `fit`')
    x, y = np.ones((10, 10)), np.ones((10, 1))
    model = testing_utils.get_small_mlp(2, 1, 10)
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    with self.assertRaisesRegex(
        ValueError, '`validation_steps` should not be specified if '
        '`validation_data` is None.'):
      model.fit(x, y, epochs=4, validation_data=None, validation_steps=3)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_layer_with_variable_output(self):
    """A layer whose call() returns a Variable can still be trained through."""

    class VariableOutputLayer(layers_module.Layer):

      def build(self, input_shape):
        self.v = self.add_weight('output_var', shape=(2, 5),
                                 initializer='ones')

      def call(self, inputs):
        return self.v

    model = testing_utils.get_model_from_layers(
        [VariableOutputLayer(), layers_module.Dense(1)], input_shape=(10,))
    # TODO(omalleyt): Make this work with `run_eagerly=True`.
    model.compile('sgd', 'mse', run_eagerly=False)
    model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=5)

    # VariableOutputLayer's weight plus the Dense kernel and bias.
    self.assertLen(model.trainable_variables, 3)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_model_dtype(self):
    """Inputs are cast to the layer's dtype before call() runs."""

    class AssertTypeLayer(layers_module.Layer):

      def call(self, inputs):
        assert inputs.dtype.name == self.dtype, (
            'Input tensor has type %s which does not match assert type %s' %
            (inputs.dtype.name, self.assert_type))
        return inputs + 1.

    for dtype in ('float16', 'float32', 'float64'):
      model = testing_utils.get_model_from_layers(
          [AssertTypeLayer(dtype=dtype)], input_shape=(10,))
      model.compile(
          'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

      x = np.ones((10, 10))
      y = np.ones((10, 10))
      model.fit(x, y)
      model.test_on_batch(x, y)
      model(x)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_model_input_dtype(self):
    """float64 input data is computed in the model's float32 compute dtype."""
    model = testing_utils.get_small_mlp(1, 10, 10)
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    x = np.ones((10, 10)).astype(np.float64)
    y = np.ones((10, 10)).astype(np.float64)
    dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    model.fit(dataset)
    self.assertEqual(model._compute_dtype, 'float32')

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_subclassed_model_with_training_arg(self):
    """fit() propagates training=True down through nested call() methods."""

    class LayerWithTrainingArg(layers_module.Layer):

      def call(self, inputs, training=None):
        # Record the training flag the framework passed in.
        self.training = training
        return inputs

    class ModelWithTrainingArg(training_module.Model):

      def __init__(self):
        super(ModelWithTrainingArg, self).__init__()
        self.l1 = LayerWithTrainingArg()

      def call(self, inputs, training=None):
        self.training = training
        inputs = self.l1(inputs, training=training)
        return inputs

    x = np.zeros((1, 2))
    model = ModelWithTrainingArg()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, x, epochs=1)

    # In graph mode the flag is the symbolic learning-phase tensor rather
    # than a Python bool.
    if tf.executing_eagerly():
      expected_training_arg = True
    else:
      expected_training_arg = backend.symbolic_learning_phase()

    self.assertIs(model.training, expected_training_arg)
    self.assertIs(model.l1.training, expected_training_arg)

  @keras_parameterized.run_all_keras_modes
  def test_error_when_model_is_not_compiled(self):
    """fit() before compile() raises RuntimeError for both model flavors."""
    inputs = input_layer.Input(shape=(1,))
    outputs = layers_module.Dense(1)(inputs)
    model = training_module.Model(inputs, outputs)
    with self.assertRaisesRegex(RuntimeError, 'must compile your model'):
      model.fit(np.ones((1, 1)), np.ones((1, 1)))

    class MyModel(training_module.Model):

      def call(self, x):
        self.add_loss(tf.reduce_sum(x))
        return x

    model = MyModel()
    with self.assertRaisesRegex(RuntimeError, 'must compile your model'):
      model.fit(np.random.random((32, 1)), epochs=2)

  @keras_parameterized.run_all_keras_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_losses_of_different_dtypes(self):
    """Regularization/add_loss terms of mixed dtypes can be summed in fit()."""
    inp = input_layer.Input(shape=(2,))
    out_1 = layers_module.Dense(
        2, dtype='float32', kernel_regularizer='l2')(
            inp)
    out_2 = layers_module.Dense(
        2, dtype='float16', kernel_regularizer='l2')(
            inp)
    model = training_module.Model(inp, [out_1, out_2])
    # Add a third loss in yet another dtype (float64).
    extra_loss = tf.reduce_sum(tf.cast(out_2, 'float64'))
    model.add_loss(extra_loss)
    model.compile('sgd', ['mse', 'mse'],
                  run_eagerly=testing_utils.should_run_eagerly())
    x, y = np.ones((10, 2)), np.ones((10, 2))
    model.fit(x, [y, y])

  @keras_parameterized.run_all_keras_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_losses_of_different_dtypes_with_subclassed_model(self):
    """A float64 add_loss inside a float32 subclassed model trains fine."""

    class MyModel(training_module.Model):

      def build(self, _):
        self.dense = layers_module.Dense(2)

      def call(self, inputs):
        self.add_loss(tf.cast(tf.nn.l2_loss(inputs), 'float64'))
        return self.dense(inputs)

    model = MyModel(dtype='float32')
    model.compile('sgd', 'mse',
                  run_eagerly=testing_utils.should_run_eagerly())
    x, y = np.ones((10, 2)), np.ones((10, 2))
    model.fit(x, y)
  @keras_parameterized.run_all_keras_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_regularizer_of_different_dtype(self):
    """A custom regularizer may return a dtype different from the layer's."""
    inp = input_layer.Input(shape=(2,))

    def regularizer(weight):
      # Deliberately emits float64 while the layer computes in float32.
      return tf.cast(tf.nn.l2_loss(weight), 'float64')

    out = layers_module.Dense(
        2, dtype='float32', kernel_regularizer=regularizer)(
            inp)
    model = training_module.Model(inp, out)
    model.compile('sgd', 'mse',
                  run_eagerly=testing_utils.should_run_eagerly())
    x, y = np.ones((10, 2)), np.ones((10, 2))
    model.fit(x, y)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_outputs_are_floats(self):
    """Scalar results from fit/evaluate/*_on_batch are Python floats."""
    x, y = np.ones((10, 1)), np.ones((10, 1))
    model = sequential.Sequential([layers_module.Dense(1)])
    model.compile('sgd', 'mse', metrics=['accuracy'],
                  run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit(x, y, epochs=2)
    self.assertIsInstance(history.history['loss'][0], float)
    self.assertIsInstance(history.history['accuracy'][0], float)

    loss, accuracy = model.train_on_batch(x, y)
    self.assertIsInstance(loss, float)
    self.assertIsInstance(accuracy, float)

    loss, accuracy = model.evaluate(x, y)
    self.assertIsInstance(loss, float)
    self.assertIsInstance(accuracy, float)

    loss, accuracy = model.test_on_batch(x, y)
    self.assertIsInstance(loss, float)
    self.assertIsInstance(accuracy, float)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_int_output(self):
    """A metric whose result() is an int64 tensor surfaces as a Python int."""
    x, y = np.ones((10, 1)), np.ones((10, 1))
    model = sequential.Sequential([layers_module.Dense(1)])

    class MyMetric(metrics_module.Metric):

      def update_state(self, y_true, y_pred, sample_weight=None):
        del y_true, y_pred, sample_weight

      def result(self):
        return tf.constant(1, dtype='int64')

    model.compile('sgd', 'mse', metrics=[MyMetric()],
                  run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(x, y, epochs=2)
    self.assertIsInstance(history.history['my_metric'][0], int)

  @keras_parameterized.run_all_keras_modes
  def test_calling_aggregate_gradient(self):
    """The training loop routes gradients through _aggregate_gradients."""

    class _Optimizer(optimizer_v2.gradient_descent.SGD):
"""Mock optimizer to check if _aggregate_gradient is called.""" _HAS_AGGREGATE_GRAD = True def __init__(self): self.aggregate_gradients_called = False super(_Optimizer, self).__init__(name='MyOptimizer') def _aggregate_gradients(self, grads): self.aggregate_gradients_called = True return super(_Optimizer, self)._aggregate_gradients(grads) mock_optimizer = _Optimizer() model = sequential.Sequential() model.add(layers_module.Dense(10, activation='relu')) model.compile(mock_optimizer, 'mse', run_eagerly=testing_utils.should_run_eagerly()) x, y = np.ones((10, 10)), np.ones((10, 10)) model.fit(x, y) self.assertEqual(model.optimizer.aggregate_gradients_called, True) class _OptimizerOverrideApplyGradients(_Optimizer): """Override apply_gradients. To test the case where the optimizer does not define the experimental_aggregate_gradients parameter. """ _HAS_AGGREGATE_GRAD = False def apply_gradients(self, grads_and_vars, name=None): # pylint: disable=useless-super-delegation return super(_OptimizerOverrideApplyGradients, self).apply_gradients(grads_and_vars, name) mock_optimizer = _OptimizerOverrideApplyGradients() model.compile(mock_optimizer, 'mse', run_eagerly=testing_utils.should_run_eagerly()) x, y = np.ones((10, 10)), np.ones((10, 10)) model.fit(x, y) self.assertEqual(model.optimizer.aggregate_gradients_called, True) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_gradients_are_none(self): class DenseWithExtraWeight(layers_module.Dense): def build(self, input_shape): # Gradients w.r.t. 
extra_weights are None self.extra_weight_1 = self.add_weight('extra_weight_1', shape=(), initializer='ones') super(DenseWithExtraWeight, self).build(input_shape) self.extra_weight_2 = self.add_weight('extra_weight_2', shape=(), initializer='ones') model = sequential.Sequential([DenseWithExtraWeight(4, input_shape=(4,))]) # Test clipping can handle None gradients opt = optimizer_v2.adam.Adam(clipnorm=1.0, clipvalue=1.0) model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.random.normal(size=(64, 4)) targets = np.random.normal(size=(64, 4)) old_kernel = model.get_weights()[1] model.fit(inputs, targets) new_kernel = model.get_weights()[1] self.assertNotAllEqual(old_kernel, new_kernel) @keras_parameterized.run_all_keras_modes def test_layer_ordering(self): class MyLayer(layers_module.Layer): pass class MyModel(training_module.Model): def __init__(self, name): super(MyModel, self).__init__(name=name) self.weight = tf.Variable(0, name=name) self.direct_sublayer = MyLayer(name='direct') self.direct_sublayer.d = {'d': MyLayer(name='direct/dict')} self.dict_sublayer = {'d': MyLayer(name='dict')} self.dict_sublayer['d'].direct = MyLayer(name='dict/direct') model = MyModel('model') # All sublayers, including self and recursive sublayers. self.assertEqual(['model', 'direct', 'direct/dict', 'dict', 'dict/direct'], [l.name for l in model._flatten_layers()]) # Only direct sublayers, including those in data structures. 
    self.assertEqual(['direct', 'dict'], [l.name for l in model.layers])

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_trainable_state_setting(self):
    """layer.trainable is captured at compile time per model."""

    class UpdateLayer(layers_module.Layer):

      def __init__(self):
        super(UpdateLayer, self).__init__()
        self.v = tf.Variable(0., trainable=False)

      def call(self, x):
        self.add_update(lambda: self.v.assign_add(1.))
        return x * self.v

    layer = UpdateLayer()
    # Compiled while layer.trainable is True -> updates run.
    model_with_updates = sequential.Sequential([layer])
    model_with_updates.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    # Compiled after setting trainable=False -> updates skipped.
    layer.trainable = False
    model_without_updates = sequential.Sequential([layer])
    model_without_updates.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    x, y = np.ones((10, 1)), np.ones((10, 1))

    self.assertEqual(self.evaluate(layer.v), 0.)
    model_with_updates.fit(x, y, batch_size=10)
    # assign_add called.
    self.assertEqual(self.evaluate(layer.v), 1.)
    model_without_updates.fit(x, y, batch_size=10)
    # assign_add not called.
    self.assertEqual(self.evaluate(layer.v), 1.)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @parameterized.named_parameters(
      ('numpy_array', 'numpy_array'),
      ('dataset_array', 'dataset_array'),
      ('dataset_dict', 'dataset_dict'))
  def test_single_input_no_tuple_wrapping(self, input_type):
    """A lone x (no targets) reaches the *_step methods un-wrapped."""
    x = np.ones((10, 1))

    if input_type == 'numpy_array':
      batch_size = 3
      expected_data_type = tf.Tensor
    elif input_type == 'dataset_array':
      x = tf.data.Dataset.from_tensor_slices(x).batch(3)
      batch_size = None
      expected_data_type = tf.Tensor
    else:
      x = {'my_input': x}
      x = tf.data.Dataset.from_tensor_slices(x).batch(3)
      batch_size = None
      expected_data_type = dict

    # Alias self so the nested class can issue assertions.
    test_case = self

    class MyModel(training_module.Model):

      def train_step(self, data):
        # No tuple wrapping for single x input and no targets.
        test_case.assertIsInstance(data, expected_data_type)
        return super(MyModel, self).train_step(data)

      def test_step(self, data):
        test_case.assertIsInstance(data, expected_data_type)
        return super(MyModel, self).test_step(data)

      def predict_step(self, data):
        test_case.assertIsInstance(data, expected_data_type)
        return super(MyModel, self).predict_step(data)

    inputs = layers_module.Input(shape=(1,), name='my_input')
    outputs = layers_module.Dense(1)(inputs)
    model = MyModel(inputs, outputs)
    # add_loss supplies a training signal since no targets are given.
    model.add_loss(tf.reduce_sum(outputs))
    model.compile('sgd')
    model.fit(x, batch_size=batch_size)
    model.evaluate(x, batch_size=batch_size)
    model.predict(x, batch_size=batch_size)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @parameterized.named_parameters(
      ('custom_metrics', False, True),
      ('compiled_metrics', True, False),
      ('both_compiled_and_custom_metrics', True, True))
  def test_evaluate_with_custom_test_step(self, use_compiled_metrics,
                                          use_custom_metrics):
    """evaluate() honors metrics dicts returned by an overridden test_step."""

    class MyModel(training_module.Model):

      def test_step(self, data):
        x, y = data
        pred = self(x)
        metrics = {}
        if use_compiled_metrics:
          self.compiled_metrics.update_state(y, pred)
          self.compiled_loss(y, pred)
          for metric in self.metrics:
            metrics[metric.name] = metric.result()
        if use_custom_metrics:
          custom_metrics = {
              'mean': tf.reduce_mean(pred),
              'sum': tf.reduce_sum(pred)
          }
          metrics.update(custom_metrics)
        return metrics

    inputs = layers_module.Input((2,))
    outputs = layers_module.Dense(3)(inputs)
    model = MyModel(inputs, outputs)
    if use_compiled_metrics:
      model.compile('adam', 'mse', metrics=['mae', 'mape'],
                    run_eagerly=testing_utils.should_run_eagerly())
    else:
      model.compile('adam', 'mse',
                    run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((4, 2))
    y = np.random.random((4, 3))
    results_list = model.evaluate(x, y)
    results_dict = model.evaluate(x, y, return_dict=True)
    # List and dict forms of the results must agree in size and content.
    self.assertLen(results_list, len(results_dict))
    if use_compiled_metrics and use_custom_metrics:
      self.assertLen(results_list, 5)
      self.assertEqual(results_list, [
          results_dict['loss'], results_dict['mae'], results_dict['mape'],
          results_dict['mean'], results_dict['sum']
      ])
    if use_compiled_metrics and not use_custom_metrics:
      self.assertLen(results_list, 3)
      self.assertEqual(results_list, [
          results_dict['loss'], results_dict['mae'], results_dict['mape']
      ])
    if not use_compiled_metrics and use_custom_metrics:
      self.assertLen(results_list, 2)
      self.assertEqual(results_list,
                       [results_dict['mean'], results_dict['sum']])

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_model_make_function(self):
    """make_*_function caches its result unless force=True is passed."""
    layers = [
        layers_module.Dense(10, dtype=np.float64),
        layers_module.Dense(10, dtype=np.float64)
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    original_train_function = model.make_train_function()
    self.assertIsNotNone(original_train_function)
    self.assertEqual(model.make_train_function(), original_train_function)
    # Check that we regenerate it without reusing the cached version.
    self.assertNotEqual(
        model.make_train_function(force=True), original_train_function)

    original_test_function = model.make_test_function()
    self.assertIsNotNone(original_test_function)
    self.assertEqual(model.make_test_function(), original_test_function)
    # Check that we regenerate it without reusing the cached version.
    self.assertNotEqual(
        model.make_test_function(force=True), original_test_function)

    original_predict_function = model.make_predict_function()
    self.assertIsNotNone(original_predict_function)
    self.assertEqual(model.make_predict_function(),
                     original_predict_function)
    # Check that we regenerate it without reusing the cached version.
    self.assertNotEqual(
        model.make_predict_function(force=True), original_predict_function)


class TestExceptionsAndWarnings(keras_parameterized.TestCase):
  """Tests for error messages raised on malformed fit/predict inputs."""

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_fit_on_no_output(self):
    """fit() without targets on a loss-compiled model raises TypeError."""
    inputs = layers_module.Input((3,))
    outputs = layers_module.Dense(2)(inputs)
    model = training_module.Model(inputs, outputs)
    model.compile('rmsprop', 'mse')
    x = np.zeros((32, 3))
    with self.assertRaisesRegex(TypeError, 'Target data is missing..*'):
      model.fit(x)

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_fit_on_wrong_output_type(self):
    """Targets passed inside the input dict (key 'c') are not accepted."""
    inputs1 = layers_module.Input((3,), name='a')
    inputs2 = layers_module.Input((3,), name='b')
    x = layers_module.Concatenate()([inputs1, inputs2])
    outputs = layers_module.Dense(2, name='c')(x)
    model = training_module.Model([inputs1, inputs2], outputs)
    model.compile('rmsprop', 'mse')
    x = np.zeros((32, 3))
    y = np.zeros((32, 2))
    with self.assertRaisesRegex(TypeError, 'Target data is missing..*'):
      model.fit({'a': x, 'b': x, 'c': y})

  @keras_parameterized.run_all_keras_modes
  def test_compile_warning_for_loss_missing_output(self):
    """Compiling with a loss for only one of two outputs must not error."""
    with self.cached_session():
      inp = layers_module.Input(shape=(16,), name='input_a')
      out_1 = layers_module.Dense(8, name='dense_1')(inp)
      out_2 = layers_module.Dense(
          3, activation='softmax', name='dense_2')(
              out_1)
      model = training_module.Model(inputs=[inp], outputs=[out_1, out_2])
      optimizer = RMSPropOptimizer(learning_rate=0.001)

      # 'dense_1' gets metrics but no loss; compile should still succeed.
      model.compile(
          optimizer,
          loss={
              'dense_2': 'categorical_crossentropy',
          },
          metrics={
              'dense_2': 'categorical_accuracy',
              'dense_1': metrics_module.CategoricalAccuracy(),
          },
          run_eagerly=testing_utils.should_run_eagerly())

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_predict_error_with_empty_x(self):
    """predict() on an empty array raises a descriptive ValueError."""
    inputs = layers_module.Input(shape=(2,))
    outputs = layers_module.Dense(4)(inputs)
    model = training_module.Model(inputs=inputs, outputs=outputs)
    model.compile(loss='mse')

    with self.assertRaisesRegex(ValueError,
                                'Unexpected result of `predict_function`.*'):
      model.predict(np.array([]))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_on_batch_error_inconsistent_batch_size(self):
    """*_on_batch rejects inputs/targets with mismatched sample counts."""
    input_node1 = layers_module.Input(shape=(5,))
    input_node2 = layers_module.Input(shape=(5,))
    output_node = layers_module.Concatenate()([input_node1, input_node2])
    output_node = layers_module.Dense(4)(output_node)
    model = training_module.Model([input_node1, input_node2], output_node)
    model.compile(loss='mse')

    with self.assertRaisesRegex(ValueError, 'Data cardinality is ambiguous'):
      model.train_on_batch([np.ones((10, 5)), np.ones((10, 5))],
                           np.ones((11, 4)))

    with self.assertRaisesRegex(ValueError, 'Data cardinality is ambiguous'):
      model.test_on_batch([np.ones((10, 5)), np.ones((10, 5))],
                          np.ones((11, 4)))

    with self.assertRaisesRegex(ValueError, 'Data cardinality is ambiguous'):
      model.predict_on_batch([np.ones((10, 5)), np.ones((11, 5))])


class LossWeightingTest(keras_parameterized.TestCase):
  """Tests for class_weight and sample_weight handling in fit()."""

  @keras_parameterized.run_all_keras_modes
  def test_class_weights(self):
    """Training with per-class weights runs through fit/train_on_batch."""
    num_classes = 5
    batch_size = 5
    epochs = 10
    weighted_class = 3
    weight = .5
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    learning_rate = 0.001

    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=num_classes, input_dim=input_dim)
    model.compile(
        loss='categorical_crossentropy',
        metrics=['acc', metrics_module.CategoricalAccuracy()],
        weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
        optimizer=RMSPropOptimizer(learning_rate=learning_rate),
        run_eagerly=testing_utils.should_run_eagerly())

    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    int_y_test = y_test.copy()
    # convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)
    test_ids = np.where(int_y_test == np.array(weighted_class))[0]

    # Down-weight `weighted_class`; all other classes keep weight 1.
    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = weight

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs // 3,
        verbose=0,
        class_weight=class_weight,
        validation_data=(x_train, y_train))
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs // 2,
        verbose=0,
        class_weight=class_weight)
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs // 2,
        verbose=0,
        class_weight=class_weight,
        validation_split=0.1)

    model.train_on_batch(
        x_train[:batch_size], y_train[:batch_size],
        class_weight=class_weight)
    ref_score = model.evaluate(x_test, y_test, verbose=0)  # pylint: disable=unused-variable
    score = model.evaluate(  # pylint: disable=unused-variable
        x_test[test_ids, :], y_test[test_ids, :], verbose=0)
    # TODO(b/152990697): Fix the class weights test here.
    # self.assertLess(score[0], ref_score[0])

  @keras_parameterized.run_all_keras_modes
  def test_temporal_sample_weights(self):
    """Per-timestep sample weights (sample_weight_mode='temporal')."""
    num_classes = 5
    batch_size = 5
    epochs = 10
    weighted_class = 3
    weight = 10.
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    timesteps = 3
    learning_rate = 0.001

    with self.cached_session():
      model = sequential.Sequential()
      model.add(
          layers_module.TimeDistributed(
              layers_module.Dense(num_classes),
              input_shape=(timesteps, input_dim)))
      model.add(layers_module.Activation('softmax'))

      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=test_samples,
          input_shape=(input_dim,),
          num_classes=num_classes)
      int_y_test = y_test.copy()
      int_y_train = y_train.copy()
      # convert class vectors to binary class matrices
      y_train = np_utils.to_categorical(y_train, num_classes)
      y_test = np_utils.to_categorical(y_test, num_classes)
      test_ids = np.where(int_y_test == np.array(weighted_class))[0]

      # Up-weight samples of `weighted_class` by a factor of `weight`.
      sample_weight = np.ones((y_train.shape[0]))
      sample_weight[int_y_train == weighted_class] = weight

      # Tile the flat data along a new time axis to form (N, timesteps, ...)
      # sequences, so weights can be applied per timestep.
      temporal_x_train = np.reshape(x_train,
                                    (len(x_train), 1, x_train.shape[1]))
      temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1)
      temporal_x_test = np.reshape(x_test,
                                   (len(x_test), 1, x_test.shape[1]))
      temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1)

      temporal_y_train = np.reshape(y_train,
                                    (len(y_train), 1, y_train.shape[1]))
      temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1)
      temporal_y_test = np.reshape(y_test,
                                   (len(y_test), 1, y_test.shape[1]))
      temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1)

      temporal_sample_weight = np.reshape(sample_weight,
                                          (len(sample_weight), 1))
      temporal_sample_weight = np.repeat(
          temporal_sample_weight, timesteps, axis=1)

      model.compile(
          RMSPropOptimizer(learning_rate=learning_rate),
          loss='categorical_crossentropy',
          metrics=['acc', metrics_module.CategoricalAccuracy()],
          weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
          sample_weight_mode='temporal',
          run_eagerly=testing_utils.should_run_eagerly())

      model.fit(
          temporal_x_train,
          temporal_y_train,
          batch_size=batch_size,
          epochs=epochs // 3,
          verbose=0,
sample_weight=temporal_sample_weight) model.fit( temporal_x_train, temporal_y_train, batch_size=batch_size, epochs=epochs // 3, verbose=0, sample_weight=temporal_sample_weight, validation_split=0.1) model.train_on_batch( temporal_x_train[:batch_size], temporal_y_train[:batch_size], sample_weight=temporal_sample_weight[:batch_size]) model.test_on_batch( temporal_x_train[:batch_size], temporal_y_train[:batch_size], sample_weight=temporal_sample_weight[:batch_size]) ref_score = model.evaluate(temporal_x_test, temporal_y_test, verbose=0) if not tf.executing_eagerly(): score = model.evaluate( temporal_x_test[test_ids], temporal_y_test[test_ids], verbose=0) self.assertLess(score[0], ref_score[0]) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types(exclude_models='sequential') def test_fit_with_incorrect_weights(self): input_a = layers_module.Input(shape=(3,), name='input_a') input_b = layers_module.Input(shape=(3,), name='input_b') dense = layers_module.Dense(2, name='output_1') dropout = layers_module.Dropout(0.5, name='output_2') branch_a = [input_a, dense] branch_b = [input_b, dense, dropout] model = testing_utils.get_multi_io_model(branch_a, branch_b) model.compile( optimizer='adam', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) x = np.random.random((10, 3)) y = np.random.random((10, 2)) with self.assertRaises(ValueError): model.fit([x, x], [y, y], epochs=1, sample_weight={'unknown': x}) with self.assertRaises(ValueError): model.fit([x, x], [y, y], epochs=1, class_weight={'unknown': 1}) @keras_parameterized.run_all_keras_modes def test_default_sample_weight(self): """Verifies that fit works without having to set sample_weight.""" num_classes = 5 input_dim = 5 timesteps = 3 learning_rate = 0.001 with self.cached_session(): model = sequential.Sequential() model.add( layers_module.TimeDistributed( layers_module.Dense(num_classes), input_shape=(timesteps, input_dim))) x = np.random.random((10, timesteps, input_dim)) y = 
np.random.random((10, timesteps, num_classes)) optimizer = RMSPropOptimizer(learning_rate=learning_rate) # sample_weight_mode is a list and mode value is None model.compile( optimizer, loss='mse', sample_weight_mode=[None], run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a list and mode value is `temporal` model.compile( optimizer, loss='mse', sample_weight_mode=['temporal'], run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a dict and mode value is None model.compile( optimizer, loss='mse', sample_weight_mode={'time_distributed': None}, run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a dict and mode value is `temporal` model.compile( optimizer, loss='mse', sample_weight_mode={'time_distributed': 'temporal'}, run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a not a list/dict and mode value is None model.compile( optimizer, loss='mse', sample_weight_mode=None, run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, batch_size=10) # sample_weight_mode is a not a list/dict and mode value is `temporal` model.compile( optimizer, loss='mse', sample_weight_mode='temporal', run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, epochs=1, batch_size=10) def test_sample_weight_tensor(self): """Tests that sample weight may be defined as a tensor in the graph.""" with tf.compat.v1.get_default_graph().as_default(): # Create a simple pass-through model inputs = layers_module.Input(shape=1, name='input_layer') model = training_module.Model(inputs=inputs, outputs=inputs) model.compile( loss='mean_absolute_error', optimizer='adam') # Prepare sample weights iterator tensor sample_weights = tf.constant( [[0, .4, 1, 1], [2, .4, .3, 1]]) dataset = tf.data.Dataset.from_tensor_slices(sample_weights) 
      # Each get_next() call yields the next row of weights, so consecutive
      # sess.run calls below see different per-sample weights.
      sample_weights = tf.compat.v1.data.make_one_shot_iterator(
          dataset).get_next()
      sample_weights = training_utils_v1.standardize_sample_weights(
          sample_weights, model.output_names)

      # Update model loss with sample weight tensor.
      model._compile_weights_loss_and_weighted_metrics(sample_weights)

      feeds = {'input_layer:0': [[0], [0], [0], [0]],
               'input_layer_target:0': [[1], [1], [1], [1]]}
      with self.cached_session() as sess:
        # First run consumes weight row [0, .4, 1, 1]; the MAE per sample is 1,
        # so the weighted mean loss is (0 + .4 + 1 + 1) / 4.
        self.assertAllClose(
            (.4 + 1 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds))

        # Second run consumes weight row [2, .4, .3, 1].
        self.assertAllClose(
            (2+ .4 + .3 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds))


@keras_parameterized.run_all_keras_modes
class MaskingTest(keras_parameterized.TestCase):
  """Tests that Masking layers zero out the loss on masked timesteps."""

  def _get_model(self, input_shape=None):
    # Masking(mask_value=0) followed by a TimeDistributed all-ones Dense;
    # masked (all-zero) timesteps should not contribute to the loss.
    layers = [
        layers_module.Masking(mask_value=0),
        layers_module.TimeDistributed(
            layers_module.Dense(1, kernel_initializer='one'))
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape)
    model.compile(
        loss='mse',
        optimizer=RMSPropOptimizer(learning_rate=0.001),
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  @keras_parameterized.run_with_all_model_types
  def test_masking(self):
    model = self._get_model(input_shape=(2, 1))
    # Second sample is entirely masked; first predicts its target exactly,
    # so the batch loss must be 0.
    x = np.array([[[1], [1]], [[0], [0]]])
    y = np.array([[[1], [1]], [[1], [1]]])
    loss = model.train_on_batch(x, y)
    self.assertEqual(loss, 0)

  @keras_parameterized.run_with_all_model_types(exclude_models='functional')
  def test_masking_deferred(self):
    # Same as test_masking, but with a deferred (unknown) input shape.
    model = self._get_model()
    x = np.array([[[1], [1]], [[0], [0]]])
    y = np.array([[[1], [1]], [[1], [1]]])
    loss = model.train_on_batch(x, y)
    self.assertEqual(loss, 0)

  def test_mask_argument_in_layer(self):
    # Test that the mask argument gets correctly passed to a layer in the
    # functional API.
    class CustomMaskedLayer(layers_module.Layer):
      """Pass-through layer that asserts it received a mask."""

      def __init__(self):
        super(CustomMaskedLayer, self).__init__()
        self.supports_masking = True

      def call(self, inputs, mask=None):
        # The Masking layer upstream must have produced a mask.
        assert mask is not None
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    x = np.random.random((5, 3))
    inputs = layers_module.Input((3,))
    masked = layers_module.Masking(mask_value=0)(inputs)
    outputs = CustomMaskedLayer()(masked)
    model = training_module.Model(inputs, outputs)
    model.compile(
        loss='mse',
        optimizer=RMSPropOptimizer(learning_rate=0.001),
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.random.random((5, 3))
    model.train_on_batch(x, y)


@keras_parameterized.run_all_keras_modes
class TestDynamicTrainability(keras_parameterized.TestCase):
  """Tests toggling `trainable` on layers/models after construction."""

  def test_trainable_warning(self):
    x = np.random.random((5, 3))
    y = np.random.random((5, 2))

    model = sequential.Sequential()
    model.add(layers_module.Dense(2, input_dim=3))
    model.trainable = False
    model.compile(
        'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    # Flipping trainable after compile should warn on the next train call.
    model.trainable = True
    model.train_on_batch(x, y)
    self.assertRaises(Warning)

  def test_trainable_argument(self):
    with self.cached_session():
      x = np.random.random((5, 3))
      y = np.random.random((5, 2))

      model = sequential.Sequential()
      model.add(layers_module.Dense(2, input_dim=3, trainable=False))
      model.compile(
          'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
      out = model.predict(x)
      # Training a frozen layer must not change its predictions.
      model.train_on_batch(x, y)
      out_2 = model.predict(x)
      self.assertAllClose(out, out_2)

      # test with nesting
      inputs = layers_module.Input(shape=(3,))
      output = model(inputs)
      model = training_module.Model(inputs, output)
      model.compile(
          'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
      out = model.predict(x)
      model.train_on_batch(x, y)
      out_2 = model.predict(x)
      self.assertAllClose(out, out_2)

  def test_layer_trainability_switch(self):
    # with constructor argument, in Sequential
    model = sequential.Sequential()
    model.add(layers_module.Dense(2, trainable=False, input_dim=1))
    self.assertListEqual(model.trainable_weights, [])

    # by setting the `trainable` argument, in Sequential
    model = sequential.Sequential()
    layer = layers_module.Dense(2, input_dim=1)
    model.add(layer)
    self.assertListEqual(model.trainable_weights, layer.trainable_weights)
    layer.trainable = False
    self.assertListEqual(model.trainable_weights, [])

    # with constructor argument, in Model
    x = layers_module.Input(shape=(1,))
    y = layers_module.Dense(2, trainable=False)(x)
    model = training_module.Model(x, y)
    self.assertListEqual(model.trainable_weights, [])

    # by setting the `trainable` argument, in Model
    x = layers_module.Input(shape=(1,))
    layer = layers_module.Dense(2)
    y = layer(x)
    model = training_module.Model(x, y)
    self.assertListEqual(model.trainable_weights, layer.trainable_weights)
    layer.trainable = False
    self.assertListEqual(model.trainable_weights, [])

  def test_model_trainability_switch(self):
    # a non-trainable model has no trainable weights
    x = layers_module.Input(shape=(1,))
    y = layers_module.Dense(2)(x)
    model = training_module.Model(x, y)
    model.trainable = False
    self.assertListEqual(model.trainable_weights, [])

    # same for Sequential
    model = sequential.Sequential()
    model.add(layers_module.Dense(2, input_dim=1))
    model.trainable = False
    self.assertListEqual(model.trainable_weights, [])

  def test_nested_model_trainability(self):
    # Trainability must propagate through every nesting combination of
    # Sequential and functional Model, in both directions.

    # a Sequential inside a Model
    inner_model = sequential.Sequential()
    inner_model.add(layers_module.Dense(2, input_dim=1))
    x = layers_module.Input(shape=(1,))
    y = inner_model(x)
    outer_model = training_module.Model(x, y)
    self.assertListEqual(outer_model.trainable_weights,
                         inner_model.trainable_weights)
    inner_model.trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])

    # a Sequential inside a Sequential
    inner_model = sequential.Sequential()
    inner_model.add(layers_module.Dense(2, input_dim=1))
    outer_model = sequential.Sequential()
    outer_model.add(inner_model)
    self.assertListEqual(outer_model.trainable_weights,
                         inner_model.trainable_weights)
    inner_model.trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])

    # a Model inside a Model
    x = layers_module.Input(shape=(1,))
    y = layers_module.Dense(2)(x)
    inner_model = training_module.Model(x, y)
    x = layers_module.Input(shape=(1,))
    y = inner_model(x)
    outer_model = training_module.Model(x, y)
    self.assertListEqual(outer_model.trainable_weights,
                         inner_model.trainable_weights)
    inner_model.trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])

    # a Model inside a Sequential
    x = layers_module.Input(shape=(1,))
    y = layers_module.Dense(2)(x)
    inner_model = training_module.Model(x, y)
    outer_model = sequential.Sequential()
    outer_model.add(inner_model)
    self.assertListEqual(outer_model.trainable_weights,
                         inner_model.trainable_weights)
    inner_model.trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    self.assertListEqual(outer_model.trainable_weights, [])

  def test_gan_workflow(self):
    # GAN-style setup: one shared layer frozen in model1, trainable in
    # model2. `trainable` is captured at compile time per model.
    shared_layer = layers_module.BatchNormalization()

    inputs1 = input_layer.Input(10)
    outputs1 = shared_layer(inputs1)
    model1 = training_module.Model(inputs1, outputs1)
    shared_layer.trainable = False
    model1.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    inputs2 = input_layer.Input(10)
    outputs2 = shared_layer(inputs2)
    model2 = training_module.Model(inputs2, outputs2)
    shared_layer.trainable = True
    model2.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    x, y = np.ones((10, 10)), np.ones((10, 10))

    out1_0 = model1.predict_on_batch(x)
    # Test with validation data
    model.fit([input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
              validation_data=([input_a_tf, input_b_tf],
                               [output_d_tf, output_e_tf]),
              epochs=1, steps_per_epoch=2, validation_steps=2, verbose=0)
    # Test evaluation / prediction methods
    model.evaluate([input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
                   steps=2, verbose=0)
    model.predict([input_a_tf, input_b_tf], steps=2)
    model.test_on_batch([input_a_tf, input_b_tf],
                        [output_d_tf, output_e_tf])

  @tf_test_util.run_deprecated_v1
  def test_model_with_input_feed_tensor(self):
    """We test building a model with a TF variable as input.

    We should be able to call fit, evaluate, predict,
    by only passing them data for the placeholder inputs in the model.
    """
    with tf.Graph().as_default(), self.cached_session():
      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))

      output_a_np = np.random.random((10, 4))
      output_b_np = np.random.random((10, 3))

      # Input `a` is backed by a variable, so only input_b data is fed.
      input_v = tf.Variable(input_a_np, dtype='float32')
      self.evaluate(tf.compat.v1.variables_initializer([input_v]))
      a = input_layer.Input(tensor=input_v)
      b = input_layer.Input(shape=(3,), name='input_b')

      a_2 = layers_module.Dense(4, name='dense_1')(a)
      dp = layers_module.Dropout(0.5, name='dropout')
      b_2 = dp(b)

      model = training_module.Model([a, b], [a_2, b_2])
      model.summary()

      optimizer = 'rmsprop'
      loss = 'mse'
      loss_weights = [1., 0.5]
      model.compile(optimizer, loss, metrics=['mean_squared_error'],
                    loss_weights=loss_weights,
                    sample_weight_mode=None)

      # test train_on_batch
      out = model.train_on_batch(input_b_np,
                                 [output_a_np, output_b_np])
      out = model.train_on_batch({'input_b': input_b_np},
                                 [output_a_np, output_b_np])
      out = model.test_on_batch({'input_b': input_b_np},
                                [output_a_np, output_b_np])
      out = model.predict_on_batch({'input_b': input_b_np})

      # test fit
      out = model.fit({'input_b': input_b_np},
                      [output_a_np, output_b_np], epochs=1, batch_size=10)
      out = model.fit(input_b_np,
                      [output_a_np, output_b_np], epochs=1, batch_size=10)

      # test evaluate
      out = model.evaluate({'input_b': input_b_np},
                           [output_a_np, output_b_np], batch_size=10)
      out = model.evaluate(input_b_np,
                           [output_a_np, output_b_np], batch_size=10)

      # test predict
      out = model.predict({'input_b': input_b_np}, batch_size=10)
      out = model.predict(input_b_np, batch_size=10)
      self.assertEqual(len(out), 2)

      # Now test a model with a single input
      # i.e. we don't pass any data to fit the model.
      self.evaluate(tf.compat.v1.variables_initializer([input_v]))
      a = input_layer.Input(tensor=input_v)
      a_2 = layers_module.Dense(4, name='dense_1')(a)
      a_2 = layers_module.Dropout(0.5, name='dropout')(a_2)
      model = training_module.Model(a, a_2)
      model.summary()

      optimizer = 'rmsprop'
      loss = 'mse'
      model.compile(optimizer, loss, metrics=['mean_squared_error'])

      # test train_on_batch
      out = model.train_on_batch(None, output_a_np)
      out = model.train_on_batch(None, output_a_np)
      out = model.test_on_batch(None, output_a_np)
      out = model.predict_on_batch(None)
      out = model.train_on_batch([], output_a_np)
      out = model.train_on_batch({}, output_a_np)

      # test fit
      _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
      _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)

      # test evaluate
      _ = model.evaluate(None, output_a_np, steps=3)
      _ = model.evaluate(None, output_a_np, steps=3)

      # test predict
      out = model.predict(None, steps=3)
      out = model.predict(None, steps=3)
      self.assertEqual(out.shape, (10 * 3, 4))

      # Same, without learning phase
      # i.e. we don't pass any data to fit the model.
      self.evaluate(tf.compat.v1.variables_initializer([input_v]))
      a = input_layer.Input(tensor=input_v)
      a_2 = layers_module.Dense(4, name='dense_1')(a)
      model = training_module.Model(a, a_2)
      model.summary()

      optimizer = 'rmsprop'
      loss = 'mse'
      model.compile(optimizer, loss, metrics=['mean_squared_error'])

      # test train_on_batch
      out = model.train_on_batch(None, output_a_np)
      out = model.train_on_batch(None, output_a_np)
      out = model.test_on_batch(None, output_a_np)
      out = model.predict_on_batch(None)
      out = model.train_on_batch([], output_a_np)
      out = model.train_on_batch({}, output_a_np)

      # test fit
      _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
      _ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)

      # test evaluate
      _ = model.evaluate(None, output_a_np, steps=10)
      _ = model.evaluate(None, output_a_np, steps=10)

      # test predict
      out = model.predict(None, steps=3)
      out = model.predict(None, steps=3)
      # 3 steps, batch of 10 each, 4 output units.
      self.assertEqual(out.shape, (10 * 3, 4))

  @keras_parameterized.run_all_keras_modes
  def test_model_with_partial_loss(self):
    # A loss is configured for only one of the two outputs; training and
    # evaluation must still work with a single target array.
    with self.cached_session():
      a = input_layer.Input(shape=(3,), name='input_a')
      a_2 = layers_module.Dense(4, name='dense_1')(a)
      dp = layers_module.Dropout(0.5, name='dropout')
      a_3 = dp(a_2)
      model = training_module.Model(a, [a_2, a_3])

      optimizer = 'rmsprop'
      loss = {'dropout': 'mse'}
      model.compile(optimizer, loss, metrics=['mae'])

      input_a_np = np.random.random((10, 3))
      output_a_np = np.random.random((10, 4))

      # test train_on_batch
      _ = model.train_on_batch(input_a_np, output_a_np)
      _ = model.test_on_batch(input_a_np, output_a_np)
      # fit
      _ = model.fit(input_a_np, output_a_np)
      # evaluate
      _ = model.evaluate(input_a_np, output_a_np)

      # Same without dropout.
      a = input_layer.Input(shape=(3,), name='input_a')
      a_2 = layers_module.Dense(4, name='dense_1')(a)
      a_3 = layers_module.Dense(4, name='dense_2')(a_2)
      model = training_module.Model(a, [a_2, a_3])

      optimizer = 'rmsprop'
      loss = {'dense_2': 'mse'}
      model.compile(optimizer, loss, metrics={'dense_1': 'mae'})

      # test train_on_batch
      _ = model.train_on_batch(input_a_np, output_a_np)
      _ = model.test_on_batch(input_a_np, output_a_np)
      # fit
      _ = model.fit(input_a_np, output_a_np)
      # evaluate
      _ = model.evaluate(input_a_np, output_a_np)

  def test_model_with_external_loss(self):
    # Models whose only losses are regularizers or add_loss terms must train
    # and evaluate with loss=None and no targets.
    with tf.Graph().as_default(), self.cached_session():
      # None loss, only regularization loss.
      a = input_layer.Input(shape=(3,), name='input_a')
      a_2 = layers_module.Dense(
          4, name='dense_1', kernel_regularizer='l1', bias_regularizer='l2')(
              a)
      dp = layers_module.Dropout(0.5, name='dropout')
      a_3 = dp(a_2)

      model = training_module.Model(a, [a_2, a_3])

      optimizer = 'rmsprop'
      loss = None
      model.compile(optimizer, loss, metrics=['mae'])

      input_a_np = np.random.random((10, 3))

      # test train_on_batch
      out = model.train_on_batch(input_a_np, None)
      out = model.test_on_batch(input_a_np, None)
      # fit
      out = model.fit(input_a_np, None)
      # evaluate
      out = model.evaluate(input_a_np, None)

      # No dropout, external loss.
      a = input_layer.Input(shape=(3,), name='input_a')
      a_2 = layers_module.Dense(4, name='dense_1')(a)
      a_3 = layers_module.Dense(4, name='dense_2')(a)

      model = training_module.Model(a, [a_2, a_3])
      model.add_loss(backend.mean(a_3 + a_2))

      optimizer = 'rmsprop'
      loss = None
      model.compile(optimizer, loss, metrics=['mae'])

      # test train_on_batch
      out = model.train_on_batch(input_a_np, None)
      out = model.test_on_batch(input_a_np, None)
      # fit
      out = model.fit(input_a_np, None)
      # evaluate
      out = model.evaluate(input_a_np, None)

      # Test model with no external data at all.
      # Variable-backed input + add_loss: no inputs and no targets needed.
      input_v = tf.Variable(input_a_np, dtype='float32')
      self.evaluate(tf.compat.v1.variables_initializer([input_v]))
      a = input_layer.Input(tensor=input_v)
      a_2 = layers_module.Dense(4, name='dense_1')(a)
      a_2 = layers_module.Dropout(0.5, name='dropout')(a_2)
      model = training_module.Model(a, a_2)
      model.add_loss(backend.mean(a_2))

      model.compile(optimizer='rmsprop',
                    loss=None,
                    metrics=['mean_squared_error'])

      # test train_on_batch
      out = model.train_on_batch(None, None)
      out = model.test_on_batch(None, None)
      out = model.predict_on_batch(None)

      # Test multi-output model with no external data at all.
      self.evaluate(tf.compat.v1.variables_initializer([input_v]))
      a = input_layer.Input(tensor=input_v)
      a_1 = layers_module.Dense(4, name='dense_1')(a)
      a_2 = layers_module.Dropout(0.5, name='dropout')(a_1)
      model = training_module.Model(a, [a_1, a_2])
      model.add_loss(backend.mean(a_2))

      model.compile(optimizer='rmsprop',
                    loss=None,
                    metrics=['mean_squared_error'])

      # test train_on_batch
      out = model.train_on_batch(None, None)
      out = model.test_on_batch(None, None)
      out = model.predict_on_batch(None)

      out = model.predict(None, steps=3)
      self.assertEqual(len(out), 2)
      self.assertEqual(out[0].shape, (10 * 3, 4))
      self.assertEqual(out[1].shape, (10 * 3, 4))

  def test_target_tensors(self):
    # target_tensors may be given as a list, a single tensor, or a dict
    # keyed by output name; invalid shapes/keys must raise.
    with tf.Graph().as_default(), self.cached_session():
      # single-output, as list
      model = sequential.Sequential()
      model.add(layers_module.Dense(4, input_shape=(4,), name='dense'))
      input_val = np.random.random((10, 4))
      target_val = np.random.random((10, 4))
      target = backend.variable(target_val)
      model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
      model.train_on_batch(input_val, None)

      # single-output, as single tensor
      model.compile(optimizer='rmsprop', loss='mse', target_tensors=target)
      model.train_on_batch(input_val, None)

      # single-output, as dict
      model.compile(optimizer='rmsprop',
                    loss='mse',
                    target_tensors={'dense': target})
      model.train_on_batch(input_val, None)

      # test invalid arguments
      with self.assertRaises(TypeError):
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors=set())
      with self.assertRaises(ValueError):
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors=[target, target])
      with self.assertRaises(ValueError):
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors={'dense2': None})
      with self.assertRaises(ValueError):
        # Passing both a target tensor and target data is an error.
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors=[target])
        model.train_on_batch(input_val, target_val)

      # multi-output, as list
      input_val = np.random.random((10, 4))
      target_val_a = np.random.random((10, 4))
      target_val_b = np.random.random((10, 4))
      target_a = backend.variable(target_val_a)
      target_b = backend.variable(target_val_b)

      inputs = layers_module.Input(shape=(4,))
      output_a = layers_module.Dense(4, name='dense_a')(inputs)
      output_b = layers_module.Dense(4, name='dense_b')(inputs)
      model = training_module.Model(inputs, [output_a, output_b])
      model.compile(optimizer='rmsprop', loss='mse',
                    target_tensors=[target_a, target_b])
      model.train_on_batch(input_val, None)

      # multi-output, as dict
      model.compile(optimizer='rmsprop', loss='mse',
                    target_tensors={'dense_a': target_a,
                                    'dense_b': target_b})
      model.train_on_batch(input_val, None)

      # test with sample weights
      model.compile(
          optimizer='rmsprop',
          loss='mse',
          metrics=['mae', metrics_module.CategoricalAccuracy()],
          target_tensors=[target_a, target_b])
      model.train_on_batch(input_val, None,
                           sample_weight={'dense_a': np.random.random((10,))})

  def test_model_custom_target_tensors(self):
    with tf.Graph().as_default(), self.cached_session():
      a = input_layer.Input(shape=(3,), name='input_a')
      b = input_layer.Input(shape=(3,), name='input_b')

      a_2 = layers_module.Dense(4, name='dense_1')(a)
      dp = layers_module.Dropout(0.5, name='dropout')
      b_2 = dp(b)

      # y2 deliberately has an incompatible shape to trigger errors below.
      y = backend.placeholder([10, 4], name='y')
      y1 = backend.placeholder([10, 3], name='y1')
      y2 = backend.placeholder([7, 5], name='y2')

      model = training_module.Model([a, b], [a_2, b_2])

      optimizer = 'rmsprop'
      loss = 'mse'
      loss_weights = [1., 0.5]

      # test list of target tensors
      with self.assertRaises(ValueError):
        # Three target tensors for two outputs must raise.
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                      sample_weight_mode=None, target_tensors=[y, y1, y2])
      model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                    sample_weight_mode=None, target_tensors=[y, y1])
      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))

      output_a_np = np.random.random((10, 4))
      output_b_np = np.random.random((10, 3))

      _ = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np], {
                                   'dense_1': np.random.random((10,)),
                                   'dropout': np.random.random((10,))
                               })
      # test dictionary of target_tensors
      with self.assertRaises(ValueError):
        # Key that matches no output name must raise.
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                      sample_weight_mode=None,
                      target_tensors={'does_not_exist': y2})
      # test dictionary of target_tensors
      model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                    sample_weight_mode=None,
                    target_tensors={'dense_1': y, 'dropout': y1})
      _ = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np], {
                                   'dense_1': np.random.random((10,)),
                                   'dropout': np.random.random((10,))
                               })

      # test with custom TF placeholder as target
      pl_target_a = tf.compat.v1.placeholder('float32', shape=(None, 4))
      model.compile(optimizer='rmsprop', loss='mse',
                    target_tensors={'dense_1': pl_target_a})
      model.train_on_batch([input_a_np, input_b_np],
                           [output_a_np, output_b_np])


class TestTrainingWithMetrics(keras_parameterized.TestCase):
  """Training tests related to metrics."""

  @keras_parameterized.run_all_keras_modes
  def test_metrics_names(self):
    a = layers_module.Input(shape=(3,), name='input_a')
    b = layers_module.Input(shape=(3,), name='input_b')

    dense = layers_module.Dense(4, name='dense')
    c = dense(a)
    d = dense(b)
    e = layers_module.Dropout(0.5, name='dropout')(c)

    model = training_module.Model([a, b], [d, e])

    optimizer = RMSPropOptimizer(learning_rate=0.001)
    metrics = ['mse', metrics_module.BinaryAccuracy()]
    model.compile(
        optimizer,
        loss='mae',
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly())

    # Metric name abbreviation differs between eager and v1 graph mode.
    mse_metric = 'mse' if tf.executing_eagerly() else 'mean_squared_error'
    reference_metric_names = [
        'loss', 'dense_loss', 'dropout_loss', 'dense_' + mse_metric,
        'dense_binary_accuracy', 'dropout_' + mse_metric,
        'dropout_binary_accuracy'
    ]

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_d_np = np.random.random((10, 4))
    output_e_np = np.random.random((10, 4))

    model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
              epochs=1,
              batch_size=5)
    self.assertEqual(reference_metric_names, model.metrics_names)

  @keras_parameterized.run_all_keras_modes
  def test_metric_state_reset_between_fit_and_evaluate(self):
    model = sequential.Sequential()
    model.add(layers_module.Dense(3, activation='relu', input_dim=4))
    model.add(layers_module.Dense(1, activation='sigmoid'))
    acc_obj = metrics_module.BinaryAccuracy()
    model.compile(
        loss='mae',
        metrics=[acc_obj],
        optimizer=RMSPropOptimizer(learning_rate=0.001),
        run_eagerly=testing_utils.should_run_eagerly())

    x_train = np.random.random((100, 4))
    y_train = np.random.random((100, 1))
    model.fit(x_train, y_train, batch_size=5, epochs=2)
    # `count` reflects samples seen in the last epoch only (100), proving the
    # metric was reset between epochs...
    self.assertEqual(self.evaluate(acc_obj.count), 100)

    x_test = np.random.random((10, 4))
    y_test = np.random.random((10, 1))
    model.evaluate(x_test, y_test, batch_size=5)
    # ...and again between fit and evaluate (10 test samples).
    self.assertEqual(self.evaluate(acc_obj.count), 10)

  @keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
  @keras_parameterized.run_all_keras_modes
  def test_metrics_valid_compile_input_formats(self):
    inp_1 = layers_module.Input(shape=(1,), name='input_1')
    inp_2 = layers_module.Input(shape=(1,), name='input_2')
    x = layers_module.Dense(3, kernel_initializer='ones', trainable=False)
    out_1 = layers_module.Dense(
        1, kernel_initializer='ones', name='output_1', trainable=False)
    out_2 = layers_module.Dense(
        1, kernel_initializer='ones', name='output_2', trainable=False)
    branch_a = [inp_1, x, out_1]
    branch_b = [inp_2, x, out_2]
    model = testing_utils.get_multi_io_model(branch_a, branch_b)

    # list of metrics.
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[metrics_module.MeanSquaredError()],
        weighted_metrics=[metrics_module.MeanSquaredError()],
        run_eagerly=testing_utils.should_run_eagerly())

    # list of list of metrics.
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[
            metrics_module.MeanSquaredError(),
            [metrics_module.MeanSquaredError(),
             metrics_module.Accuracy()]
        ],
        weighted_metrics=[
            metrics_module.MeanSquaredError(),
            [metrics_module.MeanSquaredError(),
             metrics_module.Accuracy()]
        ],
        run_eagerly=testing_utils.should_run_eagerly())

    # dict of metrics.
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics={
            'output_1': metrics_module.MeanSquaredError(),
            'output_2': [
                metrics_module.MeanSquaredError(),
                metrics_module.Accuracy()
            ],
        },
        weighted_metrics={
            'output_1': metrics_module.MeanSquaredError(),
            'output_2': [
                metrics_module.MeanSquaredError(),
                metrics_module.Accuracy()
            ],
        },
        run_eagerly=testing_utils.should_run_eagerly())

  @keras_parameterized.run_all_keras_modes
  def test_metrics_masking(self):
    np.random.seed(1337)
    model = sequential.Sequential()
    model.add(layers_module.Masking(mask_value=0, input_shape=(2, 1)))
    model.add(
        layers_module.TimeDistributed(
            layers_module.Dense(1, kernel_initializer='ones')))
    model.compile(
        RMSPropOptimizer(learning_rate=0.001),
        loss='mse',
        weighted_metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())

    # verify that masking is applied.
    x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
    y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
    scores = model.train_on_batch(x, y)
    self.assertArrayNear(scores, [0.25, 0.75], 0.1)

    # verify that masking is combined with sample weights.
w = np.array([3, 2, 4]) scores = model.train_on_batch(x, y, sample_weight=w) self.assertArrayNear(scores, [0.3328, 0.8], 0.001) @keras_parameterized.run_all_keras_modes def test_add_metric_with_tensor_on_model(self): x = layers_module.Input(shape=(1,)) y = layers_module.Dense(1, kernel_initializer='ones')(x) model = training_module.Model(x, y) model.add_metric( tf.reduce_sum(y), name='metric_1', aggregation='mean') if tf.executing_eagerly(): # This is not a use case in v1 graph mode. mean_result = metrics_module.Mean()(y) with self.assertRaisesRegex( ValueError, 'Expected a symbolic Tensor for the metric value'): model.add_metric(mean_result, name='metric_2') else: with self.assertRaisesRegex( ValueError, 'Using the result of calling a `Metric` object '): with backend.get_graph().as_default(): model.add_metric(metrics_module.Mean(name='metric_2')(y)) model.compile( 'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) inputs = np.ones(shape=(10, 1)) targets = np.ones(shape=(10, 1)) history = model.fit( inputs, targets, epochs=2, batch_size=5, validation_data=(inputs, targets)) self.assertEqual(history.history['metric_1'][-1], 5) self.assertEqual(history.history['val_metric_1'][-1], 5) eval_results = model.evaluate(inputs, targets, batch_size=5) self.assertEqual(eval_results[-1], 5) model.predict(inputs, batch_size=5) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) @keras_parameterized.run_all_keras_modes def test_add_metric_in_model_call(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') self.mean = metrics_module.Mean(name='metric_1') def call(self, x): self.add_metric( tf.reduce_sum(x), name='metric_2', aggregation='mean') # Provide same name as in the instance created in __init__ # for eager mode self.add_metric(self.mean(x), name='metric_1') return self.dense1(x) model = TestModel() 
model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0) self.assertAlmostEqual(history.history['val_metric_1'][-1], 1, 0) self.assertAlmostEqual(history.history['metric_2'][-1], 5, 0) self.assertAlmostEqual(history.history['val_metric_2'][-1], 5, 0) eval_results = model.evaluate(x, y, batch_size=5) self.assertAlmostEqual(eval_results[1], 1, 0) self.assertAlmostEqual(eval_results[2], 5, 0) model.predict(x, batch_size=5) model.train_on_batch(x, y) model.test_on_batch(x, y) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes def test_add_metric_in_layer_call(self): class TestLayer(layers_module.Layer): def build(self, input_shape): self.a = self.add_variable( 'a', (1, 1), initializer='ones', trainable=False) self.built = True def call(self, inputs): self.add_metric( tf.reduce_sum(inputs), name='metric_1', aggregation='mean') return inputs + 1 layers = [ TestLayer(input_shape=(1,)), layers_module.Dense(2, kernel_initializer='ones') ] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertEqual(history.history['metric_1'][-1], 5) self.assertAlmostEqual(history.history['val_metric_1'][-1], 5, 0) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_model_metrics_list(self): class LayerWithAddMetric(layers_module.Layer): def __init__(self): super(LayerWithAddMetric, self).__init__() self.dense = layers_module.Dense(1, kernel_initializer='ones') def __call__(self, inputs): outputs = self.dense(inputs) self.add_metric( 
tf.reduce_sum(outputs), name='metric_1', aggregation='mean') return outputs class LayerWithNestedAddMetricLayer(layers_module.Layer): def __init__(self): super(LayerWithNestedAddMetricLayer, self).__init__() self.layer = LayerWithAddMetric() def call(self, inputs): outputs = self.layer(inputs) self.add_metric( tf.reduce_sum(outputs), name='metric_2', aggregation='mean') return outputs x = layers_module.Input(shape=(1,)) y = LayerWithNestedAddMetricLayer()(x) model = training_module.Model(x, y) model.add_metric( tf.reduce_sum(y), name='metric_3', aggregation='mean') if tf.executing_eagerly(): # This is not a use case in v1 graph mode. mean_result = metrics_module.Mean()(y) with self.assertRaisesRegex( ValueError, 'Expected a symbolic Tensor for the metric value'): model.add_metric(mean_result, name='metric_4') else: with self.assertRaisesRegex( ValueError, 'Using the result of calling a `Metric` object '): with backend.get_graph().as_default(): model.add_metric(metrics_module.Mean(name='metric_4')(y)) model.compile( 'sgd', loss='mse', metrics=[metrics_module.Accuracy('metric_4')], run_eagerly=testing_utils.should_run_eagerly()) model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=10) # Verify that the metrics added using `compile` and `add_metric` API are # included self.assertEqual([m.name for m in model.metrics], ['loss', 'metric_4', 'metric_2', 'metric_1', 'metric_3']) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_model_metrics_list_in_call(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') def call(self, x): self.add_metric( tf.reduce_sum(x), name='metric_1', aggregation='mean') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), metrics=[metrics_module.Accuracy('acc')], run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = 
np.ones(shape=(10, 2)) model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertEqual([m.name for m in model.metrics], ['loss', 'acc', 'metric_1']) @keras_parameterized.run_all_keras_modes def test_multiple_add_metric_calls(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') self.mean1 = metrics_module.Mean(name='metric_1') self.mean2 = metrics_module.Mean(name='metric_2') def call(self, x): self.add_metric(self.mean2(x), name='metric_2') self.add_metric(self.mean1(x), name='metric_1') self.add_metric( tf.reduce_sum(x), name='metric_3', aggregation='mean') return self.dense1(x) model = TestModel() self.assertListEqual([m.name for m in model.metrics], ['metric_1', 'metric_2']) model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0) self.assertAlmostEqual(history.history['metric_2'][-1], 1, 0) self.assertAlmostEqual(history.history['metric_3'][-1], 5, 0) eval_results = model.evaluate(x, y, batch_size=5) self.assertArrayNear(eval_results[1:4], [1, 1, 5], 0.1) model.predict(x, batch_size=5) model.train_on_batch(x, y) model.test_on_batch(x, y) @keras_parameterized.run_all_keras_modes def test_multiple_add_metric_calls_layer(self): class TestLayer(layers_module.Layer): def __init__(self): super(TestLayer, self).__init__(name='test_layer') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') self.m1 = metrics_module.Mean(name='m_1') self.m2 = [ metrics_module.Mean(name='m_2'), metrics_module.Mean(name='m_3') ] self.m3 = { 'mean4': metrics_module.Mean(name='m_4'), 'mean5': metrics_module.Mean(name='m_5') } def call(self, x): self.add_metric(self.m2[0](x)) 
self.add_metric(self.m2[1](x)) self.add_metric(self.m1(x)) self.add_metric(self.m3['mean4'](x)) self.add_metric(self.m3['mean5'](x)) self.add_metric(tf.reduce_sum(x), name='m_6', aggregation='mean') return self.dense1(x) layer = TestLayer() self.assertListEqual([m.name for m in layer.metrics], ['m_1', 'm_2', 'm_3', 'm_4', 'm_5']) layer(np.ones((10, 10))) self.assertListEqual([m.name for m in layer.metrics], ['m_1', 'm_2', 'm_3', 'm_4', 'm_5', 'm_6']) @keras_parameterized.run_all_keras_modes def test_duplicate_metric_name_in_add_metric(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') self.mean = metrics_module.Mean(name='metric_1') self.mean2 = metrics_module.Mean(name='metric_1') def call(self, x): self.add_metric(self.mean(x), name='metric_1') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) with self.assertRaisesRegex( ValueError, 'Please provide different names for the metrics you have added. 
' 'We found 2 metrics with the name: "metric_1"'): model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) @keras_parameterized.run_all_keras_modes def test_add_metric_without_name(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') def call(self, x): self.add_metric(tf.reduce_sum(x), aggregation='mean') return self.dense1(x) model = TestModel() model.compile( loss='mse', optimizer=RMSPropOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) x = np.ones(shape=(10, 1)) y = np.ones(shape=(10, 2)) with self.assertRaisesRegex(ValueError, 'Please provide a name for your metric like'): model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y)) @keras_parameterized.run_all_keras_modes def test_add_metric_correctness(self): inputs = input_layer.Input(shape=(1,)) targets = input_layer.Input(shape=(1,)) class Bias(layers_module.Layer): def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') self.mae = metrics_module.MeanAbsoluteError(name='mae_1') def call(self, inputs): inputs, targets = inputs outputs = inputs + self.bias self.add_metric(self.mae(targets, outputs), name='mae_1') return outputs outputs = Bias()([inputs, targets]) model = training_module.Model([inputs, targets], outputs) model.add_metric( metrics_module.mean_absolute_error(targets, outputs), name='mae_2', aggregation='mean') model.compile( loss='mae', optimizer=optimizer_v2.gradient_descent.SGD(0.1), metrics=[metrics_module.MeanAbsoluteError(name='mae_3')], run_eagerly=testing_utils.should_run_eagerly()) x = np.array([[0.], [1.], [2.]]) y = np.array([[0.5], [2.], [3.5]]) history = model.fit([x, y], y, batch_size=3, epochs=5) expected_val = [1., 0.9, 0.8, 0.7, 0.6] for key in ['loss', 'mae_1', 'mae_2', 'mae_3']: self.assertAllClose(history.history[key], expected_val, 1e-3) @keras_parameterized.run_all_keras_modes def 
test_add_metric_order(self): class MyLayer(layers_module.Layer): def call(self, inputs, training=None, mask=None): self.add_metric( tf.ones([32]) * 2.0, name='two', aggregation='mean') return inputs class MyModel(training_module.Model): def __init__(self, **kwargs): super(MyModel, self).__init__(**kwargs) self._sampler = MyLayer(name='sampler') def call(self, inputs, training=None, mask=None): z = self._sampler(inputs) self.add_metric( tf.ones([32]) * 1.0, name='one', aggregation='mean') self.add_metric( tf.ones([32]) * 3.0, name='three', aggregation='mean') return z xdata = np.random.uniform(size=[32, 16]).astype(np.float32) dataset_train = tf.data.Dataset.from_tensor_slices((xdata, xdata)) dataset_train = dataset_train.batch(32, drop_remainder=True) model = MyModel() model.compile( optimizer='sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit(dataset_train, epochs=3) self.assertDictEqual( history.history, { 'loss': [0.0, 0.0, 0.0], 'three': [3.0, 3.0, 3.0], 'two': [2.0, 2.0, 2.0], 'one': [1.0, 1.0, 1.0] }) @keras_parameterized.run_all_keras_modes def test_add_metric_aggregation_mean(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') def call(self, x): self.add_metric( tf.reduce_sum(x), name='metric_1', aggregation='mean') return self.dense1(x) model = TestModel() model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly()) model.fit(np.ones(shape=(10, 1)), np.ones(shape=(10, 2)), batch_size=5) @keras_parameterized.run_all_keras_modes def test_add_metric_aggregation_none(self): class TestModel(training_module.Model): def __init__(self): super(TestModel, self).__init__(name='test_model') self.dense1 = layers_module.Dense(2, kernel_initializer='ones') self.mean = metrics_module.Mean(name='metric_1') def call(self, x): self.add_metric(self.mean(x), name='metric_1', aggregation=None) 
return self.dense1(x) model = TestModel() model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly()) model.fit(np.ones(shape=(10, 1)), np.ones(shape=(10, 2)), batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def DISABLED_test_add_metric_invalid_aggregation(self): # TODO(psv): Reenable test once it is fixed. x = layers_module.Input(shape=(1,)) y = layers_module.Dense(1, kernel_initializer='ones')(x) model = training_module.Model(x, y) with self.assertRaisesRegex(ValueError, 'only `mean` sample-wise metric aggregation'): model.add_metric( tf.reduce_sum(y), name='metric_1', aggregation='sum') with self.assertRaisesRegex(ValueError, 'only `mean` sample-wise metric aggregation'): model.add_metric( tf.reduce_sum(y), name='metric_1', aggregation=None) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_calling_evaluate_in_callback_during_fit(self): # Check fix for a bug that caused `evaluate` to hit a cached dataset # when run from inside a fit callback. 
x = layers_module.Input(shape=(2,)) y = layers_module.Dense(2, kernel_initializer='ones', use_bias=False)(x) model = training_module.Model(x, y) ones = np.ones((10, 2), dtype=np.float32) zeros = np.zeros((10, 2), dtype=np.float32) train_ds = tf.data.Dataset.from_tensor_slices( (ones, ones)).batch(5) val_ds_1 = tf.data.Dataset.from_tensor_slices( (ones, ones)).batch(5) val_ds_2 = tf.data.Dataset.from_tensor_slices( (zeros, zeros)).batch(5) model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) class MyCallback(Callback): def on_epoch_end(self, *args, **kwargs): eval_result = self.model.evaluate(val_ds_2) if abs(eval_result) > 1e-7: raise AssertionError( 'Expected to hit the zeros dataset but got high loss value of %s' % eval_result) history = model.fit( train_ds, validation_data=val_ds_1, callbacks=[MyCallback()]) # Evaluate at the end of fit should hit the ones dataset (cached) self.assertGreater(abs(history.history['val_loss'][-1]), 0.1) # Standalone call to evaluate should not hit the cached dataset eval_result = model.evaluate(val_ds_2) self.assertLess(abs(eval_result), 1e-7) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_model_with_nested_compiled_model(self): class LayerWithAddMetric(layers_module.Layer): def __init__(self): super(LayerWithAddMetric, self).__init__() self.dense = layers_module.Dense(1, kernel_initializer='ones') def call(self, inputs): outputs = self.dense(inputs) self.add_metric( tf.reduce_sum(outputs), name='mean', aggregation='mean') return outputs x = layers_module.Input(shape=(1,)) y = LayerWithAddMetric()(x) inner_model = training_module.Model(x, y) inner_model.add_metric( tf.reduce_sum(y), name='mean1', aggregation='mean') inner_model.compile( 'sgd', loss='mse', metrics=[metrics_module.Accuracy('acc')], run_eagerly=testing_utils.should_run_eagerly()) inner_model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=10) self.assertEqual([m.name for m in inner_model.metrics], ['loss', 'acc', 
'mean', 'mean1']) x = layers_module.Input(shape=[1]) y = inner_model(x) outer_model = training_module.Model(x, y) outer_model.add_metric( tf.reduce_sum(y), name='mean2', aggregation='mean') outer_model.compile( 'sgd', loss='mse', metrics=[metrics_module.Accuracy('acc2')], run_eagerly=testing_utils.should_run_eagerly()) outer_model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=10) self.assertEqual([m.name for m in outer_model.metrics], ['loss', 'acc2', 'mean', 'mean1', 'mean2']) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_model_with_metric_class_that_returns_dict(self): x = layers_module.Input(shape=(2,)) y = layers_module.Dense(3)(x) model = training_module.Model(x, y) class DictMetric(metrics_module.Metric): def __init__(self): super(DictMetric, self).__init__() self.sample_count = tf.Variable(0) self.l2_sum = tf.Variable(0.) def update_state(self, y_true, y_pred, sample_weight=None): self.l2_sum.assign_add( tf.reduce_sum(tf.square(y_true - y_pred))) self.sample_count.assign_add(tf.shape(y_true)[0]) def reset_state(self): self.sample_count.assign(0) self.l2_sum.assign(0.) 
      def result(self):
        # Returning a dict from `result()` makes each key surface as its own
        # named metric in history/logs (see assertions below).
        mse = self.l2_sum / tf.cast(self.sample_count, 'float32')
        rmse = tf.sqrt(mse)
        return {'my_mse': mse, 'my_rmse': rmse}

    model.compile('sgd', 'mse', metrics=['mae', DictMetric()],
                  run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit(np.ones((10, 2)), np.ones((10, 3)))
    # The dict-valued metric is flattened into the standard history keys.
    self.assertEqual(list(history.history.keys()),
                     ['loss', 'mae', 'my_mse', 'my_rmse'])
    # evaluate / train_on_batch / test_on_batch must all honor both the
    # list-style return and `return_dict=True`, with the same flattened keys.
    list_evaluate_res = model.evaluate(
        np.ones((10, 2)), np.ones((10, 3)))
    self.assertEqual(len(list_evaluate_res), 4)
    dict_evaluate_res = model.evaluate(
        np.ones((10, 2)), np.ones((10, 3)), return_dict=True)
    self.assertEqual(list(dict_evaluate_res.keys()),
                     ['loss', 'mae', 'my_mse', 'my_rmse'])
    list_train_on_batch_res = model.train_on_batch(
        np.ones((10, 2)), np.ones((10, 3)))
    self.assertEqual(len(list_train_on_batch_res), 4)
    dict_train_on_batch_res = model.train_on_batch(
        np.ones((10, 2)), np.ones((10, 3)), return_dict=True)
    self.assertEqual(list(dict_train_on_batch_res.keys()),
                     ['loss', 'mae', 'my_mse', 'my_rmse'])
    list_test_on_batch_res = model.test_on_batch(
        np.ones((10, 2)), np.ones((10, 3)))
    self.assertEqual(len(list_test_on_batch_res), 4)
    dict_test_on_batch_res = model.test_on_batch(
        np.ones((10, 2)), np.ones((10, 3)), return_dict=True)
    self.assertEqual(list(dict_test_on_batch_res.keys()),
                     ['loss', 'mae', 'my_mse', 'my_rmse'])


class BareUpdateLayer(layers_module.Layer):
  """Layer whose `call` performs a raw, un-wrapped `assign_add` update."""

  def build(self, input_shape):
    # Non-trainable int32 scalar used to count how many times `call` ran.
    self.counter = self.add_weight(
        'counter',
        dtype='int32',
        shape=(),
        initializer='zeros',
        trainable=False)

  def call(self, inputs):
    tf.compat.v1.assign_add(self.counter, 1)
    return tf.cast(self.counter, inputs.dtype) * inputs


class LambdaUpdateLayer(layers_module.Layer):
  """Layer that registers its counter update via `add_update(lambda)`."""

  def build(self, input_shape):
    # Same call-count weight as BareUpdateLayer; only the update mechanism
    # differs between the two layers.
    self.counter = self.add_weight(
        'counter',
        dtype='int32',
        shape=(),
        initializer='zeros',
        trainable=False)

  def call(self, inputs):
    # Make sure update isn't run twice.
    self.add_update(lambda: tf.compat.v1.assign_add(self.counter, 1))
    return tf.cast(self.counter, inputs.dtype) * inputs


class NestedUpdateLayer(layers_module.Layer):
  """Layer that delegates both its update and its counter to an inner layer."""

  def build(self, input_shape):
    self.layer = BareUpdateLayer()
    self.layer.build(input_shape)

  @property
  def counter(self):
    # Expose the inner layer's counter so tests can treat all three update
    # layers uniformly.
    return self.layer.counter

  def call(self, inputs):
    return self.layer(inputs)


class SubgraphUpdateLayer(layers_module.Layer):
  """Layer whose counter update only runs in training mode."""

  def build(self, input_shape):
    self.counter = self.add_weight(
        'counter',
        dtype='int32',
        shape=(),
        initializer='zeros',
        trainable=False)

  def call(self, inputs, training=None):
    # Fall back to the global learning phase when `training` isn't passed.
    if training is None:
      training = backend.learning_phase()

    if training:
      self.counter.assign(self.counter + 1)
    return inputs


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestAutoUpdates(keras_parameterized.TestCase):
  """Checks that layer updates run exactly once per executed batch."""

  @keras_parameterized.run_with_all_model_types
  @parameterized.named_parameters(
      ('bare_update', BareUpdateLayer),
      ('lambda_update', LambdaUpdateLayer),
      ('nested_update', NestedUpdateLayer))
  def test_updates_in_model(self, layer_builder):
    layer = layer_builder()
    x, y = np.ones((10, 10)), np.ones((10, 1))
    model = testing_utils.get_model_from_layers(
        [layer, layers_module.Dense(1)], input_shape=(10,))
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, y, batch_size=2, epochs=1)
    # 10 samples / batch_size 2 = 5 batches, so the update ran 5 times.
    self.assertEqual(self.evaluate(layer.counter), 5)

  @keras_parameterized.run_with_all_model_types
  def test_lambda_updates_trainable_false(self):
    x, y = np.ones((10, 10)), np.ones((10, 1))
    layer = LambdaUpdateLayer()
    model = testing_utils.get_model_from_layers(
        [layer, layers_module.Dense(1)], input_shape=(10,))
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, y, batch_size=2, epochs=1)
    self.assertEqual(self.evaluate(layer.counter), 5)
    # With trainable=False the lambda update must be skipped: the counter
    # stays at 5 after another full epoch.
    layer.trainable = False
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, y, batch_size=2, epochs=1)
    self.assertEqual(self.evaluate(layer.counter), 5)

  @keras_parameterized.run_with_all_model_types
  def test_subgraph_updates_in_model(self):
    layer = SubgraphUpdateLayer()
    x, y = np.ones((10, 10)), np.ones((10, 1))
    model = testing_utils.get_model_from_layers(
        [layer, layers_module.Dense(1)], input_shape=(10,))
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, y, batch_size=2, epochs=1)
    # `fit` runs in training mode, so the training-only update fires once
    # per batch (5 batches).
    self.assertEqual(self.evaluate(layer.counter), 5)

  @parameterized.named_parameters(
      ('bare_update', BareUpdateLayer),
      ('lambda_update', LambdaUpdateLayer),
      ('nested_update', NestedUpdateLayer))
  def test_updates_standalone_layer(self, layer_builder):
    # Calling the layer directly (outside a Model) must also run the update
    # exactly once.
    layer = layer_builder()
    y = layer(np.ones((10, 10)))
    self.evaluate(layer.counter.initializer)
    self.evaluate(y)
    self.assertEqual(self.evaluate(layer.counter), 1)

  def test_trainable_false_standalone_layer(self):
    layer = LambdaUpdateLayer()
    y = layer(np.ones((10, 10)))
    self.evaluate(layer.counter.initializer)
    self.evaluate(y)
    self.assertEqual(self.evaluate(layer.counter), 1)
    # Second call with trainable=False: counter must not advance.
    layer.trainable = False
    y = layer(np.ones((10, 10)))
    self.evaluate(y)
    self.assertEqual(self.evaluate(layer.counter), 1)

  @keras_parameterized.run_with_all_model_types
  def test_batchnorm_trainable_false(self):
    bn = layers_module.BatchNormalization()
    model = testing_utils.get_model_from_layers([bn, layers_module.Dense(1)],
                                                input_shape=(10,))
    bn.trainable = False
    model.compile(
        'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    x, y = np.ones((10, 10)), np.ones((10, 1))
    model.fit(x, y, batch_size=2, epochs=1)
    # Frozen BatchNormalization must not update its moving statistics:
    # they keep their initial values (mean=0, variance=1).
    self.assertAllEqual(self.evaluate(bn.moving_mean), np.zeros((10,)))
    self.assertAllEqual(self.evaluate(bn.moving_variance), np.ones((10,)))


class TestFunctionTracing(keras_parameterized.TestCase):
  """Tests counting tf.function retracing during fit/evaluate."""

  def _seq_model_and_data(self):
    # Shared fixture: a compiled one-layer Sequential model plus random data.
    model = sequential.Sequential([layers_module.Dense(4, activation='relu')])
    model.compile(loss='mse', optimizer='rmsprop')
    x = np.random.random((10, 6))
    y = np.random.random((10, 4))
    return model, x, y
  @keras_parameterized.run_all_keras_modes(
      always_skip_v1=True, always_skip_eager=True)
  def test_no_tracing_between_epoch(self):
    """`fit` must not retrace its train function on every epoch."""
    model, x, y = self._seq_model_and_data()

    logging.set_verbosity(1)
    with self.assertLogs(level=1) as logs:
      model.fit(x, y, epochs=10, batch_size=5, validation_data=(x, y))
    new_func_graph = 'INFO:absl:Creating new FuncGraph for Python function'
    # Exactly 9 FuncGraph creations over 10 epochs — i.e. a bounded number
    # of traces up front, not one per epoch.
    self.assertEqual(sum(new_func_graph in log for log in logs.output), 9)

  @keras_parameterized.run_all_keras_modes(
      always_skip_v1=True, always_skip_eager=True)
  def test_evaluate_no_cached_data(self):
    """Repeated `evaluate` calls retrace rather than reuse cached data."""
    model, x, y = self._seq_model_and_data()

    new_func_graph = 'INFO:absl:Creating new FuncGraph for Python function'
    logging.set_verbosity(1)
    with self.assertLogs(level=1) as eval_logs:
      for _ in range(6):
        model.evaluate(x, y, batch_size=5)
    # 20 FuncGraph creations across 6 evaluate calls is the expected trace
    # count for this configuration.
    self.assertEqual(sum(new_func_graph in log for log in eval_logs.output),
                     20)


class TestBuildCustomModel(keras_parameterized.TestCase):
  """Tests for `Model.build` on subclassed models with various input specs."""

  @keras_parameterized.run_all_keras_modes
  def test_build_list_of_inputs(self):

    class MyModel(training_module.Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.l1 = layers_module.Dense(1)
        self.l2 = layers_module.Dense(2)

      def call(self, x):
        a, b = x
        return self.l1(a) + self.l2(b)

    # List of tuples
    model = MyModel()
    model.build([(None, 1), (None, 2)])
    self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
    self.assertEqual(model.l2.kernel.shape.as_list(), [2, 2])
    # List of lists
    model = MyModel()
    model.build([[None, 1], [None, 2]])
    self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
    self.assertEqual(model.l2.kernel.shape.as_list(), [2, 2])

  @keras_parameterized.run_all_keras_modes
  def test_build_single_inputs(self):

    class MyModel(training_module.Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.l1 = layers_module.Dense(1)

      def call(self, x):
        return self.l1(x)

    # A single shape may be given as a tuple or as a list.
    model = MyModel()
    model.build((None, 1))
    self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
    model = MyModel()
    model.build([None, 1])
    self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])

  @keras_parameterized.run_all_keras_modes
  def test_build_dict_inputs(self):

    class MyModel(training_module.Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.l1 = layers_module.Dense(1)

      def call(self, inputs):
        return self.l1(inputs['x'])

    # Dict-of-shapes input spec: keys must match what `call` looks up.
    model = MyModel()
    model.build({'x': [None, 16]})
    self.assertEqual(model.l1.kernel.shape.as_list(), [16, 1])

  def test_save_top_level_model_weights_h5(self):
    # Weights added directly on the Model (not on a sub-layer) must survive
    # an H5 save/load round trip, with and without `by_name`.

    class MyModel(training_module.Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.class_token = self.add_weight(shape=(1,), name='class_token')
        self.inner_layer = layers_module.Dense(1)

      def call(self, inputs):
        return self.inner_layer(inputs) * self.class_token

    h5_file = tempfile.mktemp('.h5')
    m1 = MyModel()
    m1.build((1, 1))
    m1.save_weights(h5_file)

    m2 = MyModel()
    m2.build((1, 1))
    m2.load_weights(h5_file)
    self.assertAllEqual(m1.get_weights(), m2.get_weights())
    m2.load_weights(h5_file, by_name=True)
    self.assertAllEqual(m1.get_weights(), m2.get_weights())


class ScalarDataModelTest(keras_parameterized.TestCase):
  """Tests for models fed rank-1 (scalar-per-sample) data."""

  def test_scalar_loss_reduction(self):
    # Rank-1 inputs/targets must give the same loss/metrics as the
    # equivalent rank-2 (column-vector) data.

    class MyModel(training_module.Model):

      def __init__(self):
        super().__init__()
        self.w = self.add_weight((), initializer='ones')
        self.b = self.add_weight((), initializer='zeros')

      def call(self, inputs):
        return inputs * self.w + self.b

    model = MyModel()
    model.compile(optimizer_v2.gradient_descent.SGD(1e-2), loss='mse',
                  metrics=['binary_accuracy'])
    # learn y = x * 2 + 0.5
    x = np.array([3, 5, 5, 3, 5], dtype='float32')
    y = x * 2 + 0.5
    x2d = np.expand_dims(x, axis=-1)
    y2d = np.expand_dims(y, axis=-1)

    loss, acc = model.evaluate(x, y)
    loss2d, acc2d = model.evaluate(x2d, y2d)
    self.assertAllClose([loss, acc], [loss2d, acc2d], atol=1e-6)
    model.fit(x, y, epochs=20)
    preds = model.predict(x)
    # Predictions preserve the rank-1 shape of the input.
    self.assertEqual(preds.shape, (5,))
    self.assertAllClose(preds, y, atol=2e-1)


if __name__ == '__main__':
  tf.test.main()
140,775
34.820865
103
py
keras
keras-master/keras/engine/deferred_sequential_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to deferred-build `Sequential` models."""

import tensorflow.compat.v2 as tf

import os
import unittest

import numpy as np

import keras
from keras import keras_parameterized
from keras import testing_utils

# h5py is optional; H5 saving tests are skipped when it is unavailable.
try:
  import h5py  # pylint:disable=g-import-not-at-top
except ImportError:
  h5py = None


class TestDeferredSequential(keras_parameterized.TestCase):
  """Tests for Sequential models built lazily (no Input layer / input_shape)."""

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_build_behavior(self):
    # Test graph network creation after __call__
    model = get_model()
    model(np.random.random((2, 6)))
    self.assertLen(model.weights, 4)
    self.assertTrue(model._is_graph_network)
    self.assertLen(model.inputs, 1)
    self.assertLen(model.outputs, 1)
    # First __call__ builds with the concrete batch size (2).
    self.assertEqual(model.inputs[0].shape.as_list(), [2, 6])
    self.assertEqual(model.outputs[0].shape.as_list(), [2, 2])

    # Test effect of new __call__ with a different shape
    model(np.random.random((3, 6)))
    self.assertLen(model.inputs, 1)
    self.assertLen(model.outputs, 1)
    # After a shape change, the rebuilt network uses a dynamic batch dim.
    self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
    self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
    model(np.random.random((4, 6)))
    self.assertLen(model.inputs, 1)
    self.assertLen(model.outputs, 1)
    self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
    self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])

    # Test graph network creation after build
    model = get_model()
    model.build((None, 6))
    self.assertLen(model.weights, 4)
    self.assertTrue(model._is_graph_network)
    self.assertLen(model.inputs, 1)
    self.assertLen(model.outputs, 1)
    self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
    self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])

    # Test graph network creation after compile/fit
    model = get_model()
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(np.zeros((2, 6)), np.zeros((2, 2)))
    self.assertLen(model.weights, 4)
    self.assertTrue(model._is_graph_network)
    self.assertLen(model.inputs, 1)
    self.assertLen(model.outputs, 1)
    # Inconsistency here: with eager `fit`, the model is built with shape
    # (2, 6), but with graph function `fit`, it is built with shape `(None, 6)`.
    # This is likely due to our assumption "the batch size should be dynamic"
    # at the level of `Model`. TODO(fchollet): investigate and resolve.
    self.assertEqual(model.inputs[0].shape.as_list()[-1], 6)
    self.assertEqual(model.outputs[0].shape.as_list()[-1], 2)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_add_and_pop(self):
    # pop() and add() on a built model must keep it built and keep the
    # layer/weight counts consistent.
    model = get_model()
    model.build((None, 6))
    self.assertTrue(model.built)
    self.assertTrue(model._is_graph_network)
    self.assertLen(model.layers, 3)
    self.assertLen(model.weights, 4)
    model.pop()
    self.assertTrue(model.built)
    self.assertTrue(model._is_graph_network)
    self.assertLen(model.layers, 2)
    self.assertLen(model.weights, 2)
    model.add(keras.layers.Dense(2))
    self.assertTrue(model.built)
    self.assertTrue(model._is_graph_network)
    self.assertLen(model.layers, 3)
    self.assertLen(model.weights, 4)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_feature_extraction(self):
    # This tests layer connectivity reset when rebuilding
    model = get_model()
    model(np.random.random((3, 6)))  # First build
    model(np.random.random((4, 6)))  # Triggers a rebuild
    # Classic feature extractor pattern
    extractor = keras.Model(inputs=model.inputs,
                            outputs=[layer.output for layer in model.layers])
    # Check that inputs and outputs are connected
    _ = extractor(np.random.random((4, 6)))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_saving_savedmodel(self):
    model = get_model()
    model(np.random.random((3, 6)))  # Build model

    path = os.path.join(self.get_temp_dir(), 'model_path')
    model.save(path)
    new_model = keras.models.load_model(path)
    # Compare the full flattened layer list (including the model itself)
    # name-by-name and weight-by-weight.
    model_layers = model._flatten_layers(include_self=True, recursive=False)
    new_model_layers = new_model._flatten_layers(
        include_self=True, recursive=False)
    for layer1, layer2 in zip(model_layers, new_model_layers):
      self.assertEqual(layer1.name, layer2.name)
      for w1, w2 in zip(layer1.weights, layer2.weights):
        self.assertAllClose(w1, w2)

  @unittest.skipIf(h5py is None, 'Test requires h5py')
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_saving_h5(self):
    path = os.path.join(self.get_temp_dir(), 'model_path.h5')
    model = get_model()
    model(np.random.random((3, 6)))  # Build model

    # NOTE(review): duplicate of the `path` assignment above — harmless
    # (same value) but redundant; candidate for cleanup.
    path = os.path.join(self.get_temp_dir(), 'model_path.h5')
    model.save(path)
    new_model = keras.models.load_model(path)
    model_layers = model._flatten_layers(include_self=True, recursive=False)
    new_model_layers = new_model._flatten_layers(
        include_self=True, recursive=False)
    for layer1, layer2 in zip(model_layers, new_model_layers):
      self.assertEqual(layer1.name, layer2.name)
      for w1, w2 in zip(layer1.weights, layer2.weights):
        self.assertAllClose(w1, w2)

  @keras_parameterized.run_all_keras_modes
  def test_shared_layer(self):
    # This tests that preexisting layer connectivity is preserved
    # when auto-building graph networks
    shared_layer = keras.layers.Dense(2)
    m1 = keras.Sequential([shared_layer])
    m1(np.random.random((3, 6)))
    m2 = keras.Sequential([shared_layer])
    m2(np.random.random((3, 6)))
    # Nesting case
    shared_layer = keras.layers.Dense(2)
    m1 = keras.Sequential([shared_layer])
    m2 = keras.Sequential([shared_layer, m1])
    m2(np.random.random((3, 2)))

  @keras_parameterized.run_all_keras_modes
  def test_loss_layer(self):

    class LossLayer(keras.layers.Layer):
      # Adds the sum of its inputs as an activity loss; passes inputs through.

      def call(self, inputs):
        self.add_loss(tf.reduce_sum(inputs))
        return inputs

    # Test loss layer alone
    model = keras.Sequential([LossLayer()])
    model.compile('rmsprop', run_eagerly=testing_utils.should_run_eagerly())
    loss = model.train_on_batch(np.ones((2, 2)))
    self.assertAllClose(loss, 4.)
    model(np.random.random((4, 2)))  # Triggers a rebuild
    loss = model.train_on_batch(np.ones((1, 2)))
    self.assertAllClose(loss, 2.)

    # Test loss layer combined with another layer
    model = keras.Sequential(
        [keras.layers.Dense(1, kernel_initializer='ones'),
         LossLayer()])
    model.compile('rmsprop', run_eagerly=testing_utils.should_run_eagerly())
    loss = model.train_on_batch(np.ones((2, 2)))
    self.assertAllClose(loss, 4.)
    model(np.random.random((4, 2)))  # Triggers a rebuild
    loss = model.train_on_batch(np.ones((1, 2)))
    # Training has updated the Dense kernel, so the loss has decreased.
    self.assertLess(loss, 2.)

    # Test loss layer combined with external loss
    model = keras.Sequential(
        [keras.layers.Dense(1, kernel_initializer='ones'),
         LossLayer()])
    model.compile(
        'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    loss = model.train_on_batch(np.ones((2, 2)), np.ones((2, 2)))
    model(np.random.random((4, 2)))  # Triggers a rebuild
    loss = model.train_on_batch(np.ones((1, 2)), np.ones((1, 2)))


def get_model():
  """Returns an unbuilt 3-layer Sequential test model (Dense/Dropout/Dense)."""
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(2, name='first_layer'))
  model.add(keras.layers.Dropout(0.3, name='dp'))
  model.add(keras.layers.Dense(2, name='last_layer'))
  return model


if __name__ == '__main__':
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
8,405
37.559633
80
py
keras
keras-master/keras/engine/__init__.py
0
0
0
py
keras
keras-master/keras/engine/training_v1.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """V1 Training-related part of the Keras engine.""" # pylint: disable=g-classes-have-attributes import tensorflow.compat.v2 as tf import collections import warnings import numpy as np from keras import backend from keras import losses from keras import metrics as metrics_module from keras import optimizer_v1 from keras import optimizers from keras.distribute import distributed_training_utils from keras.distribute import distributed_training_utils_v1 from keras.engine import base_layer from keras.engine import training as training_lib from keras.engine import training_arrays_v1 from keras.engine import training_distributed_v1 from keras.engine import training_eager_v1 from keras.engine import training_generator_v1 from keras.engine import training_utils from keras.engine import training_utils_v1 from keras.mixed_precision import loss_scale_optimizer from keras.mixed_precision import policy from keras.optimizer_v2 import optimizer_v2 from keras.saving import saving_utils from keras.saving.saved_model import model_serialization from keras.utils import data_utils from keras.utils import layer_utils from keras.utils import losses_utils from keras.utils import tf_inspect from keras.utils import tf_utils from keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as 
logging try: from scipy.sparse import issparse # pylint: disable=g-import-not-at-top except ImportError: issparse = None class Model(training_lib.Model): """`Model` groups layers into an object with training and inference features. There are two ways to instantiate a `Model`: 1 - With the "functional API", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super().__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super().__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` """ def __init__(self, *args, **kwargs): super(Model, self).__init__(*args, **kwargs) # initializing _distribution_strategy here since it is possible to call # predict on a model without compiling it. 
self._distribution_strategy = None self._compile_time_distribution_strategy = None if (tf.compat.v1.executing_eagerly_outside_functions() and tf.distribute.has_strategy()): self._set_strategy( tf.distribute.get_strategy()) # This flag is used to track if the user is using the deprecated path of # passing distribution strategy to compile rather than creating the model # under distribution strategy scope. self._compile_distribution = False self._run_eagerly = None self._experimental_run_tf_function = ( tf.compat.v1.executing_eagerly_outside_functions()) self._v1_compile_was_called = False def _init_batch_counters(self): pass # Batch counters should not be created in legacy graph mode. @tf.__internal__.tracking.no_automatic_dependency_tracking def _set_strategy(self, strategy): self._compile_time_distribution_strategy = strategy def get_weights(self): """Retrieves the weights of the model. Returns: A flat list of Numpy arrays. """ strategy = (self._distribution_strategy or self._compile_time_distribution_strategy) if strategy: with strategy.scope(): return base_layer.Layer.get_weights(self) return base_layer.Layer.get_weights(self) def load_weights(self, filepath, by_name=False, skip_mismatch=False): """Loads all layer weights, either from a TensorFlow or an HDF5 weight file. If `by_name` is False weights are loaded based on the network's topology. This means the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. Only topological loading (`by_name=False`) is supported when loading weights from the TensorFlow format. 
Note that topological loading differs slightly between TensorFlow and HDF5 formats for user-defined classes inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the TensorFlow format loads based on the object-local names of attributes to which layers are assigned in the `Model`'s constructor. Args: filepath: String, path to the weights file to load. For weight files in TensorFlow format, this is the file prefix (the same as was passed to `save_weights`). by_name: Boolean, whether to load weights by name or by topological order. Only topological loading is supported for weight files in TensorFlow format. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weight (only valid when `by_name=True`). Returns: When loading a weight file in TensorFlow format, returns the same status object as `tf.train.Checkpoint.restore`. When graph building, restore ops are run automatically as soon as the network is built (on first call for user-defined classes inheriting from `Model`, immediately if it is already built). When loading weights in HDF5 format, returns `None`. Raises: ImportError: If h5py is not available and the weight file is in HDF5 format. ValueError: If `skip_mismatch` is set to `True` when `by_name` is `False`. 
""" if backend.is_tpu_strategy(self._distribution_strategy): if (self._distribution_strategy.extended.steps_per_run > 1 and (not saving_utils.is_hdf5_filepath(filepath))): # pylint: disable=protected-access raise ValueError('Load weights is not yet supported with TPUStrategy ' 'with steps_per_run greater than 1.') return super(Model, self).load_weights(filepath, by_name, skip_mismatch) @tf.__internal__.tracking.no_automatic_dependency_tracking def compile(self, optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, distribute=None, **kwargs): """Configures the model for training. Args: optimizer: String (name of optimizer) or optimizer instance. See `tf.keras.optimizers`. loss: String (name of objective function), objective function or `tf.keras.losses.Loss` instance. See `tf.keras.losses`. An objective function is any callable with the signature `scalar_loss = fn(y_true, y_pred)`. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. metrics: List of metrics to be evaluated by the model during training and testing. Typically you will use `metrics=['accuracy']`. To specify different metrics for different outputs of a multi-output model, you could also pass a dictionary, such as `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`. You can also pass a list (len = len(outputs)) of lists of metrics such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or `metrics=['accuracy', ['accuracy', 'mse']]`. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. 
If a list, it is expected to have a 1:1 mapping to the model's outputs. If a tensor, it is expected to map output names (strings) to scalar coefficients. sample_weight_mode: If you need to do timestep-wise sample weighting (2D weights), set this to `"temporal"`. `None` defaults to sample-wise weights (1D). If the model has multiple outputs, you can use a different `sample_weight_mode` on each output by passing a dictionary or a list of modes. weighted_metrics: List of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing. target_tensors: By default, Keras will create placeholders for the model's target, which will be fed with the target data during training. If instead you would like to use your own target tensors (in turn, Keras will not expect external Numpy data for these targets at training time), you can specify them via the `target_tensors` argument. It can be a single tensor (for a single-output model), a list of tensors, or a dict mapping output names to target tensors. distribute: NOT SUPPORTED IN TF 2.0, please create and compile the model under distribution strategy scope instead of passing it to compile. **kwargs: Any additional arguments. Raises: ValueError: In case of invalid arguments for `optimizer`, `loss`, `metrics` or `sample_weight_mode`. """ self._assert_built_as_v1() self._run_eagerly = kwargs.pop('run_eagerly', None) self._experimental_run_tf_function = kwargs.pop( 'experimental_run_tf_function', True) self._v1_compile_was_called = True # Prepare Session arguments (legacy). kwargs.pop('cloning', None) # Legacy DistStrat argument, never used. 
self._from_serialized = kwargs.pop('from_serialized', False) allowed_kwargs = {'feed_dict', 'fetches', 'options', 'run_metadata'} unknown_kwargs = set(kwargs.keys()) - allowed_kwargs if unknown_kwargs: raise TypeError( 'Invalid keyword argument(s) in `compile`: %s' % (unknown_kwargs,)) self._function_kwargs = kwargs if self._function_kwargs: self._experimental_run_tf_function = False if self.run_eagerly: raise ValueError( 'Session keyword arguments are not supported ' 'when `run_eagerly=True`. You passed the following ' 'Session arguments: %s' % (self._function_kwargs,)) self._set_optimizer(optimizer) is_any_keras_optimizer_v1 = any( (isinstance(opt, optimizer_v1.Optimizer) and not isinstance(opt, optimizer_v1.TFOptimizer) ) for opt in tf.nest.flatten(self.optimizer)) if is_any_keras_optimizer_v1 and tf.compat.v1.executing_eagerly_outside_functions(): raise ValueError('`tf.compat.v1.keras` Optimizer (', optimizer, ') is ' 'not supported when eager execution is enabled. Use a ' '`tf.keras` Optimizer instead, or disable eager ' 'execution.') if ((target_tensors is not None) or not tf.compat.v1.executing_eagerly_outside_functions()): # Fallback out of things that aren't supported with v2 loops self._experimental_run_tf_function = False if distribute is not None: if tf.__internal__.tf2.enabled() or self._experimental_run_tf_function: raise ValueError( 'Distribute argument in compile is not available in TF 2.0 please ' 'create the model under the distribution strategy scope.') logging.warning('Distribute argument in compile is deprecated please ' 'create the model under the distribution strategy scope.') self._distribution_strategy = distribute self._compile_distribution = True else: if tf.distribute.has_strategy(): # When the user builds the model in the DS scope and cross replica # context we want distribution strategy to be set but when building the # replica copies of the models internally we should not be compiling # with distribution strategy and use the default 
compilation path. if tf.distribute.in_cross_replica_context(): self._distribution_strategy = ( tf.distribute.get_strategy()) if isinstance(self._distribution_strategy, tf.compat.v1.distribute.experimental.ParameterServerStrategy): raise NotImplementedError( '`tf.compat.v1.distribute.experimental.ParameterServerStrategy` ' 'currently only works with the tf.Estimator API') if isinstance(self._distribution_strategy, tf.distribute.experimental.ParameterServerStrategy): raise NotImplementedError( '`tf.distribute.experimental.ParameterServerStrategy` is only ' 'supported in TF2.') if not self._experimental_run_tf_function: self._validate_compile_param_for_distribution_strategy(self.run_eagerly, sample_weight_mode, target_tensors, weighted_metrics) # We've disabled automatic dependency tracking for this method, but do want # to add a checkpoint dependency on the optimizer if it's trackable. if isinstance(self.optimizer, tf.__internal__.tracking.Trackable): self._track_trackable( self.optimizer, name='optimizer', overwrite=True) self.loss = loss or {} self.loss_weights = loss_weights self.sample_weight_mode = sample_weight_mode self._compile_metrics = metrics or [] self._compile_weighted_metrics = weighted_metrics if self.run_eagerly and target_tensors is not None: raise ValueError( 'target_tensors argument is not supported when ' 'running a model eagerly.') # _training_endpoints contains a list of _TrainingEndpoint object, which has # all the model output/target/loss and related metadata. self._training_endpoints = [] # Used to freeze the behavior of the Model once `compile` has been called. self._compiled_trainable_state = self._get_trainable_state() # Set tf.distribute.Strategy specific parameters. self._distributed_model_cache = {} self._distributed_function_cache = {} # Clear any `_eager_losses` that was added. 
self._clear_losses() if (not tf.executing_eagerly() and self._distribution_strategy is not None): # Ensures a Session is created and configured correctly for Distribution # Strategy. backend.configure_and_create_distributed_session( self._distribution_strategy) # Initialize model metric attributes. self._init_metric_attributes() if not self.built or not self.inputs or not self.outputs: # Model is not compilable because it does not know its number of inputs # and outputs, nor their shapes and names. We will compile after the first # time the model gets called on training data. return self._is_compiled = True base_layer.keras_api_gauge.get_cell('compile').set(True) # Prepare list of loss functions, same size of model outputs. self.loss_functions = training_utils_v1.prepare_loss_functions( self.loss, self.output_names) target_tensors = self._process_target_tensor_for_compile(target_tensors) for o, n, l, t in zip(self.outputs, self.output_names, self.loss_functions, target_tensors): endpoint = _TrainingEndpoint(o, n, l) endpoint.create_training_target(t, run_eagerly=self.run_eagerly) self._training_endpoints.append(endpoint) # Prepare list loss weights, same size of model outputs. training_utils_v1.prepare_loss_weights(self._training_endpoints, loss_weights) # Initialization for Eager mode execution. if self.run_eagerly: self._compile_eagerly(metrics, weighted_metrics, sample_weight_mode) return with backend.get_graph().as_default(): # Save all metric attributes per output of the model. self._cache_output_metric_attributes(metrics, weighted_metrics) # Set metric attributes on model. self._set_metric_attributes() # Invoke metric functions (unweighted) for all the outputs. self._handle_metrics( self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), masks=self._prepare_output_masks()) # Prepare sample weight modes. List with the same length as model outputs. 
training_utils_v1.prepare_sample_weight_modes( self._training_endpoints, sample_weight_mode) # Creates the model loss and weighted metrics sub-graphs. self._compile_weights_loss_and_weighted_metrics() # Functions for train, test and predict will # be compiled lazily when required. # This saves time when the user is not using all functions. self.train_function = None self.test_function = None self.predict_function = None # Collected trainable weights, sorted in topological order. self._collected_trainable_weights = self.trainable_weights # Validate all variables were correctly created in distribution scope. if self._distribution_strategy and not self._compile_distribution: for v in self.variables: strategy = self._distribution_strategy if not strategy.extended.variable_created_in_scope(v): raise ValueError( 'Variable (%s) was not created in the distribution strategy ' 'scope of (%s). It is most likely due to not all layers or ' 'the model or optimizer being created outside the distribution ' 'strategy scope. Try to make sure your code looks similar ' 'to the following.\n' 'with strategy.scope():\n' ' model=_create_model()\n' ' model.compile(...)'% (v, strategy)) @tf.__internal__.tracking.no_automatic_dependency_tracking def _init_distributed_function_cache_if_not_compiled(self): if not hasattr(self, '_distributed_function_cache'): self._distributed_function_cache = {} @property def metrics(self): """Returns the model's metrics added using `compile`, `add_metric` APIs.""" metrics = [] if self._is_compiled: if not hasattr(self, '_v1_compile_was_called'): # See b/155687393 for more details, the model is created as a v2 # instance but converted to v1. Fallback to use base Model to retrieve # the metrics. 
return super(Model, self).metrics metrics += self._compile_metric_functions metrics.extend(self._metrics) metrics.extend( _get_metrics_from_layers( list(self._flatten_layers(include_self=False, recursive=False)))) return metrics @property def metrics_names(self): """Returns the model's display labels for all outputs.""" # This property includes all output names including `loss` and per-output # losses for backward compatibility. metrics_names = ['loss'] if self._is_compiled: if not hasattr(self, '_v1_compile_was_called'): # See b/155687393 for more details, the model is created as a v2 # instance but converted to v1. Fallback to use base Model to retrieve # the metrics name return super(Model, self).metrics_names # Add output loss metric names to the metric names list. if len(self._training_endpoints) > 1: metrics_names.extend([ e.loss_name() for e in self._training_endpoints if not e.should_skip_target() ]) # Add all metric names. metrics_names += [m.name for m in self.metrics] return metrics_names @property def run_eagerly(self): """Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly. """ if self._run_eagerly is True and not tf.executing_eagerly(): raise ValueError('You can only set `run_eagerly=True` if eager execution ' 'is enabled.') if not self.dynamic: if self._run_eagerly is None: # Respect `tf.config.run_functions_eagerly` unless # `run_eagerly` was explicitly passed to `compile`. 
return tf.config.functions_run_eagerly() else: return self._run_eagerly else: if not tf.executing_eagerly(): raise ValueError('Your model contains layers that can only be ' 'successfully run in eager execution (layers ' 'constructed with `dynamic=True`). ' 'You must enable eager execution with ' '`tf.enable_eager_execution()`.') if self._run_eagerly is False: # TODO(fchollet): consider using py_func to enable this. raise ValueError('Your model contains layers that can only be ' 'successfully run in eager execution (layers ' 'constructed with `dynamic=True`). ' 'You cannot set `run_eagerly=False`.') return tf.executing_eagerly() @run_eagerly.setter def run_eagerly(self, value): self._run_eagerly = value def _select_training_loop(self, inputs): """Select training loop for fit/eval/predict based on the inputs.""" # TODO(kaftan) or TODO(scottzhu): This check should eventually be nicely # integrated into the data adapters in the v2 loop. We can't do this yet # because we currently have to fall back for unhandled data types. if isinstance(inputs, (tf.compat.v1.data.Iterator, tf.data.Iterator)): raise ValueError('For performance reasons Keras `fit`, `evaluate` and' '`predict` accept tf.data `Datasets` as input but not ' 'iterators that have been manually generated from ' 'Datasets by users. Please directly pass in the ' 'original `Dataset` object instead of passing in ' '`iter(dataset)`.') # Case 1: distribution strategy. if self._distribution_strategy: if self._in_multi_worker_mode(): return training_distributed_v1.DistributionMultiWorkerTrainingLoop( training_distributed_v1.DistributionSingleWorkerTrainingLoop()) else: return training_distributed_v1.DistributionSingleWorkerTrainingLoop() # Case 2: generator-like. Input is Python generator, or Sequence object, # or a non-distributed Dataset or iterator in eager execution. 
if data_utils.is_generator_or_sequence(inputs): return training_generator_v1.GeneratorOrSequenceTrainingLoop() if training_utils_v1.is_eager_dataset_or_iterator(inputs): return training_generator_v1.EagerDatasetOrIteratorTrainingLoop() # Case 3: Symbolic tensors or Numpy array-like. # This includes Datasets and iterators in graph mode (since they # generate symbolic tensors). if self.run_eagerly: return training_generator_v1.GeneratorLikeTrainingLoop() else: return training_arrays_v1.ArrayLikeTrainingLoop() def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs): """Trains the model for a fixed number of epochs (iterations on a dataset). Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset. Should return a tuple of either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - A generator or `keras.utils.Sequence` returning `(inputs, targets)` or `(inputs, targets, sample weights)`. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset, generator, or `keras.utils.Sequence` instance, `y` should not be specified (since targets will be obtained from `x`). batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. 
Do not specify the `batch_size` if your data is in the form of symbolic tensors, datasets, generators, or `keras.utils.Sequence` instances (since they generate batches). epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided. Note that in conjunction with `initial_epoch`, `epochs` is to be understood as "final epoch". The model is not trained for a number of iterations given by `epochs`, but merely until the epoch of index `epochs` is reached. verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (eg, in a production environment). callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. See `tf.keras.callbacks`. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. This argument is not supported when `x` is a dataset, generator or `keras.utils.Sequence` instance. validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`. `validation_data` could be: - tuple `(x_val, y_val)` of Numpy arrays or tensors - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays - dataset For the first two cases, `batch_size` must be provided. For the last case, `validation_steps` could be provided. shuffle: Boolean (whether to shuffle the training data before each epoch) or str (for 'batch'). 
'batch' is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks. Has no effect when `steps_per_epoch` is not `None`. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only). This can be useful to tell the model to "pay more attention" to samples from an under-represented class. sample_weight: Optional Numpy array of weights for the training samples, used for weighting the loss function (during training only). You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. This argument is not supported when `x` is a dataset, generator, or `keras.utils.Sequence` instance, instead provide the sample_weights as the third element of `x`. initial_epoch: Integer. Epoch at which to start training (useful for resuming a previous training run). steps_per_epoch: Integer or `None`. Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and 'steps_per_epoch' is None, the epoch will run until the input dataset is exhausted. This argument is not supported with array inputs. validation_steps: Only relevant if `validation_data` is provided and is a `tf.data` dataset. Total number of steps (batches of samples) to draw before stopping when performing validation at the end of every epoch. 
If 'validation_steps' is None, validation will run until the `validation_data` dataset is exhausted. In the case of a infinite dataset, it will run into a infinite loop. If 'validation_steps' is specified and only part of the dataset will be consumed, the evaluation will start from the beginning of the dataset at each epoch. This ensures that the same validation samples are used every time. validation_freq: Only relevant if validation data is provided. Integer or `collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. max_queue_size: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Used for generator or `keras.utils.Sequence` input only. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. Used for generator or `keras.utils.Sequence` input only. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. **kwargs: Used for backwards compatibility. Returns: A `History` object. Its `History.history` attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). Raises: RuntimeError: If the model was never compiled. 
def evaluate(self,
             x=None,
             y=None,
             batch_size=None,
             verbose=1,
             sample_weight=None,
             steps=None,
             callbacks=None,
             max_queue_size=10,
             workers=1,
             use_multiprocessing=False):
  """Returns the loss value & metrics values for the model in test mode.

  Computation is done in batches (see the `batch_size` arg.)

  Args:
    x: Input data: Numpy array(s), TensorFlow tensor(s), a dict mapping
      input names to arrays/tensors, a `tf.data` dataset, or a generator /
      `keras.utils.Sequence` instance.
    y: Target data, consistent with `x` (no mixing of Numpy inputs with
      tensor targets). Must not be given when `x` is a dataset, generator or
      `keras.utils.Sequence` instance — targets are obtained from `x`.
    batch_size: Integer or `None` (defaults to 32). Do not set it when `x`
      already yields batches (symbolic tensors, datasets, generators,
      `keras.utils.Sequence` instances).
    verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar.
    sample_weight: Optional Numpy array of weights for the test samples,
      used for weighting the loss function. Either a flat (1D) array with
      one weight per sample, or, for temporal data, a 2D array of shape
      `(samples, sequence_length)` (requires
      `sample_weight_mode="temporal"` in `compile()`). Not supported when
      `x` is a dataset; pass sample weights as the third element of `x`.
    steps: Integer or `None`. Total number of steps (batches of samples)
      before declaring the evaluation round finished. With a `tf.data`
      dataset and `steps=None`, evaluation runs until the dataset is
      exhausted. Not supported with array inputs.
    callbacks: List of `keras.callbacks.Callback` instances to apply during
      evaluation.
    max_queue_size: Integer. Generator/`Sequence` input only. Maximum size
      for the generator queue (defaults to 10).
    workers: Integer. Generator/`Sequence` input only. Maximum number of
      processes for process-based threading (defaults to 1; 0 executes the
      generator on the main thread).
    use_multiprocessing: Boolean. Generator/`Sequence` input only. If
      `True`, use process-based threading (defaults to `False`). Avoid
      passing non-picklable arguments to the generator.

  Returns:
    Scalar test loss (if the model has a single output and no metrics) or
    list of scalars (if the model has multiple outputs and/or metrics). The
    attribute `model.metrics_names` will give you the display labels for
    the scalar outputs.

  Raises:
    ValueError: in case of invalid arguments.
  """
  self._assert_built_as_v1()
  base_layer.keras_api_gauge.get_cell('evaluate').set(True)
  self._assert_compile_was_called()
  self._check_call_args('evaluate')

  # The training loop (arrays / dataset / generator) is selected from the
  # input type; all arguments are forwarded to it unchanged.
  training_loop = self._select_training_loop(x)
  loop_kwargs = dict(
      x=x,
      y=y,
      batch_size=batch_size,
      verbose=verbose,
      sample_weight=sample_weight,
      steps=steps,
      callbacks=callbacks,
      max_queue_size=max_queue_size,
      workers=workers,
      use_multiprocessing=use_multiprocessing)
  return training_loop.evaluate(self, **loop_kwargs)
def reset_metrics(self):
  """Resets the state of all metrics tracked by this model."""
  for metric in self._get_training_eval_metrics():
    metric.reset_state()

  # Mirror the reset onto the distributed (cloned) models, if any.
  if self._distribution_strategy:
    distributed_training_utils_v1._reset_metrics(self)  # pylint: disable=protected-access
def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):
  """Test the model on a single batch of samples.

  Args:
    x: Input data. It could be:
      - A Numpy array (or array-like), or a list of arrays (in case the
          model has multiple inputs).
      - A TensorFlow tensor, or a list of tensors (in case the model has
          multiple inputs).
      - A dict mapping input names to the corresponding array/tensors, if
          the model has named inputs.
      - A `tf.data` dataset.
    y: Target data. Like the input data `x`, it could be either Numpy
      array(s) or TensorFlow tensor(s). It should be consistent with `x`
      (you cannot have Numpy inputs and tensor targets, or inversely). If
      `x` is a dataset `y` should not be specified (since targets will be
      obtained from the iterator).
    sample_weight: Optional array of the same length as x, containing
      weights to apply to the model's loss for each sample. In the case of
      temporal data, you can pass a 2D array with shape (samples,
      sequence_length), to apply a different weight to every timestep of
      every sample. In this case you should make sure to specify
      sample_weight_mode="temporal" in compile(). This argument is not
      supported when `x` is a dataset.
    reset_metrics: If `True`, the metrics returned will be only for this
      batch. If `False`, the metrics will be statefully accumulated across
      batches.

  Returns:
    Scalar test loss (if the model has a single output and no metrics)
    or list of scalars (if the model has multiple outputs
    and/or metrics). The attribute `model.metrics_names` will
    give you the display labels for the scalar outputs.

  Raises:
    ValueError: In case of invalid user-provided arguments.
  """
  self._assert_compile_was_called()
  self._check_call_args('test_on_batch')

  # Cross-replica calls are rejected; the supported path is for a
  # distribution strategy to invoke this per replica.
  if (self._distribution_strategy and
      tf.distribute.in_cross_replica_context()):
    raise NotImplementedError('`test_on_batch` is not supported for models '
                              'distributed with tf.distribute.Strategy.')
  # Validate and standardize user data.
  x, y, sample_weights = self._standardize_user_data(
      x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True)

  # If `self._distribution_strategy` is True, then we are in a replica context
  # at this point.
  if self.run_eagerly or self._distribution_strategy:
    # Eager path: losses/metrics come back as a dict and are flattened into
    # the [total_loss, per-output losses, metrics] ordering used everywhere.
    output_dict = training_eager_v1.test_on_batch(
        self,
        x,
        y,
        sample_weights=sample_weights,
        output_loss_metrics=self._output_loss_metrics)
    outputs = (output_dict['total_loss'] + output_dict['output_losses'] +
               output_dict['metrics'])
    outputs = [_non_none_constant_value(v) for v in outputs]  # pylint: disable=protected-access
  else:
    # Graph path: feed a flat list of inputs + targets + sample weights into
    # the compiled test function.
    x = training_utils_v1.ModelInputs(x).as_list()
    inputs = x + list(y or []) + list(sample_weights or [])

    self._update_sample_weight_modes(sample_weights=sample_weights)
    self._make_test_function()
    outputs = self.test_function(inputs)  # pylint: disable=not-callable

  if reset_metrics:
    self.reset_metrics()

  # Unwrap single-element results for convenience.
  if len(outputs) == 1:
    return outputs[0]
  return outputs
def fit_generator(self,
                  generator,
                  steps_per_epoch=None,
                  epochs=1,
                  verbose=1,
                  callbacks=None,
                  validation_data=None,
                  validation_steps=None,
                  validation_freq=1,
                  class_weight=None,
                  max_queue_size=10,
                  workers=1,
                  use_multiprocessing=False,
                  shuffle=True,
                  initial_epoch=0):
  """Fits the model on data yielded batch-by-batch by a Python generator.

  DEPRECATED: `Model.fit` now supports generators, so there is no longer any
  need to use this endpoint.
  """
  # This wrapper only emits a deprecation warning and forwards every
  # argument, unchanged, to `Model.fit`.
  warnings.warn(
      '`model.fit_generator` is deprecated and will be removed in a future '
      'version. Please use `Model.fit`, which supports generators.')
  forwarded = dict(
      steps_per_epoch=steps_per_epoch,
      epochs=epochs,
      verbose=verbose,
      callbacks=callbacks,
      validation_data=validation_data,
      validation_steps=validation_steps,
      validation_freq=validation_freq,
      class_weight=class_weight,
      max_queue_size=max_queue_size,
      workers=workers,
      use_multiprocessing=use_multiprocessing,
      shuffle=shuffle,
      initial_epoch=initial_epoch)
  return self.fit(generator, **forwarded)
def _check_call_args(self, method_name):
  """Check that `call` has only one positional arg.

  Args:
    method_name: Name of the public API (e.g. 'fit') to mention in the
      error message.

  Raises:
    ValueError: If the model's `call` signature declares more than one
      positional argument besides `self` and `training`.
  """
  # Always allow first arg, regardless of arg name.
  fullargspec = self._call_full_argspec
  if fullargspec.defaults:
    positional_args = fullargspec.args[:-len(fullargspec.defaults)]
  else:
    # Copy the list: otherwise the `remove` below would mutate the cached
    # argspec shared with other users of `self._call_full_argspec`.
    positional_args = list(fullargspec.args)
  if 'training' in positional_args:
    positional_args.remove('training')

  # self and first arg can be positional.
  if len(positional_args) > 2:
    extra_args = positional_args[2:]
    raise ValueError(
        'Models passed to `' + method_name + '` can only have `training` '
        'and the first argument in `call` as positional arguments, '
        'found: ' + str(extra_args) + '.')
""" if isinstance(optimizer, (list, tuple)): self.optimizer = [optimizers.get(opt) for opt in optimizer] else: self.optimizer = optimizers.get(optimizer) if isinstance(self._dtype_policy, policy.PolicyV1): loss_scale = self._dtype_policy.loss_scale elif self._dtype_policy.name == 'mixed_float16': loss_scale = 'dynamic' else: loss_scale = None if (loss_scale is not None and not isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer)): if isinstance(self.optimizer, list): raise ValueError('When a dtype policy with a loss scale is used, you ' 'can only pass a single optimizer. Using policy %s ' 'and got optimizers: %s' % self._dtype_policy, self.optimizer) if not isinstance(self.optimizer, optimizer_v2.OptimizerV2): raise ValueError('"optimizer" must be an instance of ' 'tf.keras.optimizers.Optimizer when a dype policy ' 'with a loss scale used, but got: %s. Using policy: ' '%s' % (self.optimizer, self._dtype_policy)) if loss_scale == 'dynamic': self.optimizer = loss_scale_optimizer.LossScaleOptimizer(self.optimizer) else: self.optimizer = loss_scale_optimizer.LossScaleOptimizerV1( self.optimizer, loss_scale) def _prepare_validation_data(self, validation_data, batch_size, validation_steps): """Unpack and check the validation data.""" val_x, val_y, val_sample_weights = training_utils_v1.unpack_validation_data( validation_data) return self._standardize_user_data( val_x, val_y, sample_weight=val_sample_weights, batch_size=batch_size, steps=validation_steps, steps_name='validation_steps') def _validate_compile_param_for_distribution_strategy( self, run_eagerly, sample_weight_mode, target_tensors, weighted_metrics): # Validate that arguments passed by the user to `compile` are supported by # tf.distribute.Strategy. 
def _process_target_tensor_for_compile(self, target_tensors):
  """Normalizes user-provided `target_tensors` into a per-output list.

  Args:
    target_tensors: The `target_tensors` argument passed to `compile`. May
      be a list (one entry per output), a dict keyed by output name, a
      single tensor, or None/empty.

  Returns:
    A list with one entry (tensor or None) per model output, aligned with
    `self.output_names`.

  Raises:
    ValueError: If a list has the wrong length, or a dict contains keys
      that are not output names.
    TypeError: If `target_tensors` has an unsupported type.
  """
  if self.run_eagerly:
    # target tensor is not supported with run_eagerly. Create a list with None
    # as placeholder for each output.
    return [None for _ in self.output_names]

  if target_tensors is not None and not (isinstance(target_tensors, list) and
                                         target_tensors == []):  # pylint: disable=g-explicit-bool-comparison
    if isinstance(target_tensors, list):
      # A list must line up 1:1 with the model outputs.
      if len(target_tensors) != len(self.outputs):
        raise ValueError(
            'When passing a list as `target_tensors`, '
            'it should have one entry per model output. '
            'The model has %s outputs, but you passed target_tensors=%s' %
            (len(self.outputs), target_tensors))
    elif isinstance(target_tensors, dict):
      # A dict is re-ordered into a list following `self.output_names`;
      # outputs without an entry get None.
      unexpected_target_tensor_names = set(target_tensors.keys()).difference(
          self.output_names)
      if unexpected_target_tensor_names:
        raise ValueError(
            'Unknown entry in `target_tensors` dictionary: "{name}". '
            'Only expected the following keys: {keys}'.format(
                name=unexpected_target_tensor_names,
                keys=str(self.output_names)))
      tmp_target_tensors = []
      for name in self.output_names:
        tmp_target_tensors.append(target_tensors.get(name, None))
      target_tensors = tmp_target_tensors
    elif tf.is_tensor(target_tensors):
      # A single tensor is wrapped into a singleton list.
      target_tensors = [target_tensors]
    else:
      raise TypeError('Expected `target_tensors` to be a list or tuple or '
                      'dict or a single tensor, but got:', target_tensors)
  else:
    # In case target tensor is empty or None, create a list with Nones
    # that has same length as self.output_names. With that, the None check of
    # target tensor can be skipped downstream.
    target_tensors = [None for _ in self.output_names]
  return target_tensors
def _recompile_weights_loss_and_weighted_metrics(self):
  """Recompiles loss/weighted-metric graphs if sample weights changed.

  Returns:
    True if a recompile was performed, False otherwise (including when the
    model has not been compiled yet).
  """
  if not self._is_compiled:
    return False

  # A recompile is needed as soon as any endpoint reports a mismatch
  # between its sample-weight placeholders and the provided weights.
  needs_recompile = False
  for endpoint in self._training_endpoints:
    if endpoint.sample_weights_mismatch():
      needs_recompile = True
      break

  if needs_recompile:
    self._compile_weights_loss_and_weighted_metrics()
  return needs_recompile
def _prepare_skip_target_masks(self):
  """Boolean mask for whether the target in the output list should be skipped.

  If the loss function corresponding to a model output is None, then this
  output will be skipped during total loss calculation and feed targets
  preparation.

  Returns:
    A boolean list for whether the corresponding target in the output list
    should be skipped during loss calculation.
  """
  skip_masks = []
  for loss_fn in self.loss_functions:
    skip_masks.append(loss_fn is None)
  return skip_masks

def _prepare_output_masks(self):
  """Returns masks corresponding to model outputs."""
  return [getattr(output, '_keras_mask', None) for output in self.outputs]
def _prepare_total_loss(self, masks):
  """Computes total loss from loss functions.

  Args:
    masks: List of mask values corresponding to each model output.

  Returns:
    The total loss: the weighted sum of all per-output losses plus any
    regularization/layer losses, or `0.` if there is nothing to sum.

  Raises:
    TypeError: If model run_eagerly is True.
    ValueError: If the model has no loss to optimize.
  """
  if self.run_eagerly:
    raise TypeError('total loss can not be computed when compiled with '
                    'run_eagerly = True.')
  loss_list = []
  with backend.name_scope('loss'):
    for endpoint, mask in zip(self._training_endpoints, masks):
      if endpoint.should_skip_target():
        continue
      y_true = endpoint.training_target.target
      y_pred = endpoint.output
      loss_fn = endpoint.loss_fn
      loss_weight = endpoint.loss_weight
      loss_name = endpoint.loss_name()
      sample_weight = endpoint.sample_weight

      with backend.name_scope(loss_name):
        if mask is not None:
          mask = tf.cast(mask, y_pred.dtype)
          # Update weights with mask.
          if sample_weight is None:
            sample_weight = mask
          else:
            # Update dimensions of weights to match with mask if possible.
            mask, _, sample_weight = (
                losses_utils.squeeze_or_expand_dimensions(
                    mask, sample_weight=sample_weight))
            sample_weight *= mask

        if hasattr(loss_fn, 'reduction'):
          # Built-in `Loss` objects: compute unreduced per-sample losses,
          # then apply the loss object's configured reduction.
          per_sample_losses = loss_fn.call(y_true, y_pred)
          weighted_losses = losses_utils.compute_weighted_loss(
              per_sample_losses,
              sample_weight=sample_weight,
              reduction=losses_utils.ReductionV2.NONE)
          loss_reduction = loss_fn.reduction

          # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all
          # compile use cases.
          if loss_reduction == losses_utils.ReductionV2.AUTO:
            loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

          # Compute the stateless loss value.
          output_loss = losses_utils.reduce_weighted_loss(
              weighted_losses, reduction=loss_reduction)
        else:
          # Compute the stateless loss value for a custom loss class.
          # Here we assume that the class takes care of loss reduction
          # because if this class returns a vector value we cannot
          # differentiate between use case where a custom optimizer
          # expects a vector loss value vs unreduced per-sample loss value.
          output_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
          loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

      if len(self.outputs) > 1:
        # Keep track of stateful result tensor for the loss.
        endpoint.output_loss_metric(output_loss)

      # Scale output loss for distribution. For custom losses we assume
      # reduction was mean.
      if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
        output_loss = losses_utils.scale_loss_for_distribution(output_loss)
      loss_list.append(loss_weight * output_loss)
    if not loss_list and not self.losses:
      raise ValueError('The model cannot be compiled '
                       'because it has no loss to optimize.')

    # Add regularization penalties and other layer-specific losses.
    custom_losses = self.get_losses_for(None) + self.get_losses_for(
        self.inputs)
    if custom_losses:
      total_custom_loss = tf.add_n(
          losses_utils.cast_losses_to_common_dtype(custom_losses))
      loss_list.append(
          losses_utils.scale_loss_for_distribution(total_custom_loss))
  loss_list = losses_utils.cast_losses_to_common_dtype(loss_list)
  if loss_list:
    total_loss = tf.add_n(loss_list)
  else:
    total_loss = 0.
  return total_loss
def _validate_or_infer_batch_size(self, batch_size, steps, x):
  """Validates that the `batch_size` provided is consistent with InputLayer.

  It's possible that the user specified a static batch size in their
  InputLayer. If so, this method checks the provided `batch_size` and `x`
  arguments are consistent with this static batch size. Also, if
  `batch_size` is `None`, this method will attempt to infer the batch size
  from the static batch size of the InputLayer. Lastly, ValueError will be
  raised if `x` is a tf.data.Dataset and `batch_size` is specified as we
  expect users to provide batched datasets.

  Args:
    batch_size: The batch_size provided as an argument to
      fit/evaluate/predict.
    steps: The steps provided as an argument to fit/evaluate/predict.
    x: The data passed as `x` to fit/evaluate/predict.

  Returns:
    The validated batch_size, auto-inferred from the first layer if not
    provided.

  Raises:
    ValueError: If `batch_size` is given for a batch-producing input type,
      or is inconsistent with the InputLayer's static batch size or the
      number of replicas.
  """
  # Batch-producing inputs (datasets, Sequences, generators) must not be
  # combined with an explicit batch_size.
  if (isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset,
                     data_utils.Sequence)) or tf_inspect.isgenerator(x)):
    if batch_size is not None:
      raise ValueError(
          'The `batch_size` argument must not be specified for the given '
          'input type. Received input: {}, batch_size: {}'.format(
              x, batch_size))
    return

  # Avoids the override in Sequential.layers which filters Input layers.
  # (Which are often the very layers that we're after.)
  layers = self._flatten_layers(include_self=False, recursive=False)
  first_layer = next(layers, None)
  if first_layer:
    # The per-replica static batch size.
    static_batch_size = training_utils.get_static_batch_size(first_layer)
    if static_batch_size is not None:

      # Determine number of times the user-supplied batch size will be split.
      if (self._distribution_strategy and
          distributed_training_utils.global_batch_size_supported(
              self._distribution_strategy)):
        num_splits_for_ds = self._distribution_strategy.num_replicas_in_sync
      else:
        num_splits_for_ds = 1

      # Check `batch_size` argument is consistent with InputLayer.
      if batch_size is not None:
        if batch_size % num_splits_for_ds != 0:
          # Fixed garbled error message ("divisible the by number").
          raise ValueError('The `batch_size` argument ({}) must be divisible '
                           'by the number of replicas ({})'.format(
                               batch_size, num_splits_for_ds))
        per_replica_batch_size = batch_size // num_splits_for_ds

        if per_replica_batch_size != static_batch_size:
          raise ValueError('The `batch_size` argument value {} is '
                           'incompatible with the specified batch size of '
                           'your Input Layer: {}'.format(
                               per_replica_batch_size, static_batch_size))

      # Check Dataset/Iterator batch size is consistent with InputLayer.
      if isinstance(x, (tf.data.Dataset, tf.compat.v1.data.Iterator,
                        tf.data.Iterator)):
        ds_batch_size = tf.compat.v1.Dimension(
            tf.nest.flatten(tf.compat.v1.data.get_output_shapes(x))[0][0]
        ).value
        if ds_batch_size is not None:
          if ds_batch_size % num_splits_for_ds != 0:
            # Fixed wrong-sense error message ("cannot be divisible by").
            raise ValueError(
                'The batch output shape of your `Dataset` {} '
                'is not divisible by the number of replicas {}'.format(
                    ds_batch_size, num_splits_for_ds))

          ds_per_replica_batch_size = ds_batch_size // num_splits_for_ds
          if ds_per_replica_batch_size != static_batch_size:
            raise ValueError('The batch output shape of your `Dataset` is '
                             '{}, which is incompatible with the specified '
                             'batch size of your Input Layer: {}'.format(
                                 ds_per_replica_batch_size,
                                 static_batch_size))

      # Set inferred batch size from the InputLayer.
      if steps is None:
        batch_size = static_batch_size * num_splits_for_ds

  if batch_size is None and steps is None:
    # Backwards compatibility
    batch_size = 32
  return batch_size
Expected: {}, got: {}.'.format( len(self._training_endpoints), len(sample_weights))) else: sample_weights = [None] * len(self._training_endpoints) for endpoint, weight in zip(self._training_endpoints, sample_weights): endpoint.populate_sample_weight(weight, endpoint.sample_weight_mode) def _cache_output_metric_attributes(self, metrics, weighted_metrics): """Caches metric name and function attributes for every model output.""" output_shapes = [] for output in self.outputs: if output is None or output.shape.rank is None: output_shapes.append(None) else: output_shapes.append(output.shape.as_list()) self._per_output_metrics = training_utils_v1.collect_per_output_metric_info( metrics, self.output_names, output_shapes, self.loss_functions, from_serialized=self._from_serialized) self._per_output_weighted_metrics = ( training_utils_v1.collect_per_output_metric_info( weighted_metrics, self.output_names, output_shapes, self.loss_functions, from_serialized=self._from_serialized, is_weighted=True)) def _add_unique_metric_name(self, metric_name, metric_fn, output_index): """Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name """ # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # "accuracy") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. 
if not getattr(metric_fn, '_from_serialized', False): metric_name = '%s_%s' % (self.output_names[output_index], metric_name) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = '%s_%d' % (base_metric_name, j) j += 1 return metric_name def _init_metric_attributes(self): """Initialized model metric attributes.""" # List of stateful metric functions. Used for resetting metric state during # training/eval. self._compile_metric_functions = [] def _set_per_output_metric_attributes(self, metrics_dict, output_index): """Sets the metric attributes on the model for the given output. Args: metrics_dict: A dict with metric names as keys and metric fns as values. output_index: The index of the model output for which the metric attributes are added. Returns: Metrics dict updated with unique metric names as keys. """ updated_metrics_dict = collections.OrderedDict() for metric_name, metric_fn in metrics_dict.items(): metric_name = self._add_unique_metric_name( metric_name, metric_fn, output_index) # Update the name on the metric class to be the unique generated name. metric_fn._name = metric_name # pylint: disable=protected-access updated_metrics_dict[metric_name] = metric_fn # Keep track of metric name and function. 
      self._compile_metric_functions.append(metric_fn)
    return updated_metrics_dict

  def _set_metric_attributes(self):
    """Sets the metric attributes on the model for all the model outputs."""
    updated_per_output_metrics = []
    updated_per_output_weighted_metrics = []
    for i, endpoint in enumerate(self._training_endpoints):
      if endpoint.should_skip_target():
        # Outputs without a target keep their metric dicts untouched.
        updated_per_output_metrics.append(self._per_output_metrics[i])
        updated_per_output_weighted_metrics.append(
            self._per_output_weighted_metrics[i])
        continue
      updated_per_output_metrics.append(
          self._set_per_output_metric_attributes(self._per_output_metrics[i],
                                                 i))
      updated_per_output_weighted_metrics.append(
          self._set_per_output_metric_attributes(
              self._per_output_weighted_metrics[i], i))

    # Create a metric wrapper for each output loss. This computes mean of an
    # output loss across mini-batches (irrespective of how we reduce within a
    # batch).
    if len(self._training_endpoints) > 1:
      for endpoint in self._training_endpoints:
        if not endpoint.should_skip_target():
          endpoint.output_loss_metric = metrics_module.Mean(
              name=endpoint.loss_name())

    self._per_output_metrics = updated_per_output_metrics
    self._per_output_weighted_metrics = updated_per_output_weighted_metrics

  def _handle_per_output_metrics(self,
                                 metrics_dict,
                                 y_true,
                                 y_pred,
                                 mask,
                                 weights=None):
    """Calls metric functions for a single output.

    Args:
      metrics_dict: A dict with metric names as keys and metric fns as values.
      y_true: Target output.
      y_pred: Predicted output.
      mask: Computed mask value for the current output.
      weights: Weights to be applied on the current output.

    Returns:
      A list of metric result tensors.
    """
    metric_results = []
    for metric_name, metric_fn in metrics_dict.items():
      with backend.name_scope(metric_name):
        metric_result = training_utils_v1.call_metric_function(
            metric_fn, y_true, y_pred, weights=weights, mask=mask)
        metric_results.append(metric_result)
    return metric_results

  def _handle_metrics(self,
                      outputs,
                      targets=None,
                      skip_target_masks=None,
                      sample_weights=None,
                      masks=None,
                      return_weighted_metrics=False,
                      return_weighted_and_unweighted_metrics=False):
    """Handles calling metric functions.

    Args:
      outputs: List of outputs (predictions).
      targets: List of targets.
      skip_target_masks: Optional. List of boolean for whether the
        corresponding target should be ignored or not.
      sample_weights: Optional list of sample weight arrays.
      masks: List of computed output mask values.
      return_weighted_metrics: Flag that indicates whether weighted metrics
        should be computed instead of unweighted metrics. This flag is ignored
        when `return_weighted_and_unweighted_metrics` is enabled.
      return_weighted_and_unweighted_metrics: Flag that is used to indicate
        whether both weighted and unweighted metrics should be computed. When
        this is not enabled, we use `return_weighted_metrics` param to
        indicate whether weighted or unweighted metrics should be returned.

    Returns:
      A list of metric result tensors.
    """
    # TODO(scottzhu): Update this to use the new training_endpoints. Currently
    # the eager and graph logic is bit different.
    skip_target_masks = skip_target_masks or [False] * len(outputs)
    metric_results = []
    with backend.name_scope('metrics'):
      # Invoke all metrics added using `compile`.
      # Continuation of `_handle_metrics`: run the compiled (and weighted)
      # metric fns for every non-skipped output.
      for i in range(len(outputs)):
        if skip_target_masks[i]:
          continue
        output = outputs[i] if outputs else None
        target = targets[i] if targets else None
        output_mask = masks[i] if masks else None

        if (return_weighted_and_unweighted_metrics or
            not return_weighted_metrics):
          metric_results.extend(
              self._handle_per_output_metrics(self._per_output_metrics[i],
                                              target, output, output_mask))
        if return_weighted_and_unweighted_metrics or return_weighted_metrics:
          metric_results.extend(
              self._handle_per_output_metrics(
                  self._per_output_weighted_metrics[i],
                  target,
                  output,
                  output_mask,
                  weights=sample_weights[i] if sample_weights else None))
    return metric_results

  def _check_trainable_weights_consistency(self):
    """Check trainable weights count consistency.

    This will raise a warning if `trainable_weights` and
    `_collected_trainable_weights` are inconsistent (i.e. have different
    number of parameters).
    Inconsistency will typically arise when one modifies `model.trainable`
    without calling `model.compile` again.
    """
    if not hasattr(self, '_collected_trainable_weights'):
      return

    if len(self.trainable_weights) != len(self._collected_trainable_weights):
      logging.log_first_n(
          logging.WARN, 'Discrepancy between trainable weights and collected'
          ' trainable weights, did you set `model.trainable`'
          ' without calling `model.compile` after ?', 1)

  def _make_train_function(self):
    """Builds `self.train_function` (symbolic loss/metrics + weight updates)."""
    has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
    self._check_trainable_weights_consistency()
    if isinstance(self.optimizer, list):
      raise ValueError('The `optimizer` in `compile` should be a single '
                       'optimizer.')
    # If we have re-compiled the loss/weighted metric sub-graphs then create
    # train function even if one exists already. This is because
    # `_feed_sample_weights` list has been updated on re-compile.
    if getattr(self, 'train_function', None) is None or has_recompiled:
      # Restore the compiled trainable state.
      current_trainable_state = self._get_trainable_state()
      self._set_trainable_state(self._compiled_trainable_state)

      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)
      if not isinstance(backend.symbolic_learning_phase(), int):
        inputs += [backend.symbolic_learning_phase()]

      with backend.get_graph().as_default():
        with backend.name_scope('training'):
          # Training updates
          updates = self.optimizer.get_updates(
              params=self._collected_trainable_weights, loss=self.total_loss)
        # Unconditional updates
        updates += self.get_updates_for(None)
        # Conditional updates relevant to this model
        updates += self.get_updates_for(self.inputs)

        metrics = self._get_training_eval_metrics()
        metrics_tensors = [
            m._call_result for m in metrics if hasattr(m, '_call_result')  # pylint: disable=protected-access
        ]

      with backend.name_scope('training'):
        # Gets loss and metrics. Updates weights at each call.
        fn = backend.function(
            inputs, [self.total_loss] + metrics_tensors,
            updates=updates,
            name='train_function',
            **self._function_kwargs)
        setattr(self, 'train_function', fn)

      # Restore the current trainable state
      self._set_trainable_state(current_trainable_state)

  def _make_test_function(self):
    """Builds `self.test_function` (loss/metrics without gradient updates)."""
    has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
    # If we have re-compiled the loss/weighted metric sub-graphs then create
    # test function even if one exists already. This is because
    # `_feed_sample_weights` list has been updated on re-compile.
    if getattr(self, 'test_function', None) is None or has_recompiled:
      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)

      with backend.get_graph().as_default():
        metrics = self._get_training_eval_metrics()
        metrics_tensors = [
            m._call_result for m in metrics if hasattr(m, '_call_result')  # pylint: disable=protected-access
        ]

      with backend.name_scope('evaluation'):
        updates = self.state_updates
        # Return loss and metrics, no gradient updates.
        # Does update the network states.
        fn = backend.function(
            inputs, [self.total_loss] + metrics_tensors,
            updates=updates,
            name='test_function',
            **self._function_kwargs)
        setattr(self, 'test_function', fn)

  def _make_predict_function(self):
    """Builds `self.predict_function` (network outputs only)."""
    if not hasattr(self, 'predict_function'):
      self.predict_function = None
    if self.predict_function is None:
      inputs = self._feed_inputs
      # Gets network outputs. Does not update weights.
      # Does update the network states.
      kwargs = getattr(self, '_function_kwargs', {})
      with backend.name_scope(ModeKeys.PREDICT):
        self.predict_function = backend.function(
            inputs,
            self.outputs,
            updates=self.state_updates,
            name='predict_function',
            **kwargs)

  def _make_execution_function(self, mode):
    """Returns the backend function for the given `ModeKeys` mode, building it lazily."""
    if mode == ModeKeys.TRAIN:
      self._make_train_function()
      return self.train_function
    if mode == ModeKeys.TEST:
      self._make_test_function()
      return self.test_function
    if mode == ModeKeys.PREDICT:
      self._make_predict_function()
      return self.predict_function

  def _distribution_standardize_user_data(self,
                                          x,
                                          y=None,
                                          sample_weight=None,
                                          class_weight=None,
                                          batch_size=None,
                                          validation_split=0.,
                                          shuffle=False,
                                          epochs=1,
                                          allow_partial_batch=False):
    """Runs validation checks on input and target data passed by the user.

    This is called when using tf.distribute.Strategy to train, evaluate or
    serve the model.

    Args:
      x: Input data. A numpy array or `tf.data` dataset.
      y: Target data. A numpy array or None if x is a `tf.data` dataset.
      sample_weight: An optional sample-weight array passed by the user to
        weight the importance of each sample in `x`.
      class_weight: An optional class-weight array by the user to
        weight the importance of samples in `x` based on the class they belong
        to, as conveyed by `y`.
      batch_size: Integer batch size. If provided, it is used to run additional
        validation checks on stateful models.
      validation_split: Float between 0 and 1.
        Fraction of the training data to be used as validation data.
      shuffle: Boolean whether to shuffle the training data before each epoch.
      epochs: Integer epochs. If > 1, repeat the numpy training data epochs
        times when converting to training dataset.
      allow_partial_batch: Boolean whether to enforce that all batches have the
        same size.

    Returns:
      Dataset instance.

    Raises:
      ValueError: In case of invalid user-provided data.
      RuntimeError: If the model was never compiled.
    """
    if class_weight:
      raise NotImplementedError('`class_weight` is currently not supported '
                                'when using tf.distribute.Strategy.')

    # NOTE(review): `sample_weight.all()` looks suspicious as a presence
    # check — it only rejects weights whose elements are all non-zero, not
    # any provided weights. Confirm the intended TPU restriction.
    if (sample_weight is not None and sample_weight.all() and
        backend.is_tpu_strategy(self._distribution_strategy)):
      raise NotImplementedError('`sample_weight` is currently not supported '
                                'when using TPUStrategy.')

    # Validates `steps` and `shuffle` arguments right at the beginning
    # since we use it to construct the dataset object.
    # TODO(anjalisridhar): Remove this check once we refactor the
    # _standardize_user_data code path. This check is already present elsewhere
    # in the codebase.
    if isinstance(x, tf.data.Dataset):
      if shuffle:
        training_utils_v1.verify_dataset_shuffled(x)

    strategy = self._distribution_strategy
    with strategy.scope():
      # We should be sure to call get_session() inside the strategy.scope()
      # so the strategy can affect the session options.
      if tf.compat.v1.executing_eagerly_outside_functions():
        session = None
      else:
        session = backend.get_session()

      first_x_value = tf.nest.flatten(x)[0]
      if isinstance(first_x_value, np.ndarray):
        x = training_utils.list_to_tuple(x)
        if y is not None:
          y = training_utils.list_to_tuple(y)
          if sample_weight is not None:
            sample_weight = training_utils.list_to_tuple(sample_weight)
            in_tuple = (x, y, sample_weight)
          else:
            in_tuple = (x, y)
        else:
          in_tuple = x

        ds = strategy.extended.experimental_make_numpy_dataset(
            in_tuple, session=session)
        if shuffle:
          # We want a buffer size that is larger than the batch size provided by
          # the user and provides sufficient randomness. Note that larger
          # numbers introduce more memory usage based on the size of each
          # sample.
          ds = ds.shuffle(max(1024, batch_size * 8))
        if epochs > 1:
          ds = ds.repeat(epochs)

        # We need to use the drop_remainder argument to get a known static
        # input shape which is required for TPUs.
        drop_remainder = (not allow_partial_batch and
                          strategy.extended.experimental_require_static_shapes)

        # TODO(b/131720208): We still drop remainder here if number of examples
        # is divisible by batch size, as sometimes dynamic padder will time out
        # with keras.metrics.CategoricalAccuracy() metric.
        if backend.is_tpu_strategy(strategy) and not drop_remainder:
          dataset_size = first_x_value.shape[0]
          if dataset_size % batch_size == 0:
            drop_remainder = True

        x = ds.batch(batch_size, drop_remainder=drop_remainder)
      else:
        assert isinstance(x, tf.data.Dataset)
        training_utils_v1.validate_dataset_input(x, y, sample_weight,
                                                 validation_split)
    return x

  def _standardize_user_data(self,
                             x,
                             y=None,
                             sample_weight=None,
                             class_weight=None,
                             batch_size=None,
                             check_steps=False,
                             steps_name='steps',
                             steps=None,
                             validation_split=0.,
                             shuffle=False,
                             extract_tensors_from_dataset=False):
    """Runs validation checks on input and target data passed by the user.

    Also standardizes the data to lists of arrays, in order.

    Also builds and compiles the model on the fly if it is a subclassed model
    that has never been called before (and thus has no inputs/outputs).

    This is a purely internal method, subject to refactoring at any time.

    Args:
      x: Input data. It could be:
        - A Numpy array (or array-like), or a list of arrays
          (in case the model has multiple inputs).
        - A TensorFlow tensor, or a list of tensors
          (in case the model has multiple inputs).
        - A dict mapping input names to the corresponding array/tensors,
          if the model has named inputs.
        - A `tf.data` dataset.
      y: Target data. Like the input data `x`,
        it could be either Numpy array(s) or TensorFlow tensor(s).
        It should be consistent with `x` (you cannot have Numpy inputs and
        tensor targets, or inversely). If `x` is a dataset, `y` should not be
        specified (since targets will be obtained from the iterator).
      sample_weight: An optional sample-weight array passed by the user to
        weight the importance of each sample in `x`.
      class_weight: An optional class-weight array by the user to
        weight the importance of samples in `x` based on the class they belong
        to, as conveyed by `y`. If both `sample_weight` and `class_weight` are
        provided, the weights are multiplied.
      batch_size: Integer batch size. If provided, it is used to run additional
        validation checks on stateful models.
      check_steps: boolean, True if we want to check for validity of `steps`
        and False, otherwise. For example, when we are standardizing one batch
        of data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
        value is not required and we should not check for its validity in these
        cases.
      steps_name: The public API's parameter name for `steps`.
      steps: Integer or `None`. Total number of steps (batches of samples) to
        execute.
      validation_split: Float between 0 and 1.
        Fraction of the training data to be used as validation data.
      shuffle: Boolean whether to shuffle the training data before each epoch.
      extract_tensors_from_dataset: Boolean. When `x` is a dataset instance,
        this indicates whether to extract actual tensors from the dataset or
        instead output the dataset instance itself.
        Set to True when calling from `train_on_batch`/etc.

    Returns:
      A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a
      dict or not), target arrays, sample-weight arrays.
      If the model's input and targets are symbolic, these lists are empty
      (since the model takes no user-provided data, instead the data comes
      from the symbolic inputs/targets).

    Raises:
      ValueError: In case of invalid user-provided data.
      RuntimeError: If the model was never compiled.
    """
    if isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset)):
      # Graph mode dataset. We'll pass the dataset as-is (unless
      # `extract_tensors_from_dataset` is True, in which case we extract
      # the tensors from the dataset and we output them.
      training_utils_v1.validate_dataset_input(x, y, sample_weight,
                                               validation_split)
      if shuffle:
        training_utils_v1.verify_dataset_shuffled(x)

      is_dataset = True
      if extract_tensors_from_dataset:
        # We do this for `train_on_batch`/etc.
        x, y, sample_weight = training_utils_v1.extract_tensors_from_dataset(x)
    elif isinstance(x, tf.compat.v1.data.Iterator):
      # Graph mode iterator. We extract the symbolic tensors.
      training_utils_v1.validate_dataset_input(x, y, sample_weight,
                                               validation_split)
      iterator = x
      x, y, sample_weight = training_utils_v1.unpack_iterator_input(iterator)
      is_dataset = True
    else:
      is_dataset = False

    # Validates `steps` argument based on x's type.
    if check_steps:
      training_utils_v1.check_steps_argument(x, steps, steps_name)

    # First, we build the model on the fly if necessary.
    if not self.inputs:
      all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y)
      is_build_called = True
    else:
      all_inputs = []
      # Whether this is a subclassed model that expects dictionary inputs
      # rather than list inputs (e.g. FeatureColumn-based models).
      dict_inputs = isinstance(self.inputs, dict)
      is_build_called = False
      y_input = y

    # Second, we compile the model on the fly if necessary, mostly for subclass
    # models.
    is_compile_called = False
    if not self._is_compiled and self.optimizer:
      self._compile_from_inputs(all_inputs, y_input, x, y)
      is_compile_called = True

    # In graph mode, if we had just set inputs and targets as symbolic tensors
    # by invoking build and compile on the model respectively, we do not have to
    # feed anything to the model. Model already has input and target data as
    # part of the graph.
    # Note: in this case, `any` and `all` are equivalent since we disallow
    # mixed symbolic/value inputs.

    # self.run_eagerly is not free to compute, so we want to reuse the value.
    run_eagerly = self.run_eagerly

    if (not run_eagerly and is_build_called and is_compile_called and
        not is_dataset and any(_is_symbolic_tensor(v) for v in all_inputs)):
      # Symbolic build+compile just happened: data is already in the graph,
      # nothing needs to be fed.
      return [], [], None

    return self._standardize_tensors(
        x, y, sample_weight,
        run_eagerly=run_eagerly,
        dict_inputs=dict_inputs,
        is_dataset=is_dataset,
        class_weight=class_weight,
        batch_size=batch_size)

  def _standardize_tensors(self, x, y, sample_weight, run_eagerly, dict_inputs,
                           is_dataset, class_weight=None, batch_size=None):
    """Standardizes `x`/`y`/`sample_weight` into the arrays fed to the model."""
    if run_eagerly:
      # In eager mode, do not do shape validation
      # since the network has no input nodes (placeholders) to be fed.
      feed_input_names = self.input_names
      feed_input_shapes = None
    elif not self._is_graph_network:
      # Case: symbolic-mode subclassed network. Do not do shape validation.
      feed_input_names = self._feed_input_names
      feed_input_shapes = None
    else:
      # Case: symbolic-mode graph network.
      # In this case, we run extensive shape validation checks.
      feed_input_names = self._feed_input_names
      feed_input_shapes = self._feed_input_shapes

    # Standardize the inputs.
    if not isinstance(x, (tf.compat.v1.data.Dataset, tf.data.Dataset)):
      # TODO(fchollet): run static checks with dataset output shape(s).
      x = training_utils_v1.standardize_input_data(
          x,
          feed_input_names,
          feed_input_shapes,
          check_batch_axis=False,  # Don't enforce the batch size.
          exception_prefix='input')

    # Get typespecs for the input data and sanitize it if necessary.
    # TODO(momernick): This should be capable of doing full input validation
    # at all times - validate that this is so and refactor the standardization
    # code.
    if isinstance(x, tf.data.Dataset):
      x_shapes = tf.data.experimental.get_structure(x)
      if isinstance(x_shapes, tuple):
        # If the output of a Dataset is a tuple, we assume it's either of the
        # form (x_data, y_data) or (x_data, y_data, sample_weights). In either
        # case, we only care about x_data here.
        x_shapes = x_shapes[0]
    else:
      flat_inputs = tf.nest.flatten(x, expand_composites=False)
      flat_expected_inputs = tf.nest.flatten(self.inputs,
                                             expand_composites=False)
      converted_x = []
      for (a, b) in zip(flat_inputs, flat_expected_inputs):
        converted_x.append(_convert_scipy_sparse_tensor(a, b))
      x = tf.nest.pack_sequence_as(x, converted_x, expand_composites=False)

      def _type_spec_from_value(value):
        """Grab type_spec without converting array-likes to tensors."""
        if tf_utils.is_extension_type(value):
          return value._type_spec  # pylint: disable=protected-access
        # Get a TensorSpec for array-like data without
        # converting the data to a Tensor
        if hasattr(value, 'shape') and hasattr(value, 'dtype'):
          return tf.TensorSpec(value.shape, value.dtype)
        else:
          return tf.type_spec_from_value(value)

      x_shapes = tf.nest.map_structure(_type_spec_from_value, x)

    flat_inputs = tf.nest.flatten(x_shapes, expand_composites=False)
    flat_expected_inputs = tf.nest.flatten(self.inputs,
                                           expand_composites=False)
    for (a, b) in zip(flat_inputs, flat_expected_inputs):
      tf.nest.assert_same_structure(a, b, expand_composites=True)

    if y is not None:
      # Prepare self._sample_weight_modes. List with the same length as
      # model outputs.
      training_utils_v1.prepare_sample_weight_modes(self._training_endpoints,
                                                    self.sample_weight_mode)
      feed_output_names = self._feed_output_names
      feed_sample_weight_modes = self._sample_weight_modes
      if not self._is_graph_network:
        feed_output_shapes = None
      else:
        feed_output_shapes = self._feed_output_shapes

      # Standardize the outputs.
      y = training_utils_v1.standardize_input_data(
          y,
          feed_output_names,
          # Don't enforce target shapes to match output shapes.
          # Precise checks will be run in `check_loss_and_target_compatibility`.
          shapes=None,
          check_batch_axis=False,  # Don't enforce the batch size.
          exception_prefix='target')

      # Generate sample-wise weight values given the `sample_weight` and
      # `class_weight` arguments.
      sample_weights = training_utils_v1.standardize_sample_weights(
          sample_weight, feed_output_names)
      class_weights = training_utils_v1.standardize_class_weights(
          class_weight, feed_output_names)

      sample_weights = [
          training_utils_v1.standardize_weights(ref, sw, cw, mode)
          for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
                                         feed_sample_weight_modes)
      ]
      # Check that all arrays have the same length.
      if not self._distribution_strategy:
        training_utils_v1.check_array_lengths(x, y, sample_weights)
        if self._is_graph_network and not run_eagerly:
          # Additional checks to avoid users mistakenly using improper loss fns.
          training_utils_v1.check_loss_and_target_compatibility(
              y, self._feed_loss_fns, feed_output_shapes)

      sample_weights, _, _ = training_utils.handle_partial_sample_weights(
          y, sample_weights, feed_sample_weight_modes, check_all_flat=True)
    else:
      y = []
      sample_weights = None

    if self.stateful and batch_size and not is_dataset:
      # Check that for stateful networks, number of samples is a multiple
      # of the static batch size.
      if x[0].shape[0] % batch_size != 0:
        raise ValueError('In a stateful network, '
                         'you should only pass inputs with '
                         'a number of samples that can be '
                         'divided by the batch size. Found: ' +
                         str(x[0].shape[0]) + ' samples')

    # If dictionary inputs were provided, we return a dictionary as well.
    if dict_inputs and not isinstance(x, (tf.compat.v1.data.Dataset,
                                          tf.data.Dataset)):
      x = dict(zip(feed_input_names, x))
    return x, y, sample_weights

  def _build_model_with_inputs(self, inputs, targets):
    """Build the model (set model inputs/outputs), mainly for subclass model."""
    processed_inputs = []
    is_dict_inputs = False
    orig_inputs = inputs
    # We need to use `inputs` to set the model inputs.
    # If input data is a dataset iterator in graph mode or if it is an eager
    # iterator and only one batch of samples is required, we fetch the data
    # tensors from the iterator and then standardize them.
    if isinstance(inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset)):
      inputs, targets, _ = training_utils_v1.extract_tensors_from_dataset(
          inputs)
    # We type-check that `inputs` and `targets` are either single arrays
    # or lists of arrays, and extract a flat list of inputs from the passed
    # structure.
    training_utils_v1.validate_input_types(inputs, orig_inputs)

    if isinstance(inputs, (list, tuple)):
      processed_inputs += list(inputs)
    elif isinstance(inputs, dict):
      is_dict_inputs = True
      keys = sorted(inputs.keys())
      processed_inputs = [inputs[k] for k in keys]
    else:
      processed_inputs.append(inputs)
    # Now that we have a flat set of inputs, we make sure that none of them
    # are CompositeTensors or CompositeTensorValues of any type (or scipy
    # sparse arrays, which we treat as SparseTensor values). We cannot safely
    # infer input data from an arbitrary composite tensor, so we don't try -
    # users should explicitly add composite tensor inputs to their subclassed
    # models.
    for input_tensor in processed_inputs:
      if training_utils_v1.is_composite_or_composite_value(input_tensor):
        # TODO(b/132691975): Document subclass-model CT input handling.
        raise ValueError(
            'All SparseTensor and RaggedTensor inputs must be explicitly '
            'declared using a keras.Input() with sparse=True or ragged=True. '
            'We found an undeclared input %s. For Sequential models, please '
            'add a keras.Input() as your first Layer. For subclassed models, '
            'please call self._set_inputs() on your input set, which you can '
            'create using keras.Input() for each input to your model.' %
            (input_tensor,))
    # Build the model using the retrieved inputs (value or symbolic).
    # If values are generated from a dataset, then in symbolic-mode
    # placeholders will be created to match the value shapes.
    if isinstance(orig_inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset,
                                tf.compat.v1.data.Iterator)):
      if not self.inputs:
        # For subclassed models, a robust input spec is not available so we
        # must cast to the model dtype.
        inputs = training_utils_v1.cast_if_floating_dtype(inputs, self.dtype)

      def create_tensor_spec(t):
        # Describe dataset elements as TensorSpecs so placeholders can be
        # created to match them.
        return tf.TensorSpec(t.shape, t.dtype)

      cast_inputs = tf.nest.map_structure(create_tensor_spec, inputs)
    elif training_utils_v1.has_tensors(inputs):
      cast_inputs = training_utils_v1.cast_if_floating_dtype(inputs)
    else:
      cast_inputs = inputs
    self._set_inputs(cast_inputs)
    return processed_inputs, targets, is_dict_inputs

  def _compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target):
    """Compiles the model on the fly using the inputs/targets seen at call time."""
    if target is not None:
      # We need to use `y` to set the model targets.
      if training_utils_v1.has_tensors(target):
        target = training_utils_v1.cast_if_floating_dtype_and_mismatch(
            target, self.outputs)
      training_utils_v1.validate_input_types(
          target, orig_target, allow_dict=False, field_name='target')
      if isinstance(target, (list, tuple)):
        all_inputs += list(target)
      else:
        all_inputs.append(target)
    # Type check that all inputs are *either* value *or* symbolic.
    # TODO(fchollet): this check could be removed in Eager mode?
    if any(tf.is_tensor(v) for v in all_inputs):
      if not all(tf.is_tensor(v) for v in all_inputs):
        raise ValueError('Do not pass inputs that mix Numpy arrays and '
                         'TensorFlow tensors. '
                         'You passed: x=' + str(orig_inputs) +
                         '; y=' + str(orig_target))
    is_dataset = isinstance(orig_inputs,
                            (tf.compat.v1.data.Dataset, tf.data.Dataset,
                             tf.compat.v1.data.Iterator))
    if is_dataset or tf.executing_eagerly():
      target_tensors = None
    else:
      # Handle target tensors if any passed.
      if target is not None:
        if not isinstance(target, (list, tuple)):
          target = [target]
        target_tensors = [v for v in target if _is_symbolic_tensor(v)]
      else:
        target_tensors = None

    self.compile(
        optimizer=self.optimizer,
        loss=self.loss,
        metrics=self._compile_metrics,
        weighted_metrics=self._compile_weighted_metrics,
        loss_weights=self.loss_weights,
        target_tensors=target_tensors,
        sample_weight_mode=self.sample_weight_mode,
        run_eagerly=self.run_eagerly,
        experimental_run_tf_function=self._experimental_run_tf_function)

  # TODO(omalleyt): Consider changing to a more descriptive function name.
  def _set_inputs(self, inputs, outputs=None, training=None):
    """Set model's input and output specs based on the input data received.

    This is to be used for Model subclasses, which do not know at instantiation
    time what their inputs look like.

    Args:
      inputs: Single array, or list of arrays. The arrays could be
        placeholders, Numpy arrays, data tensors, or TensorSpecs.
        - if placeholders: the model is built on top of these placeholders,
          and we expect Numpy data to be fed for them when calling `fit`/etc.
        - if Numpy data or TensorShapes: we create placeholders matching the
          TensorShapes or shapes of the Numpy arrays. We expect Numpy data to
          be fed for these placeholders when calling `fit`/etc.
        - if data tensors: the model is built on top of these tensors.
          We do not expect any Numpy data to be provided when calling
          `fit`/etc.
      outputs: None, a data tensor, or a list of tensors. If None, the
        outputs will be determined by invoking `self.call()`, otherwise the
        provided value will be used.
      training: Boolean or None. Only relevant in symbolic mode. Specifies
        whether to build the model's graph in inference mode (False), training
        mode (True), or using the Keras learning phase (None).

    Raises:
      ValueError: If dict inputs are passed to a Sequential Model where the
        first layer isn't FeatureLayer.
    """
    self._set_save_spec(inputs)
    inputs = self._set_input_attrs(inputs)

    if outputs is None:
      kwargs = {}
      if self._expects_training_arg:
        # In V2 mode, feeding `training=None` is not allowed because any value
        # explicitly passed by the user is respected, even `None`.
        if training is None and not tf.compat.v1.executing_eagerly_outside_functions():
          training = backend.learning_phase()
        if training is not None:
          kwargs['training'] = training
      try:
        outputs = self(inputs, **kwargs)
      except NotImplementedError:
        # This Model or a submodel is dynamic and hasn't overridden
        # `compute_output_shape`.
        outputs = None

    self._set_output_attrs(outputs)

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _set_input_attrs(self, inputs):
    """Sets attributes related to the inputs of the Model."""
    if self.inputs:
      raise ValueError('Model inputs are already set.')

    if self.__class__.__name__ == 'Sequential' and not self.built:
      if tf.is_tensor(inputs):
        input_shape = (None,) + tuple(inputs.shape.as_list()[1:])
      elif isinstance(inputs, tf.TensorShape):
        input_shape = (None,) + tuple(inputs.as_list()[1:])
      elif isinstance(inputs, dict):
        # We assert that the first layer is a FeatureLayer.
        if not training_utils_v1.is_feature_layer(self.layers[0]):
          raise ValueError('Passing a dictionary input to a Sequential Model '
                           'which doesn\'t have FeatureLayer as the first layer'
                           ' is an error.')
        input_shape = (None,)
      else:
        input_shape = (None,) + tuple(inputs.shape[1:])
      self._build_input_shape = input_shape

    # Cast inputs to the compute dtype. This is primarily used
    # when saving to determine the correct dtype in the input signature.
    inputs = self._maybe_cast_inputs(inputs)

    # On-the-fly setting of symbolic model inputs (either by using the tensor
    # provided, or by creating a placeholder if Numpy data was provided).
model_inputs = training_utils_v1.ModelInputs(inputs) inputs = model_inputs.get_symbolic_inputs() self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True) self.input_names = model_inputs.get_input_names() self._feed_inputs = [] self._feed_input_names = [] self._feed_input_shapes = [] for k, v in model_inputs.as_dict(): if backend.is_placeholder(v): self._feed_input_names.append(k) self._feed_inputs.append(v) self._feed_input_shapes.append(backend.int_shape(v)) return inputs @tf.__internal__.tracking.no_automatic_dependency_tracking def _set_output_attrs(self, outputs): """Sets attributes related to the outputs of the Model.""" # NOTE(taylorrobie): This convention cannot be changed without updating the # data adapter since it assumes nest.flatten ordering. outputs = tf.nest.flatten(outputs) self.outputs = outputs self.output_names = training_utils_v1.generic_output_names(outputs) # TODO(scottzhu): Should we cleanup the self._training_endpoints here? self.built = True @property def _targets(self): """The output target tensors for the model.""" return [ e.training_target.target for e in self._training_endpoints if e.has_training_target() ] @property def _feed_targets(self): return [ e.training_target.target for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _feed_output_names(self): return [ e.output_name for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _feed_output_shapes(self): return [ e.feed_output_shape for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _feed_loss_fns(self): return [ e.loss_fn for e in self._training_endpoints if e.has_feedable_training_target() ] @property def _loss_weights_list(self): return [e.loss_weight for e in self._training_endpoints] @property def _output_loss_metrics(self): if hasattr(self, '_training_endpoints'): return [ e.output_loss_metric for e in self._training_endpoints if e.output_loss_metric is not None ] 
return None @property def sample_weights(self): return [e.sample_weight for e in self._training_endpoints] @property def _sample_weight_modes(self): return [e.sample_weight_mode for e in self._training_endpoints] @property def _feed_sample_weights(self): return [e.sample_weight for e in self._training_endpoints if e.sample_weight is not None] def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode): """Maybe load initial epoch from ckpt considering possible worker recovery. Refer to tensorflow/python/keras/distribute/worker_training_state.py for more information. Args: initial_epoch: The original initial_epoch user passes in in `fit()`. mode: The mode for running `model.fit()`. Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the `initial_epoch` the user passes in. """ if self._training_state is not None: return self._training_state.maybe_load_initial_epoch_from_ckpt( initial_epoch, mode) return initial_epoch def _get_training_eval_metrics(self): """Returns all the metrics that are to be reported. This includes the output loss metrics, compile metrics/weighted metrics, add_metric metrics. """ metrics = [] metrics.extend(getattr(self, '_output_loss_metrics', None) or []) metrics.extend(getattr(self, 'metrics', None) or []) return metrics def _assert_compile_was_called(self): # Checks whether `compile` has been called. If it has been called, # then the optimizer is set. This is different from whether the # model is compiled # (i.e. whether the model is built and its inputs/outputs are set). if not self._compile_was_called: raise RuntimeError('You must compile your model before ' 'training/testing. ' 'Use `model.compile(optimizer, loss)`.') def _in_multi_worker_mode(self): """Method to infer if this `Model` is working in multi-worker settings. 
Multi-worker training refers to the setup where the training is distributed across multiple workers, as opposed to the case where only a local process performs the training. This function is used to infer for example whether or not a distribute coordinator should be run, and thus TensorFlow servers should be started for communication with other servers in the cluster, or whether or not saving/restoring checkpoints is relevant for preemption fault tolerance. Experimental. Signature and implementation are subject to change. Returns: Whether this model indicates it's working in multi-worker settings. """ strategy = self._distribution_strategy # Otherwise, use the strategy whose scope this is in. if not strategy and tf.distribute.has_strategy(): strategy = tf.distribute.get_strategy() return strategy and strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access @property def _trackable_saved_model_saver(self): return model_serialization.ModelSavedModelSaver(self) def _get_compile_args(self, user_metrics=True): del user_metrics self._assert_compile_was_called() kwargs = { 'loss': self.loss, 'metrics': self._compile_metrics, 'loss_weights': self.loss_weights, 'sample_weight_mode': self.sample_weight_mode, 'weighted_metrics': self._compile_weighted_metrics, } return kwargs @property def _compile_was_called(self): return self._v1_compile_was_called class DistributedCallbackModel(Model): """Model that is used for callbacks with tf.distribute.Strategy.""" def __init__(self, model): super(DistributedCallbackModel, self).__init__() self.optimizer = model.optimizer def set_original_model(self, orig_model): self._original_model = orig_model def save_weights(self, filepath, overwrite=True, save_format=None): self._replicated_model.save_weights(filepath, overwrite=overwrite, save_format=save_format) def save(self, filepath, overwrite=True, include_optimizer=True): # save weights from the distributed model to the original model distributed_model_weights = 
self.get_weights() self._original_model.set_weights(distributed_model_weights) # TODO(anjalisridhar): Do we need to save the original model here? # Saving the first replicated model works as well. self._original_model.save(filepath, overwrite=True, include_optimizer=False) def load_weights(self, filepath, by_name=False): self._original_model.load_weights(filepath, by_name=False) # Copy the weights from the original model to each of the replicated models. orig_model_weights = self._original_model.get_weights() distributed_training_utils_v1.set_weights( self._original_model._distribution_strategy, self, # pylint: disable=protected-access orig_model_weights) def __getattr__(self, item): # Allowed attributes of the model that can be accessed by the user # during a callback. if item not in ('_setattr_tracking', '_layers'): logging.warning('You are accessing attribute ' + item + ' of the ' 'DistributedCallbackModel that may not have been set ' 'correctly.') return super(DistributedCallbackModel, self).__getattr__(item) class _TrainingEndpoint: """A container for the training output/target and related entities. In the case of model with multiple outputs, there is a one-to-one mapping between model output (y_pred), model target (y_true), loss, metrics etc. By unifying these entities into one class, different entity can access information between each other, rather than currently access different list of attributes of the model. """ def __init__(self, output, output_name, loss_fn, loss_weight=None, training_target=None, output_loss_metric=None, sample_weight=None, sample_weight_mode=None): """Initialize the _TrainingEndpoint. Note that the output and output_name should be stable as long as the model structure doesn't change. The training_target suppose to be mutable since the information is provided via `compile()` Args: output: the output tensor of the model. output_name: the unique name of the output tensor. loss_fn: the loss function for the output tensor. 
loss_weight: float, the weights for the loss. training_target: the _TrainingTarget for the model. output_loss_metric: the metric object for the loss function. sample_weight: the weights for how a sample is weighted during metric and loss calculation. Could be None. sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for how the sample_weight is populated. """ self._output = output self._output_name = output_name self._loss_fn = loss_fn self._loss_weight = loss_weight self._training_target = training_target self._output_loss_metric = output_loss_metric self._sample_weight = sample_weight self._sample_weight_mode = sample_weight_mode @property def output(self): return self._output @property def output_name(self): return self._output_name @property def shape(self): return backend.int_shape(self.output) @property def loss_fn(self): return self._loss_fn @property def loss_weight(self): return self._loss_weight @loss_weight.setter def loss_weight(self, value): self._loss_weight = value @property def training_target(self): return self._training_target @training_target.setter def training_target(self, value): self._training_target = value def create_training_target(self, target, run_eagerly=False): """Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated. """ if self.has_training_target(): raise ValueError('The training_target field for the _TrainingEndpoint ' 'instance has already been populated') if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. 
self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output)) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + '_target', sparse=backend.is_sparse(self.output), dtype=target_dtype) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights) @property def output_loss_metric(self): return self._output_loss_metric @output_loss_metric.setter def output_loss_metric(self, value): self._output_loss_metric = value @property def sample_weight(self): return self._sample_weight @sample_weight.setter def sample_weight(self, value): self._sample_weight = value @property def sample_weight_mode(self): return self._sample_weight_mode @sample_weight_mode.setter def sample_weight_mode(self, value): self._sample_weight_mode = value def should_skip_target(self): return self._loss_fn is None def should_skip_target_weights(self): return (self.should_skip_target() or self.training_target is None or self.training_target.skip_target_weights) def has_training_target(self): return self.training_target is not None def has_feedable_training_target(self): return (not self.should_skip_target() and self.training_target is not None and self.training_target.feedable) def loss_name(self): if self._loss_fn is not None: return self._output_name + '_loss' return None @property def feed_output_shape(self): """The output shape for the feedable target.""" if not self.has_feedable_training_target(): return None if ((isinstance(self.loss_fn, losses.LossFunctionWrapper) and self.loss_fn.fn == losses.sparse_categorical_crossentropy)) or ( 
isinstance(self.loss_fn, losses.SparseCategoricalCrossentropy)): if backend.image_data_format() == 'channels_first': return (self.shape[0], 1) + self.shape[2:] else: return self.shape[:-1] + (1,) elif (not isinstance(self.loss_fn, losses.Loss) or (isinstance(self.loss_fn, losses.LossFunctionWrapper) and (getattr(losses, self.loss_fn.fn.__name__, None) is None))): # If the given loss is not an instance of the `Loss` class (custom # class) or if the loss function that is wrapped is not in the # `losses` module, then it is a user-defined loss and we make no # assumptions about it. return None else: return self.shape def sample_weights_mismatch(self): """Check if the sample weight and the mode match or not.""" # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( (self.sample_weight_mode is not None and self.sample_weight is None) or (self.sample_weight_mode is None and self.sample_weight is not None)) def populate_sample_weight(self, sample_weight, sample_weight_mode): """Populate the sample weight and based on the sample weight mode.""" if (sample_weight is None and (self.should_skip_target_weights() or sample_weight_mode is None or tf.executing_eagerly())): self._sample_weight = None return assert sample_weight_mode in ['temporal', 'samplewise'] if sample_weight_mode == 'temporal': default_value = [[1.]] shape = [None, None] else: # sample_weight_mode == 'samplewise' default_value = [1.] shape = [None] if sample_weight is not None: if not sample_weight.shape.is_compatible_with(shape): raise ValueError('Received sample weight with shape {}. 
Expected shape ' '{}.'.format(sample_weight.shape, shape)) self._sample_weight = sample_weight else: self._sample_weight = tf.compat.v1.placeholder_with_default( tf.constant(default_value, dtype=backend.floatx()), shape=shape, name=self.output_name + '_sample_weights') class _TrainingTarget: """Container for a target tensor (y_true) and its metadata (shape, loss...). Args: target: A target tensor for the model. It may be `None` if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If the target is None, the rest of the attributes will be None as well. feedable: Boolean, whether the target is feedable (requires data to be passed in `fit` or `train_on_batch`), or not (model compiled with `target_tensors` argument). skip_target_weights: Boolean, whether the target should be skipped during weights calculation. """ def __init__(self, target, feedable=False, skip_target_weights=True): self._target = target self._feedable = feedable self._skip_target_weights = skip_target_weights @property def target(self): return self._target @property def feedable(self): return self._feedable @property def skip_target_weights(self): return self._skip_target_weights def _is_symbolic_tensor(x): return tf.is_tensor(x) def _convert_scipy_sparse_tensor(value, expected_input): """Handle scipy sparse tensor conversions. This method takes a value 'value' and returns the proper conversion. If value is a scipy sparse tensor and the expected input is a dense tensor, we densify 'value'. If value is a scipy sparse tensor and the expected input is a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is not a scipy sparse tensor, or scipy is not imported, we pass it through unchanged. Args: value: An object that may be a scipy sparse tensor expected_input: The expected input placeholder. Returns: The possibly-converted 'value'. 
""" if issparse is not None and issparse(value): if backend.is_sparse(expected_input): sparse_coo = value.tocoo() row, col = sparse_coo.row, sparse_coo.col data, shape = sparse_coo.data, sparse_coo.shape indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1) return tf.SparseTensor(indices, data, shape) else: if tf.compat.v1.executing_eagerly_outside_functions(): # In TF2 we do not silently densify sparse matrices. raise ValueError('A SciPy sparse matrix was passed to a model ' 'that expects dense inputs. Please densify your ' 'inputs first, such as by calling `x.toarray().') return value.toarray() else: return value def _get_metrics_from_layers(layers): """Returns list of metrics from the given layers. This will not include the `compile` metrics of a model layer. Args: layers: List of layers. Returns: List of metrics. """ metrics = [] layers = layer_utils.filter_empty_layer_containers(layers) for layer in layers: if isinstance(layer, Model): # We cannot call 'metrics' on the model because we do not want to # include the metrics that were added in compile API of a nested model. metrics.extend(layer._metrics) # pylint: disable=protected-access metrics.extend(_get_metrics_from_layers(layer.layers)) else: metrics.extend(layer.metrics) return metrics def _non_none_constant_value(v): constant_value = tf.get_static_value(v) return constant_value if constant_value is not None else v
137,440
41.923485
111
py
keras
keras-master/keras/engine/compile_utils_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for compile utitilies.""" import tensorflow.compat.v2 as tf from keras import backend from keras import keras_parameterized from keras import losses as losses_mod from keras import metrics as metrics_mod from keras.engine import compile_utils class LossesContainerTest(keras_parameterized.TestCase): def test_single_loss(self): loss_container = compile_utils.LossesContainer('mse') y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5)) total_loss = loss_container(y_t, y_p) self.assertTrue(loss_container._built) self.assertLen(loss_container._losses, 1) self.assertEqual(total_loss.numpy(), 1.) self.assertLen(loss_container.metrics, 1) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 1.) loss_container.reset_state() self.assertEqual(loss_metric.result().numpy(), 0.) 
def test_loss_list(self): loss_container = compile_utils.LossesContainer(['mse', 'mae'], [1, 0.5]) y_t = [tf.ones((10, 1)), tf.zeros((10, 1))] y_p = [tf.ones((10, 1)), tf.ones((10, 1))] sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) total_loss = loss_container(y_t, y_p, sample_weight=sw) self.assertEqual(loss_container._output_names, ['output_1', 'output_2']) self.assertLen(loss_container._losses, 2) self.assertEqual(total_loss.numpy(), 0.25) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 0.25) output_1_metric = loss_container.metrics[1] self.assertEqual(output_1_metric.name, 'output_1_loss') self.assertEqual(output_1_metric.result().numpy(), 0) output_2_metric = loss_container.metrics[2] self.assertEqual(output_2_metric.name, 'output_2_loss') self.assertEqual(output_2_metric.result().numpy(), 0.5) loss_container.reset_state() self.assertEqual(loss_metric.result().numpy(), 0) self.assertEqual(output_1_metric.result().numpy(), 0) self.assertEqual(output_2_metric.result().numpy(), 0) def test_loss_dict(self): loss_container = compile_utils.LossesContainer( { 'out1': 'mse', 'out2': 'mae' }, { 'out1': 1, 'out2': 0.5 }) y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))} y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))} sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) total_loss = loss_container(y_t, y_p, sample_weight=sw) self.assertLen(loss_container._losses, 2) self.assertEqual(total_loss.numpy(), 0.25) self.assertLen(loss_container.metrics, 3) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 0.25) out1_metric = loss_container.metrics[1] self.assertEqual(out1_metric.name, 'out1_loss') self.assertEqual(out1_metric.result().numpy(), 0) out2_metric = loss_container.metrics[2] self.assertEqual(out2_metric.name, 'out2_loss') self.assertEqual(out2_metric.result().numpy(), 
0.5) loss_container.reset_state() self.assertEqual(loss_metric.result().numpy(), 0) self.assertEqual(out1_metric.result().numpy(), 0) self.assertEqual(out2_metric.result().numpy(), 0) def test_loss_partial_dict_with_output_names(self): loss_container = compile_utils.LossesContainer( {'out2': 'mae'}, {'out2': 1.}, output_names=['out1', 'out2']) y_t = [tf.ones((10, 1)), tf.zeros((10, 1))] y_p = [tf.ones((10, 1)), tf.ones((10, 1))] sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) total_loss = loss_container(y_t, y_p, sample_weight=sw) self.assertEqual(total_loss.numpy(), 0.5) self.assertLen(loss_container.metrics, 2) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 0.5) out2_metric = loss_container.metrics[1] self.assertEqual(out2_metric.name, 'out2_loss') self.assertEqual(out2_metric.result().numpy(), 0.5) def test_loss_dict_with_nones(self): loss_container = compile_utils.LossesContainer({ 'out1': None, 'out2': 'mae' }) y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))} y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))} sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) total_loss = loss_container(y_t, y_p, sample_weight=sw) self.assertEqual(total_loss.numpy(), 0.5) self.assertLen(loss_container.metrics, 2) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 0.5) out2_metric = loss_container.metrics[1] self.assertEqual(out2_metric.name, 'out2_loss') self.assertEqual(out2_metric.result().numpy(), 0.5) def test_nested_structure(self): loss_container = compile_utils.LossesContainer( { 'b': ['mse', None], 'a': 'mae' }, loss_weights={ 'b': [0.5, 0], 'a': 1 }) y_t = { 'b': [tf.ones((10, 1)), tf.zeros((10, 1))], 'a': tf.zeros((10, 1)) } y_p = { 'b': [tf.zeros((10, 1)), tf.zeros((10, 1))], 'a': tf.ones((10, 1)) } sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) total_loss = 
loss_container(y_t, y_p, sample_weight=sw) self.assertEqual(total_loss.numpy(), 0.75) self.assertLen(loss_container.metrics, 3) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 0.75) a_metric = loss_container.metrics[1] self.assertEqual(a_metric.name, 'a_loss') self.assertEqual(a_metric.result().numpy(), 0.5) b_1_metric = loss_container.metrics[2] self.assertEqual(b_1_metric.name, 'b_1_loss') self.assertEqual(b_1_metric.result().numpy(), 0.5) def test_broadcast_single_loss(self): loss_container = compile_utils.LossesContainer('mse') y_t = [tf.ones((10, 1)), tf.zeros((10, 1))] y_p = [tf.ones((10, 1)), tf.ones((10, 1))] sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) total_loss = loss_container(y_t, y_p, sample_weight=sw) self.assertEqual(total_loss.numpy(), 0.5) self.assertLen(loss_container.metrics, 3) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 0.5) output_1_metric = loss_container.metrics[1] self.assertEqual(output_1_metric.name, 'output_1_loss') self.assertEqual(output_1_metric.result().numpy(), 0.) output_2_metric = loss_container.metrics[2] self.assertEqual(output_2_metric.name, 'output_2_loss') self.assertEqual(output_2_metric.result().numpy(), 0.5) def test_missing_label_with_no_loss(self): # It's ok to exclude a label if that label has no # losses or metrics associated with it. loss_container = compile_utils.LossesContainer({ 'output1': 'mse', 'output3': 'mae' }) y_p = { 'output1': tf.convert_to_tensor([[0], [1], [2]]), 'output2': tf.convert_to_tensor([[3], [4], [5]]), 'output3': tf.convert_to_tensor([[6], [7], [8]]) } y_t = { 'output1': tf.convert_to_tensor([[1], [2], [3]]), 'output3': tf.convert_to_tensor([[4], [5], [6]]) } total_loss = loss_container(y_t, y_p) self.assertEqual(total_loss.numpy(), 3.) 
self.assertLen(loss_container.metrics, 3) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertEqual(loss_metric.result().numpy(), 3.) output_1_metric = loss_container.metrics[1] self.assertEqual(output_1_metric.name, 'output1_loss') self.assertEqual(output_1_metric.result().numpy(), 1.) output_3_metric = loss_container.metrics[2] self.assertEqual(output_3_metric.name, 'output3_loss') self.assertEqual(output_3_metric.result().numpy(), 2.) def test_mismatched_dtypes(self): y_t = tf.constant([1, 9, 2, -5], shape=(2, 2)) y_p = tf.constant([4, 8, 12, 8], shape=(2, 2), dtype=tf.float32) def my_mae(labels, preds): self.assertEqual(labels.dtype, tf.int32) self.assertEqual(preds.dtype, tf.float32) labels = tf.cast(labels, preds.dtype) return backend.mean(tf.abs(preds - labels), axis=-1) loss_container = compile_utils.LossesContainer(my_mae) total_loss = loss_container(y_t, y_p) self.assertEqual(total_loss.dtype, tf.float32) def test_integer_dtypes(self): y_t = tf.constant([1, 9, 2, -5], shape=(2, 2)) y_p = tf.constant([4, 8, 12, 8], shape=(2, 2), dtype=tf.int64) def my_mae(labels, preds): self.assertEqual(labels.dtype, tf.int64) self.assertEqual(preds.dtype, tf.int64) return backend.mean(tf.abs(preds - labels), axis=-1) loss_container = compile_utils.LossesContainer(my_mae) total_loss = loss_container(y_t, y_p) self.assertEqual(total_loss.dtype, tf.int64) def test_float_dtypes(self): y_t = tf.constant([1, 9, 2, -5], shape=(2, 2), dtype=tf.float32) y_p = tf.constant([4, 8, 12, 8], shape=(2, 2), dtype=tf.float64) def my_mae(labels, preds): self.assertEqual(labels.dtype, tf.float64) self.assertEqual(preds.dtype, tf.float64) return backend.mean(tf.abs(preds - labels), axis=-1) loss_container = compile_utils.LossesContainer(my_mae) total_loss = loss_container(y_t, y_p) self.assertEqual(total_loss.dtype, tf.float64) def test_loss_masking(self): loss_container = compile_utils.LossesContainer('mae') y_p = tf.constant([[[1], [1]], [[0], 
[0]]], dtype=tf.float32) y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32) y_p._keras_mask = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) total_loss = loss_container(y_t, y_p) self.assertAlmostEqual(total_loss.numpy(), .25) # sum over batch size self.assertLen(loss_container.metrics, 1) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertAlmostEqual(loss_metric.result().numpy(), .25) def test_loss_sample_weight(self): loss_container = compile_utils.LossesContainer('mae') y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32) y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32) sw = tf.constant([[.2, .3], [.5, 0]], dtype=tf.float32) total_loss = loss_container(y_t, y_p, sample_weight=sw) # (0 * .2 + 0 * .3 + 1 * .5 + 1 * 0) / 4 self.assertAlmostEqual(total_loss.numpy(), .125) self.assertLen(loss_container.metrics, 1) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertAlmostEqual(loss_metric.result().numpy(), .125) def test_loss_masking_sample_weight(self): loss_container = compile_utils.LossesContainer('mae') y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32) y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32) sw = tf.constant([[.2, .3], [.5, 0]], dtype=tf.float32) y_p._keras_mask = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) total_loss = loss_container(y_t, y_p, sample_weight=sw) # (0 * .2 + 1 * .5) / 4 self.assertAlmostEqual(total_loss.numpy(), .125) # sum over batch size self.assertLen(loss_container.metrics, 1) loss_metric = loss_container.metrics[0] self.assertEqual(loss_metric.name, 'loss') self.assertAlmostEqual(loss_metric.result().numpy(), .125) def test_custom_loss_callables(self): def custom_loss_fn(y_true, y_pred): return tf.reduce_sum(y_true - y_pred) class CustomLossClass: def __call__(self, y_true, y_pred): return tf.reduce_sum(y_true - y_pred) loss_container = compile_utils.LossesContainer( [custom_loss_fn, 
CustomLossClass()]) y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5)) loss_container(y_t, y_p) self.assertEqual(loss_container._losses[0].name, 'custom_loss_fn') self.assertEqual(loss_container._losses[1].name, 'custom_loss_class') def test_ragged_tensor_output(self): """Ensure that ragged tensors can be passed as targets and predictions.""" def custom_loss_fn(y_true, y_pred): """MSE supports RaggedTensors directly.""" return losses_mod.mse(y_true, y_pred) class CustomLossClass(losses_mod.Loss): """User defined loss function must implement RaggedTensor support.""" def call(self, y_true, y_pred): losses = tf.ragged.map_flat_values( tf.math.squared_difference, y_true, y_pred) return tf.reduce_mean(losses) loss_container = compile_utils.LossesContainer( [custom_loss_fn, CustomLossClass()]) v_t = tf.constant([[3., 4.], [1., 2.], [3., 5.]]) v_p = tf.constant([[3.1, 4.], [1., 2.], [3., 5.]]) y_t = tf.expand_dims( tf.RaggedTensor.from_row_splits(v_t, [0, 2, 3]), 0) y_p = tf.expand_dims( tf.RaggedTensor.from_row_splits(v_p, [0, 2, 3]), 0) loss_container(y_t, y_p) self.assertEqual(loss_container._losses[0].name, 'custom_loss_fn') class MetricsContainerTest(keras_parameterized.TestCase): def test_single_metric(self): metric_container = compile_utils.MetricsContainer('mse') y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5)) metric_container.update_state(y_t, y_p) self.assertLen(metric_container.metrics, 1) metric = metric_container.metrics[0] self.assertEqual(metric.name, 'mse') self.assertEqual(metric.result().numpy(), 1.) metric_container.reset_state() self.assertEqual(metric.result().numpy(), 0.) def test_list_of_metrics_one_output(self): metric_container = compile_utils.MetricsContainer(['mse', 'mae']) y_t, y_p = 2 * tf.ones((10, 5)), tf.zeros((10, 5)) metric_container.update_state(y_t, y_p) self.assertLen(metric_container.metrics, 2) mse_metric = metric_container.metrics[0] self.assertEqual(mse_metric.name, 'mse') self.assertEqual(mse_metric.result().numpy(), 4.) 
mae_metric = metric_container.metrics[1] self.assertEqual(mae_metric.name, 'mae') self.assertEqual(mae_metric.result().numpy(), 2.) metric_container.reset_state() self.assertEqual(mse_metric.result().numpy(), 0.) self.assertEqual(mae_metric.result().numpy(), 0.) def test_list_of_metrics_list_of_outputs(self): metric_container = compile_utils.MetricsContainer( metrics=['mse', 'mae'], # Should broadcast to both outputs. weighted_metrics=['accuracy']) # Should broadcast to both outputs. y_t = [tf.ones((10, 1)), tf.zeros((10, 1))] y_p = [tf.ones((10, 1)), 2 * tf.ones((10, 1))] sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) metric_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metric_container.metrics, 6) mse_metric = metric_container.metrics[0] self.assertEqual(mse_metric.name, 'output_1_mse') self.assertEqual(mse_metric.result().numpy(), 0.) mse_metric = metric_container.metrics[1] self.assertEqual(mse_metric.name, 'output_1_mae') self.assertEqual(mse_metric.result().numpy(), 0.) acc_metric_1 = metric_container.metrics[2] self.assertEqual(acc_metric_1.name, 'output_1_accuracy') self.assertEqual(acc_metric_1.result().numpy(), 1.) self.assertEqual(acc_metric_1._fn, metrics_mod.binary_accuracy) mae_metric = metric_container.metrics[3] self.assertEqual(mae_metric.name, 'output_2_mse') self.assertEqual(mae_metric.result().numpy(), 4.) mae_metric = metric_container.metrics[4] self.assertEqual(mae_metric.name, 'output_2_mae') self.assertEqual(mae_metric.result().numpy(), 2.) acc_metric_2 = metric_container.metrics[5] self.assertEqual(acc_metric_2.name, 'output_2_accuracy') self.assertEqual(acc_metric_2.result().numpy(), 0.) 
self.assertEqual(acc_metric_2._fn, metrics_mod.binary_accuracy) weighted_metrics = metric_container.weighted_metrics self.assertLen(weighted_metrics, 2) self.assertEqual(weighted_metrics[0].name, 'output_1_accuracy') self.assertEqual(weighted_metrics[1].name, 'output_2_accuracy') unweighted_metrics = metric_container.unweighted_metrics self.assertLen(unweighted_metrics, 4) self.assertEqual(unweighted_metrics[0].name, 'output_1_mse') self.assertEqual(unweighted_metrics[1].name, 'output_1_mae') self.assertEqual(unweighted_metrics[2].name, 'output_2_mse') self.assertEqual(unweighted_metrics[3].name, 'output_2_mae') def test_metric_dict(self): metric_container = compile_utils.MetricsContainer( metrics={ 'out1': 'mse', 'out2': 'mae' }, weighted_metrics={ 'out1': 'mse', 'out2': 'mae' }) y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))} y_p = {'out1': tf.ones((10, 1)), 'out2': 2 * tf.ones((10, 1))} sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) metric_container.update_state(y_t, y_p, sample_weight=sw) mse_metric = metric_container.metrics[0] self.assertEqual(mse_metric.name, 'out1_mse') self.assertEqual(mse_metric.result().numpy(), 0.) weighted_mse_metric = metric_container.metrics[1] self.assertEqual(weighted_mse_metric.name, 'out1_weighted_mse') self.assertEqual(weighted_mse_metric.result().numpy(), 0.) mae_metric = metric_container.metrics[2] self.assertEqual(mae_metric.name, 'out2_mae') self.assertEqual(mae_metric.result().numpy(), 2.) weighted_mae_metric = metric_container.metrics[3] self.assertEqual(weighted_mae_metric.name, 'out2_weighted_mae') self.assertEqual(weighted_mae_metric.result().numpy(), 2.) metric_container.reset_state() self.assertEqual(mse_metric.result().numpy(), 0.) self.assertEqual(weighted_mse_metric.result().numpy(), 0.) self.assertEqual(mae_metric.result().numpy(), 0.) self.assertEqual(weighted_mae_metric.result().numpy(), 0.) 
def test_metric_partial_dict_with_output_names(self): metric_container = compile_utils.MetricsContainer( {'out2': 'mae'}, output_names=['out1', 'out2']) y_t = [tf.ones((10, 1)), tf.zeros((10, 1))] y_p = [tf.ones((10, 1)), tf.ones((10, 1))] sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) metric_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metric_container.metrics, 1) mae_metric = metric_container.metrics[0] self.assertEqual(mae_metric.name, 'out2_mae') self.assertEqual(mae_metric.result().numpy(), 1.) def test_metric_partial_dict_with_nones(self): metric_container = compile_utils.MetricsContainer({ 'out1': None, 'out2': 'mae' }) y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))} y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))} sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) metric_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metric_container.metrics, 1) mae_metric = metric_container.metrics[0] self.assertEqual(mae_metric.name, 'out2_mae') self.assertEqual(mae_metric.result().numpy(), 1.) def test_nested_structure(self): metric_container = compile_utils.MetricsContainer( metrics={ 'b': ['mse', None], 'a': 'mae' }, weighted_metrics={ 'b': [None, None], 'a': 'mse' }) y_t = { 'b': [2 * tf.ones((10, 1)), tf.zeros((10, 1))], 'a': tf.zeros((10, 1)) } y_p = { 'b': [tf.zeros((10, 1)), tf.zeros((10, 1))], 'a': tf.ones((10, 1)) } sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) metric_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metric_container.metrics, 3) a_mae_metric = metric_container.metrics[0] self.assertEqual(a_mae_metric.name, 'a_mae') self.assertEqual(a_mae_metric.result().numpy(), 1.) weighted_a_mae_metric = metric_container.metrics[1] self.assertEqual(weighted_a_mae_metric.name, 'a_mse') self.assertEqual(weighted_a_mae_metric.result().numpy(), 1.) 
b_1_mse_metric = metric_container.metrics[2] self.assertEqual(b_1_mse_metric.name, 'b_1_mse') self.assertEqual(b_1_mse_metric.result().numpy(), 4.) def test_crossentropy(self): metric_container = compile_utils.MetricsContainer('crossentropy') y_t, y_p = tf.ones((10, 1)), tf.ones((10, 1)) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.binary_crossentropy) metric_container = compile_utils.MetricsContainer('crossentropy') y_t, y_p = tf.ones((10, 1)), tf.ones((10, 20)) self.assertEqual(y_p.shape.as_list()[-1], 20) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.sparse_categorical_crossentropy) metric_container = compile_utils.MetricsContainer('crossentropy') y_t, y_p = tf.ones((10, 20)), tf.ones((10, 20)) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.categorical_crossentropy) def test_accuracy(self): metric_container = compile_utils.MetricsContainer('accuracy') y_t, y_p = tf.ones((10, 1)), tf.ones((10, 1)) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.binary_accuracy) metric_container = compile_utils.MetricsContainer('Accuracy') y_t, y_p = tf.ones((10, 1)), tf.ones((10, 1)) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.binary_accuracy) metric_container = compile_utils.MetricsContainer('accuracy') y_t, y_p = tf.ones((10, 1)), tf.ones((10, 20)) self.assertEqual(y_p.shape.as_list()[-1], 20) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.sparse_categorical_accuracy) metric_container = compile_utils.MetricsContainer('accuracy') y_t, y_p = tf.ones((10, 20)), tf.ones((10, 20)) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0]._fn, metrics_mod.categorical_accuracy) def test_metric_weighting(self): metric_container = 
compile_utils.MetricsContainer( metrics=['mae'], weighted_metrics=['mae']) y_t = tf.convert_to_tensor([[0], [3], [0]]) y_p = tf.convert_to_tensor([[0], [0], [0]]) sw = tf.convert_to_tensor([[1], [0], [1]]) metric_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metric_container.metrics, 2) mae_metric = metric_container.metrics[0] self.assertEqual(mae_metric.name, 'mae') self.assertEqual(mae_metric.result().numpy(), 1.) weighted_mae_metric = metric_container.metrics[1] self.assertEqual(weighted_mae_metric.name, 'weighted_mae') self.assertEqual(weighted_mae_metric.result().numpy(), 0.) def test_broadcast_metrics_to_dict(self): metric_container = compile_utils.MetricsContainer(metrics=['mae']) y_p = {'output': tf.convert_to_tensor([[0], [1], [2]])} y_t = {'output': tf.convert_to_tensor([[1], [2], [3]])} metric_container.update_state(y_t, y_p) mae_metric = metric_container.metrics[0] self.assertEqual(mae_metric.name, 'mae') self.assertEqual(mae_metric.result().numpy(), 1.) def test_broadcast_metrics_to_dict_with_output_names(self): metric_container = compile_utils.MetricsContainer( metrics=['mae'], output_names=['output']) y_p = tf.convert_to_tensor([[0], [1], [2]]) y_t = {'output': tf.convert_to_tensor([[1], [2], [3]])} metric_container.update_state(y_t, y_p) mae_metric = metric_container.metrics[0] self.assertEqual(mae_metric.name, 'mae') self.assertEqual(mae_metric.result().numpy(), 1.) def test_missing_label_with_no_metrics(self): # It's ok to exclude a label if that label has no # losses or metrics associated with it. 
metric_container = compile_utils.MetricsContainer(metrics={ 'output1': 'mae', 'output3': 'mse' }) y_p = { 'output1': tf.convert_to_tensor([[0], [1], [2]]), 'output2': tf.convert_to_tensor([[3], [4], [5]]), 'output3': tf.convert_to_tensor([[6], [7], [8]]) } y_t = { 'output1': tf.convert_to_tensor([[1], [2], [3]]), 'output3': tf.convert_to_tensor([[4], [5], [6]]) } metric_container.update_state(y_t, y_p) self.assertLen(metric_container.metrics, 2) mae_metric = metric_container.metrics[0] self.assertEqual(mae_metric.name, 'output1_mae') self.assertEqual(mae_metric.result().numpy(), 1.) mse_metric = metric_container.metrics[1] self.assertEqual(mse_metric.name, 'output3_mse') self.assertEqual(mse_metric.result().numpy(), 4.) def test_metrics_masking(self): metrics_container = compile_utils.MetricsContainer( metrics=['mae'], weighted_metrics=['mse']) y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32) y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32) y_p._keras_mask = tf.constant([[1, 1], [0, 0]], dtype=tf.float32) metrics_container.update_state(y_t, y_p) self.assertLen(metrics_container.metrics, 2) mae_metric = metrics_container.metrics[0] self.assertEqual(mae_metric.name, 'mae') self.assertAlmostEqual(mae_metric.result().numpy(), 0) weighted_mae_metric = metrics_container.metrics[1] self.assertEqual(weighted_mae_metric.name, 'mse') self.assertAlmostEqual(weighted_mae_metric.result().numpy(), 0) def test_metrics_sample_weight(self): metrics_container = compile_utils.MetricsContainer( metrics=['mae'], weighted_metrics=['mse']) y_p = tf.constant([[[1], [1]], [[0], [1]]], dtype=tf.float32) y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32) sw = tf.constant([[.2, .3], [.5, 0]], dtype=tf.float32) metrics_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metrics_container.metrics, 2) mae_metric = metrics_container.metrics[0] self.assertEqual(mae_metric.name, 'mae') self.assertAlmostEqual(mae_metric.result().numpy(), .25) # 1 / 
4 weighted_mae_metric = metrics_container.metrics[1] self.assertEqual(weighted_mae_metric.name, 'mse') self.assertAlmostEqual(weighted_mae_metric.result().numpy(), .5) # .5 / 1 def test_metrics_masking_sample_weight(self): metrics_container = compile_utils.MetricsContainer( metrics=['mae'], weighted_metrics=['mse']) y_p = tf.constant([[[1], [1]], [[0], [1]]], dtype=tf.float32) y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32) sw = tf.constant([[.3, .2], [.2, .3]], dtype=tf.float32) y_p._keras_mask = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) metrics_container.update_state(y_t, y_p, sample_weight=sw) self.assertLen(metrics_container.metrics, 2) mae_metric = metrics_container.metrics[0] self.assertEqual(mae_metric.name, 'mae') self.assertAlmostEqual(mae_metric.result().numpy(), .5) # 1 / .5 weighted_mae_metric = metrics_container.metrics[1] self.assertEqual(weighted_mae_metric.name, 'mse') self.assertAlmostEqual(weighted_mae_metric.result().numpy(), .2 / .5) def test_loss_class_as_metric_with_distribution(self): distribution = tf.distribute.OneDeviceStrategy('/device:CPU:0') with distribution.scope(): metric_container = compile_utils.MetricsContainer( losses_mod.MeanSquaredError()) y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5)) metric_container.update_state(y_t, y_p) self.assertLen(metric_container.metrics, 1) metric = metric_container.metrics[0] self.assertEqual(metric.name, 'mean_squared_error') self.assertEqual(metric.result().numpy(), 1.) 
def test_custom_metric_callables(self): def custom_metric_fn(y_true, y_pred): return tf.reduce_sum(y_true - y_pred) class CustomMetricClass: def __call__(self, y_true, y_pred): return tf.reduce_sum(y_true - y_pred) metric_container = compile_utils.MetricsContainer( [custom_metric_fn, CustomMetricClass()]) y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5)) metric_container.update_state(y_t, y_p) self.assertEqual(metric_container.metrics[0].name, 'custom_metric_fn') self.assertEqual(metric_container.metrics[1].name, 'custom_metric_class') def test_reset_state_existing_metric_before_built(self): metric = metrics_mod.Mean() metric.update_state([2.0, 4.0]) self.assertEqual(metric.result().numpy(), 3.0) metric_container = compile_utils.MetricsContainer(metric) metric_container.reset_state() self.assertEqual(metric.result().numpy(), 0.0) if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
30,265
36.8325
80
py
keras
keras-master/keras/engine/input_spec_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """InputSpec tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf from keras.engine import input_spec class InputSpecTest(tf.test.TestCase): def test_axes_initialization(self): input_spec.InputSpec(shape=[1, None, 2, 3], axes={3: 5, '2': 2}) with self.assertRaisesRegex(ValueError, 'Axis 4 is greater than'): input_spec.InputSpec(shape=[1, None, 2, 3], axes={4: 5}) with self.assertRaisesRegex(TypeError, 'keys in axes must be integers'): input_spec.InputSpec(shape=[1, None, 2, 3], axes={'string': 5}) class InputSpecToTensorShapeTest(tf.test.TestCase): def test_defined_shape(self): spec = input_spec.InputSpec(shape=[1, None, 2, 3]) self.assertAllEqual( [1, None, 2, 3], input_spec.to_tensor_shape(spec).as_list()) def test_defined_ndims(self): spec = input_spec.InputSpec(ndim=5) self.assertAllEqual( [None] * 5, input_spec.to_tensor_shape(spec).as_list()) spec = input_spec.InputSpec(ndim=0) self.assertAllEqual( [], input_spec.to_tensor_shape(spec).as_list()) spec = input_spec.InputSpec(ndim=3, axes={1: 3, -1: 2}) self.assertAllEqual( [None, 3, 2], input_spec.to_tensor_shape(spec).as_list()) def test_undefined_shapes(self): spec = input_spec.InputSpec(max_ndim=5) with self.assertRaisesRegex(ValueError, 
'unknown TensorShape'): input_spec.to_tensor_shape(spec).as_list() spec = input_spec.InputSpec(min_ndim=5, max_ndim=5) with self.assertRaisesRegex(ValueError, 'unknown TensorShape'): input_spec.to_tensor_shape(spec).as_list() if __name__ == '__main__': tf.test.main()
2,413
34.5
80
py
keras
keras-master/keras/engine/input_spec.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access # pylint: disable=g-classes-have-attributes """Contains the InputSpec class.""" import tensorflow.compat.v2 as tf from keras import backend from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util.tf_export import tf_export @keras_export('keras.layers.InputSpec', v1=['keras.layers.InputSpec', 'keras.__internal__.legacy.layers.InputSpec']) @tf_export(v1=['layers.InputSpec']) class InputSpec: """Specifies the rank, dtype and shape of every input to a layer. Layers can expose (if appropriate) an `input_spec` attribute: an instance of `InputSpec`, or a nested structure of `InputSpec` instances (one per input tensor). These objects enable the layer to run input compatibility checks for input structure, input rank, input shape, and input dtype. A None entry in a shape is compatible with any dimension, a None shape is compatible with any shape. Args: dtype: Expected DataType of the input. shape: Shape tuple, expected shape of the input (may include None for unchecked axes). Includes the batch size. ndim: Integer, expected rank of the input. max_ndim: Integer, maximum rank of the input. min_ndim: Integer, minimum rank of the input. axes: Dictionary mapping integer axes to a specific dimension value. 
allow_last_axis_squeeze: If True, then allow inputs of rank N+1 as long as the last axis of the input is 1, as well as inputs of rank N-1 as long as the last axis of the spec is 1. name: Expected key corresponding to this input when passing data as a dictionary. Example: ```python class MyLayer(Layer): def __init__(self): super(MyLayer, self).__init__() # The layer will accept inputs with shape (?, 28, 28) & (?, 28, 28, 1) # and raise an appropriate error message otherwise. self.input_spec = InputSpec( shape=(None, 28, 28, 1), allow_last_axis_squeeze=True) ``` """ def __init__(self, dtype=None, shape=None, ndim=None, max_ndim=None, min_ndim=None, axes=None, allow_last_axis_squeeze=False, name=None): self.dtype = tf.as_dtype(dtype).name if dtype is not None else None shape = tf.TensorShape(shape) if shape.rank is None: shape = None else: shape = tuple(shape.as_list()) if shape is not None: self.ndim = len(shape) self.shape = shape else: self.ndim = ndim self.shape = None self.max_ndim = max_ndim self.min_ndim = min_ndim self.name = name self.allow_last_axis_squeeze = allow_last_axis_squeeze try: axes = axes or {} self.axes = {int(k): axes[k] for k in axes} except (ValueError, TypeError): raise TypeError('The keys in axes must be integers.') if self.axes and (self.ndim is not None or self.max_ndim is not None): max_dim = (self.ndim if self.ndim else self.max_ndim) - 1 max_axis = max(self.axes) if max_axis > max_dim: raise ValueError('Axis {} is greater than the maximum allowed value: {}' .format(max_axis, max_dim)) def __repr__(self): spec = [('dtype=' + str(self.dtype)) if self.dtype else '', ('shape=' + str(self.shape)) if self.shape else '', ('ndim=' + str(self.ndim)) if self.ndim else '', ('max_ndim=' + str(self.max_ndim)) if self.max_ndim else '', ('min_ndim=' + str(self.min_ndim)) if self.min_ndim else '', ('axes=' + str(self.axes)) if self.axes else ''] return 'InputSpec(%s)' % ', '.join(x for x in spec if x) def get_config(self): return { 'dtype': self.dtype, 
'shape': self.shape, 'ndim': self.ndim, 'max_ndim': self.max_ndim, 'min_ndim': self.min_ndim, 'axes': self.axes} @classmethod def from_config(cls, config): return cls(**config) def to_tensor_shape(spec): """Returns a tf.TensorShape object that matches the shape specifications. If the InputSpec's shape or ndim is defined, this method will return a fully or partially-known shape. Otherwise, the returned TensorShape is None. Args: spec: an InputSpec object. Returns: a tf.TensorShape object """ if spec.ndim is None and spec.shape is None: return tf.TensorShape(None) elif spec.shape is not None: return tf.TensorShape(spec.shape) else: shape = [None] * spec.ndim for a in spec.axes: shape[a] = spec.axes[a] # Assume that axes is defined return tf.TensorShape(shape) def assert_input_compatibility(input_spec, inputs, layer_name): """Checks compatibility between the layer and provided inputs. This checks that the tensor(s) `inputs` verify the input assumptions of a layer (if any). If not, a clear and actional exception gets raised. Args: input_spec: An InputSpec instance, list of InputSpec instances, a nested structure of InputSpec instances, or None. inputs: Input tensor, list of input tensors, or a nested structure of input tensors. layer_name: String, name of the layer (for error message formatting). Raises: ValueError: in case of mismatch between the provided inputs and the expectations of the layer. """ if not input_spec: return input_spec = tf.nest.flatten(input_spec) if isinstance(inputs, dict): # Flatten `inputs` by reference order if input spec names are provided names = [spec.name for spec in input_spec] if all(names): list_inputs = [] for name in names: if name not in inputs: raise ValueError(f'Missing data for input "{name}". ' 'You passed a data dictionary with keys ' f'{list(inputs.keys())}. 
' f'Expected the following keys: {names}') list_inputs.append(inputs[name]) inputs = list_inputs inputs = tf.nest.flatten(inputs) for x in inputs: # Having a shape/dtype is the only commonality of the various tensor-like # objects that may be passed. The most common kind of invalid type we are # guarding for is a Layer instance (Functional API), which does not # have a `shape` attribute. if not hasattr(x, 'shape'): raise TypeError(f'Inputs to a layer should be tensors. Got: {x}') if len(inputs) != len(input_spec): raise ValueError(f'Layer "{layer_name}" expects {len(input_spec)} input(s),' f' but it received {len(inputs)} input tensors. ' f'Inputs received: {inputs}') for input_index, (x, spec) in enumerate(zip(inputs, input_spec)): if spec is None: continue shape = tf.TensorShape(x.shape) if shape.rank is None: return # Check ndim. if spec.ndim is not None and not spec.allow_last_axis_squeeze: ndim = shape.rank if ndim != spec.ndim: raise ValueError(f'Input {input_index} of layer "{layer_name}" ' 'is incompatible with the layer: ' f'expected ndim={spec.ndim}, found ndim={ndim}. ' f'Full shape received: {tuple(shape)}') if spec.max_ndim is not None: ndim = x.shape.rank if ndim is not None and ndim > spec.max_ndim: raise ValueError(f'Input {input_index} of layer "{layer_name}" ' 'is incompatible with the layer: ' f'expected max_ndim={spec.max_ndim}, ' f'found ndim={ndim}') if spec.min_ndim is not None: ndim = x.shape.rank if ndim is not None and ndim < spec.min_ndim: raise ValueError(f'Input {input_index} of layer "{layer_name}" ' 'is incompatible with the layer: ' f'expected min_ndim={spec.min_ndim}, ' f'found ndim={ndim}. ' f'Full shape received: {tuple(shape)}') # Check dtype. if spec.dtype is not None: if x.dtype.name != spec.dtype: raise ValueError(f'Input {input_index} of layer "{layer_name}" ' 'is incompatible with the layer: ' f'expected dtype={spec.dtype}, ' f'found dtype={x.dtype}') # Check specific shape axes. 
shape_as_list = shape.as_list() if spec.axes: for axis, value in spec.axes.items(): if hasattr(value, 'value'): value = value.value if value is not None and shape_as_list[int(axis)] not in {value, None}: raise ValueError( f'Input {input_index} of layer "{layer_name}" is ' f'incompatible with the layer: expected axis {axis}' f'of input shape to have value {value}, ' f'but received input with shape {display_shape(x.shape)}') # Check shape. if spec.shape is not None and shape.rank is not None: spec_shape = spec.shape if spec.allow_last_axis_squeeze: if shape_as_list and shape_as_list[-1] == 1: shape_as_list = shape_as_list[:-1] if spec_shape and spec_shape[-1] == 1: spec_shape = spec_shape[:-1] for spec_dim, dim in zip(spec_shape, shape_as_list): if spec_dim is not None and dim is not None: if spec_dim != dim: raise ValueError(f'Input {input_index} of layer "{layer_name}" is ' 'incompatible with the layer: ' f'expected shape={spec.shape}, ' f'found shape={display_shape(x.shape)}') def display_shape(shape): return str(tuple(shape.as_list())) def to_tensor_spec(input_spec, default_dtype=None): """Converts a Keras InputSpec object to a TensorSpec.""" default_dtype = default_dtype or backend.floatx() if isinstance(input_spec, InputSpec): dtype = input_spec.dtype or default_dtype return tf.TensorSpec(to_tensor_shape(input_spec), dtype) return tf.TensorSpec(None, default_dtype)
10,875
37.842857
80
py
keras
keras-master/keras/engine/functional_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #,============================================================================ """Tests for layer graphs construction & handling.""" import tensorflow.compat.v2 as tf import warnings import numpy as np from keras import backend from keras import combinations from keras import initializers from keras import keras_parameterized from keras import layers from keras import losses from keras import models from keras import testing_utils from keras.engine import base_layer from keras.engine import functional from keras.engine import input_layer as input_layer_lib from keras.engine import sequential from keras.engine import training as training_lib from keras.utils import layer_utils from keras.utils import tf_utils from tensorflow.python.training.tracking.util import Checkpoint class NetworkConstructionTest(keras_parameterized.TestCase): def test_default_model_name(self): inputs = input_layer_lib.Input(shape=(1,)) outputs = layers.Dense(1, activation='relu')(inputs) model = training_lib.Model(inputs=inputs, outputs=outputs) self.assertEqual(model.name, 'model') model_2 = training_lib.Model(inputs=inputs, outputs=outputs) self.assertEqual(model_2.name, 'model_1') model_3 = training_lib.Model(inputs=inputs, outputs=outputs) self.assertEqual(model_3.name, 'model_2') def test_get_updates(self): class MyLayer(layers.Layer): def build(self, input_shape): self.a = self.add_variable('a', 
(1, 1), 'float32', trainable=False) self.b = self.add_variable('b', (1, 1), 'float32', trainable=False) self.add_update(tf.compat.v1.assign_add(self.a, [[1.]], name='unconditional_update')) self.built = True def call(self, inputs): self.add_update(tf.compat.v1.assign_add(self.b, inputs, name='conditional_update'), inputs=True) return inputs + 1 with tf.Graph().as_default(): x1 = input_layer_lib.Input(shape=(1,)) layer = MyLayer() _ = layer(x1) self.assertEqual(len(layer.updates), 2) x2 = input_layer_lib.Input(shape=(1,)) y2 = layer(x2) self.assertEqual(len(layer.updates), 3) network = functional.Functional(x2, y2) self.assertEqual(len(network.updates), 3) x3 = input_layer_lib.Input(shape=(1,)) _ = layer(x3) self.assertEqual(len(network.updates), 4) x4 = input_layer_lib.Input(shape=(1,)) _ = network(x4) self.assertEqual(len(network.updates), 5) network.add_update(tf.compat.v1.assign_add(layer.a, [[1]])) self.assertEqual(len(network.updates), 6) network.add_update(tf.compat.v1.assign_add(layer.b, x4), inputs=True) self.assertEqual(len(network.updates), 7) @combinations.generate(combinations.combine(mode=['graph'])) def test_get_updates_bn(self): x1 = input_layer_lib.Input(shape=(1,)) layer = layers.BatchNormalization() _ = layer(x1) self.assertEqual(len(layer.updates), 2) def test_get_layer(self): # create a simple network x = input_layer_lib.Input(shape=(32,)) dense_a = layers.Dense(4, name='dense_a') dense_b = layers.Dense(2, name='dense_b') y = dense_b(dense_a(x)) network = functional.Functional(x, y, name='dense_network') # test various get_layer by index self.assertEqual(network.get_layer(index=1), dense_a) # test invalid get_layer by index with self.assertRaisesRegex( ValueError, 'Was asked to retrieve layer at index ' + str(3) + ' but model only has ' + str(len(network.layers)) + ' layers.'): network.get_layer(index=3) # test that only one between name and index is requested with self.assertRaisesRegex(ValueError, 'Provide only a layer name or a layer index'): 
network.get_layer(index=1, name='dense_b') # test that a name or an index must be provided with self.assertRaisesRegex(ValueError, 'Provide either a layer name or layer index.'): network.get_layer() # test various get_layer by name self.assertEqual(network.get_layer(name='dense_a'), dense_a) # test invalid get_layer by name with self.assertRaisesRegex(ValueError, 'No such layer: dense_c.'): network.get_layer(name='dense_c') @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testTopologicalAttributes(self): # test layer attributes / methods related to cross-layer connectivity. a = input_layer_lib.Input(shape=(32,), name='input_a') b = input_layer_lib.Input(shape=(32,), name='input_b') # test input, output, input_shape, output_shape test_layer = layers.Dense(16, name='test_layer') a_test = test_layer(a) self.assertIs(test_layer.input, a) self.assertIs(test_layer.output, a_test) self.assertEqual(test_layer.input_shape, (None, 32)) self.assertEqual(test_layer.output_shape, (None, 16)) # test `get_*_at` methods dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) self.assertIs(dense.get_input_at(0), a) self.assertIs(dense.get_input_at(1), b) self.assertIs(dense.get_output_at(0), a_2) self.assertIs(dense.get_output_at(1), b_2) self.assertEqual(dense.get_input_shape_at(0), (None, 32)) self.assertEqual(dense.get_input_shape_at(1), (None, 32)) self.assertEqual(dense.get_output_shape_at(0), (None, 16)) self.assertEqual(dense.get_output_shape_at(1), (None, 16)) # Test invalid value for attribute retrieval. 
with self.assertRaises(ValueError): dense.get_input_at(2) with self.assertRaises(AttributeError): new_dense = layers.Dense(16) _ = new_dense.input with self.assertRaises(AttributeError): new_dense = layers.Dense(16) _ = new_dense.output with self.assertRaises(AttributeError): new_dense = layers.Dense(16) _ = new_dense.output_shape with self.assertRaises(AttributeError): new_dense = layers.Dense(16) _ = new_dense.input_shape with self.assertRaises(AttributeError): new_dense = layers.Dense(16) a = input_layer_lib.Input(shape=(3, 32)) a = input_layer_lib.Input(shape=(5, 32)) a_2 = dense(a) b_2 = dense(b) _ = new_dense.input_shape with self.assertRaises(AttributeError): new_dense = layers.Dense(16) a = input_layer_lib.Input(shape=(3, 32)) a = input_layer_lib.Input(shape=(5, 32)) a_2 = dense(a) b_2 = dense(b) _ = new_dense.output_shape def _assertAllIs(self, a, b): self.assertTrue(all(x is y for x, y in zip(a, b))) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testTopologicalAttributesMultiOutputLayer(self): class PowersLayer(layers.Layer): def call(self, inputs): return [inputs**2, inputs**3] x = input_layer_lib.Input(shape=(32,)) test_layer = PowersLayer() p1, p2 = test_layer(x) # pylint: disable=not-callable self.assertIs(test_layer.input, x) self._assertAllIs(test_layer.output, [p1, p2]) self.assertEqual(test_layer.input_shape, (None, 32)) self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testTopologicalAttributesMultiInputLayer(self): class AddLayer(layers.Layer): def call(self, inputs): assert len(inputs) == 2 return inputs[0] + inputs[1] a = input_layer_lib.Input(shape=(32,)) b = input_layer_lib.Input(shape=(32,)) test_layer = AddLayer() y = test_layer([a, b]) # pylint: disable=not-callable self._assertAllIs(test_layer.input, [a, b]) self.assertIs(test_layer.output, y) self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)]) 
self.assertEqual(test_layer.output_shape, (None, 32)) def testBasicNetwork(self): # Builds the smallest possible Functional network (Input -> Dense) and # checks its attributes, weight bookkeeping, and callability. with tf.Graph().as_default(): # minimum viable network x = input_layer_lib.Input(shape=(32,)) dense = layers.Dense(2) y = dense(x) network = functional.Functional(x, y, name='dense_network') # test basic attributes self.assertEqual(network.name, 'dense_network') self.assertEqual(len(network.layers), 2) # InputLayer + Dense self.assertEqual(network.layers[1], dense) self._assertAllIs(network.weights, dense.weights) self._assertAllIs(network.trainable_weights, dense.trainable_weights) self._assertAllIs(network.non_trainable_weights, dense.non_trainable_weights) # test callability on Input x_2 = input_layer_lib.Input(shape=(32,)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 2]) # test callability on regular tensor x_2 = tf.compat.v1.placeholder(dtype='float32', shape=(None, 32)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 2]) # test network `trainable` attribute network.trainable = False self._assertAllIs(network.weights, dense.weights) self.assertEqual(network.trainable_weights, []) self._assertAllIs(network.non_trainable_weights, dense.trainable_weights + dense.non_trainable_weights) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_trainable_weights(self): # Toggling `trainable` on the whole model or on an individual layer should # move its weights between the trainable and non-trainable collections. a = layers.Input(shape=(2,)) b = layers.Dense(1)(a) model = training_lib.Model(a, b) weights = model.weights self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) model.trainable = True self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.layers[1].trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) # sequential model model = sequential.Sequential() model.add(layers.Dense(1, input_dim=2)) weights = model.weights self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) model.trainable = True self._assertAllIs(model.trainable_weights, weights) self.assertListEqual(model.non_trainable_weights, []) model.layers[0].trainable = False self.assertListEqual(model.trainable_weights, []) self._assertAllIs(model.non_trainable_weights, weights) def test_layer_call_arguments(self): with tf.Graph().as_default(): # Test the ability to pass and serialize arguments to `call`. inp = layers.Input(shape=(2,)) x = layers.Dense(3)(inp) x = layers.Dropout(0.5)(x, training=True) model = training_lib.Model(inp, x) # Would be `dropout/cond/Merge` by default self.assertIn('dropout', model.output.op.name) # Test that argument is kept when applying the model inp2 = layers.Input(shape=(2,)) out2 = model(inp2) self.assertIn('dropout', out2.op.name) # Test that argument is kept after loading a model config = model.get_config() model = training_lib.Model.from_config(config) self.assertIn('dropout', model.output.op.name) def test_node_construction(self): # Verifies the Node bookkeeping (`_inbound_nodes`, `_outbound_nodes`, # `_keras_history`) created when layers are called on symbolic tensors. # test basics a = layers.Input(shape=(32,), name='input_a') b = layers.Input(shape=(32,), name='input_b') with self.assertRaises(ValueError): _ = layers.Input(shape=(32,), batch_shape=(10, 32)) with self.assertRaises(ValueError): _ = layers.Input(shape=(32,), unknown_kwarg=None) self.assertListEqual(a.shape.as_list(), [None, 32]) a_layer, a_node_index, a_tensor_index = a._keras_history b_layer, _, _ = b._keras_history self.assertEqual(len(a_layer._inbound_nodes), 1) self.assertEqual(a_tensor_index, 0) node = a_layer._inbound_nodes[a_node_index] self.assertEqual(node.outbound_layer, a_layer) self.assertListEqual(node.inbound_layers, []) self.assertListEqual(node.input_tensors, [a]) self.assertListEqual(node.input_shapes, [(None, 32)]) self.assertListEqual(node.output_tensors, [a]) self.assertListEqual(node.output_shapes, [(None, 32)]) dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) # Calling the same layer twice creates two inbound nodes. self.assertEqual(len(dense._inbound_nodes), 2) self.assertEqual(len(dense._outbound_nodes), 0) self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer) self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense) self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer) self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense) self.assertIs(dense._inbound_nodes[0].input_tensors, a) self.assertIs(dense._inbound_nodes[1].input_tensors, b) # test layer properties test_layer = layers.Dense(16, name='test_layer') a_test = test_layer(a) self.assertListEqual(test_layer.kernel.shape.as_list(), [32, 16]) self.assertIs(test_layer.input, a) self.assertIs(test_layer.output, a_test) self.assertEqual(test_layer.input_shape, (None, 32)) self.assertEqual(test_layer.output_shape, (None, 16)) self.assertIs(dense.get_input_at(0), a) self.assertIs(dense.get_input_at(1), b) self.assertIs(dense.get_output_at(0), a_2) self.assertIs(dense.get_output_at(1), b_2) self.assertEqual(dense.get_input_shape_at(0), (None, 32)) self.assertEqual(dense.get_input_shape_at(1), (None, 32)) self.assertEqual(dense.get_output_shape_at(0), (None, 16)) self.assertEqual(dense.get_output_shape_at(1), (None, 16)) self.assertEqual(dense.get_input_mask_at(0), None) self.assertEqual(dense.get_input_mask_at(1), None) self.assertEqual(dense.get_output_mask_at(0), None) self.assertEqual(dense.get_output_mask_at(1), None) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_multi_input_layer(self): with self.cached_session(): # test multi-input layer a = layers.Input(shape=(32,), name='input_a') b = layers.Input(shape=(32,), name='input_b') dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = layers.concatenate([a_2, 
b_2], name='merge') self.assertListEqual(merged.shape.as_list(), [None, 16 * 2]) merge_layer, merge_node_index, merge_tensor_index = merged._keras_history self.assertEqual(merge_node_index, 0) self.assertEqual(merge_tensor_index, 0) self.assertEqual(len(merge_layer._inbound_nodes), 1) self.assertEqual(len(merge_layer._outbound_nodes), 0) self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2) self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2) c = layers.Dense(64, name='dense_2')(merged) d = layers.Dense(5, name='dense_3')(c) model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model') self.assertEqual(len(model.layers), 6) output_shapes = model.compute_output_shape([(None, 32), (None, 32)]) self.assertListEqual(output_shapes[0].as_list(), [None, 64]) self.assertListEqual(output_shapes[1].as_list(), [None, 5]) self.assertListEqual( model.compute_mask([a, b], [None, None]), [None, None]) # we don't check names of first 2 layers (inputs) because # ordering of same-level layers is not fixed self.assertListEqual([l.name for l in model.layers][2:], ['dense_1', 'merge', 'dense_2', 'dense_3']) self.assertListEqual([l.name for l in model._input_layers], ['input_a', 'input_b']) self.assertListEqual([l.name for l in model._output_layers], ['dense_2', 'dense_3']) # actually run model fn = backend.function(model.inputs, model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)]) # test get_source_inputs self._assertAllIs(layer_utils.get_source_inputs(c), [a, b]) # serialization / deserialization json_config = model.to_json() recreated_model = models.model_from_json(json_config) recreated_model.compile('rmsprop', 'mse') self.assertListEqual([l.name for l in recreated_model.layers][2:], ['dense_1', 'merge', 'dense_2', 'dense_3']) self.assertListEqual([l.name for l in recreated_model._input_layers], ['input_a', 'input_b']) self.assertListEqual([l.name for l in recreated_model._output_layers], ['dense_2', 'dense_3']) fn = backend.function(recreated_model.inputs, recreated_model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)]) def test_multi_output_layer_output_names(self): inp = layers.Input(name='inp', shape=(None,), dtype=tf.float32) class _MultiOutput(layers.Layer): def call(self, x): return x + 1., x + 2. out = _MultiOutput(name='out')(inp) model = training_lib.Model(inp, out) # One output name per output tensor; duplicates get a numeric suffix. self.assertEqual(['out', 'out_1'], model.output_names) self.assertAllClose([2., 3.], model(1.)) def test_recursion(self): # A whole Model can itself be called like a layer on fresh inputs # ("recursion"); the composed model must stay usable and serializable. with tf.Graph().as_default(), self.cached_session(): a = layers.Input(shape=(32,), name='input_a') b = layers.Input(shape=(32,), name='input_b') dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = layers.concatenate([a_2, b_2], name='merge') c = layers.Dense(64, name='dense_2')(merged) d = layers.Dense(5, name='dense_3')(c) model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model') e = layers.Input(shape=(32,), name='input_e') f = layers.Input(shape=(32,), name='input_f') self.assertEqual(len(model.inputs), 2) g, h = model([e, f]) self.assertEqual(len(model.inputs), 2) self.assertEqual(g.name, 'model/dense_2/BiasAdd:0') self.assertListEqual(g.shape.as_list(), c.shape.as_list()) self.assertListEqual(h.shape.as_list(), d.shape.as_list()) # test separate manipulation of different layer outputs i = layers.Dense(7, name='dense_4')(h) final_model = training_lib.Model( inputs=[e, f], outputs=[i, g], name='final') self.assertEqual(len(final_model.inputs), 2) self.assertEqual(len(final_model.outputs), 2) self.assertEqual(len(final_model.layers), 4) # we don't check names of first 2 layers (inputs) because # ordering of same-level layers is not fixed self.assertListEqual([layer.name for layer in final_model.layers][2:], ['model', 'dense_4']) self.assertListEqual( model.compute_mask([e, f], [None, None]), [None, None]) self.assertListEqual( final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7), (10, 64)]) # run recursive model fn = backend.function(final_model.inputs, final_model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)]) # test serialization model_config = final_model.get_config() recreated_model = models.Model.from_config(model_config) fn = backend.function(recreated_model.inputs, recreated_model.outputs) input_a_np = np.random.random((10, 32)) input_b_np = np.random.random((10, 32)) fn_outputs = fn([input_a_np, input_b_np]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_multi_input_multi_output_recursion(self): with self.cached_session(): # test multi-input multi-output a = layers.Input(shape=(32,), name='input_a') b = layers.Input(shape=(32,), name='input_b') dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = layers.concatenate([a_2, b_2], name='merge') c = layers.Dense(64, name='dense_2')(merged) d = layers.Dense(5, name='dense_3')(c) model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model') j = layers.Input(shape=(32,), name='input_j') k = layers.Input(shape=(32,), name='input_k') _, n = model([j, k]) o = layers.Input(shape=(32,), name='input_o') p = layers.Input(shape=(32,), name='input_p') q, _ = model([o, p]) self.assertListEqual(n.shape.as_list(), [None, 5]) self.assertListEqual(q.shape.as_list(), [None, 64]) s = layers.concatenate([n, q], name='merge_nq') self.assertListEqual(s.shape.as_list(), [None, 64 + 5]) # test with single output as 1-elem list multi_io_model = training_lib.Model([j, k, o, p], [s]) fn = backend.function(multi_io_model.inputs, multi_io_model.outputs) fn_outputs = fn([ np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)) ]) self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)]) # test with single output as tensor multi_io_model = training_lib.Model([j, k, o, p], s) fn = backend.function(multi_io_model.inputs, multi_io_model.outputs) fn_outputs = fn([ np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)) ]) # note that the output of the function will still be a 1-elem list self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)]) # test serialization model_config = multi_io_model.get_config() recreated_model = models.Model.from_config(model_config) fn = backend.function(recreated_model.inputs, recreated_model.outputs) fn_outputs = fn([ np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)), np.random.random((10, 32)) ]) # note that the output of the function will still be a 1-elem list self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)]) config = model.get_config() models.Model.from_config(config) model.summary() json_str = model.to_json() models.model_from_json(json_str) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_invalid_graphs(self): # Model construction must reject graphs that are not well-formed. a = layers.Input(shape=(32,), name='input_a') b = layers.Input(shape=(32,), name='input_b') dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = layers.concatenate([a_2, b_2], name='merge') c = layers.Dense(64, name='dense_2')(merged) d = layers.Dense(5, name='dense_3')(c) model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model') # disconnected graph j = layers.Input(shape=(32,), name='input_j') k = layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): training_lib.Model([j], [m, n]) # redundant outputs j = layers.Input(shape=(32,), name='input_j') k = layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) # Repeated outputs are tolerated (no exception expected here). training_lib.Model([j, k], [m, n, n]) # redundant inputs j = layers.Input(shape=(32,), name='input_j') k = layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): training_lib.Model([j, k, j], [m, n]) # I have no idea what I'm doing: garbage as inputs/outputs j = layers.Input(shape=(32,), name='input_j') k = layers.Input(shape=(32,), name='input_k') m, n = model([j, k]) with self.assertRaises(Exception): training_lib.Model([j, k], [m, n, 0]) def test_raw_tf_compatibility(self): with tf.Graph().as_default(): # test calling layers/models on TF tensors a = layers.Input(shape=(32,), name='input_a') b = layers.Input(shape=(32,), name='input_b') dense = layers.Dense(16, name='dense_1') a_2 = dense(a) b_2 = dense(b) merged = layers.concatenate([a_2, b_2], name='merge') c = layers.Dense(64, name='dense_2')(merged) d = layers.Dense(5, name='dense_3')(c) model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model') j = layers.Input(shape=(32,), name='input_j') k = layers.Input(shape=(32,), name='input_k') self.assertEqual(len(model.inputs), 2) m, n = model([j, k]) self.assertEqual(len(model.inputs), 2) tf_model = training_lib.Model([j, k], [m, n]) j_tf = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 32)) k_tf = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 32)) m_tf, n_tf = tf_model([j_tf, k_tf]) self.assertListEqual(m_tf.shape.as_list(), [None, 64]) self.assertListEqual(n_tf.shape.as_list(), [None, 5]) # test merge layers.concatenate([j_tf, k_tf], axis=1) layers.add([j_tf, k_tf]) # test tensor input x = tf.compat.v1.placeholder(shape=(None, 2), dtype=tf.float32) layers.InputLayer(input_tensor=x) x = layers.Input(tensor=x) layers.Dense(2)(x) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_basic_masking(self): a = layers.Input(shape=(10, 32), name='input_a') b = layers.Masking()(a) model = training_lib.Model(a, b) self.assertEqual(model.output_mask.shape.as_list(), [None, 10]) def testMaskingSingleInput(self): class MaskedLayer(layers.Layer): def call(self, inputs, mask=None): if mask is not None: return inputs * mask return inputs def compute_mask(self, inputs, mask=None): return tf.ones_like(inputs) if tf.executing_eagerly(): a = tf.constant([2] * 32) mask = tf.constant([0, 1] * 16) a._keras_mask = mask b = MaskedLayer().apply(a) self.assertTrue(hasattr(b, '_keras_mask')) self.assertAllEqual( self.evaluate(tf.ones_like(mask)), self.evaluate(getattr(b, '_keras_mask'))) self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b)) else: x = input_layer_lib.Input(shape=(32,)) y = MaskedLayer()(x) # pylint: disable=not-callable network = functional.Functional(x, y) # test callability on Input x_2 = input_layer_lib.Input(shape=(32,)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 32]) # test callability on regular tensor x_2 = tf.compat.v1.placeholder(dtype='float32', shape=(None, 32)) y_2 = network(x_2) self.assertEqual(y_2.shape.as_list(), [None, 32]) def test_activity_regularization_with_model_composition(self): # The activity regularizer of an inner model must still contribute to the # loss of an outer model that wraps it. def reg(x): return tf.reduce_sum(x) net_a_input = input_layer_lib.Input((2,)) net_a = net_a_input net_a = layers.Dense( 2, kernel_initializer='ones', use_bias=False, activity_regularizer=reg)( net_a) model_a = training_lib.Model([net_a_input], [net_a]) net_b_input = input_layer_lib.Input((2,)) net_b = model_a(net_b_input) model_b = training_lib.Model([net_b_input], [net_b]) model_b.compile(optimizer='sgd', loss=None) x = np.ones((1, 2)) loss = model_b.evaluate(x) # With ones kernel and ones input: each output is 2, sum over 2 units = 4. self.assertEqual(loss, 4.) 
@combinations.generate(combinations.keras_mode_combinations()) def test_layer_sharing_at_heterogenous_depth(self): # A layer reused at different depths (a(b(a(b(x))))) must round-trip # through get_config/from_config with identical predictions. x_val = np.random.random((10, 5)) x = input_layer_lib.Input(shape=(5,)) a = layers.Dense(5, name='A') b = layers.Dense(5, name='B') output = a(b(a(b(x)))) m = training_lib.Model(x, output) m.run_eagerly = testing_utils.should_run_eagerly() output_val = m.predict(x_val) config = m.get_config() weights = m.get_weights() m2 = models.Model.from_config(config) m2.set_weights(weights) output_val_2 = m2.predict(x_val) self.assertAllClose(output_val, output_val_2, atol=1e-6) @combinations.generate(combinations.keras_mode_combinations()) def test_layer_sharing_at_heterogenous_depth_with_concat(self): input_shape = (16, 9, 3) input_layer = input_layer_lib.Input(shape=input_shape) a = layers.Dense(3, name='dense_A') b = layers.Dense(3, name='dense_B') c = layers.Dense(3, name='dense_C') x1 = b(a(input_layer)) x2 = a(c(input_layer)) output = layers.concatenate([x1, x2]) m = training_lib.Model(inputs=input_layer, outputs=output) m.run_eagerly = testing_utils.should_run_eagerly() x_val = np.random.random((10, 16, 9, 3)) output_val = m.predict(x_val) config = m.get_config() weights = m.get_weights() m2 = models.Model.from_config(config) m2.set_weights(weights) output_val_2 = m2.predict(x_val) self.assertAllClose(output_val, output_val_2, atol=1e-6) def test_layer_sharing_maintains_node_order(self): # See https://github.com/keras-team/keras/issues/14838. inp = input_layer_lib.Input(shape=[5], name='main_input') zeros = layers.Lambda(tf.zeros_like, name='generate_zeros')(inp) ones = layers.Lambda(tf.ones_like, name='generate_ones')(inp) shared_layer = layers.Layer(name='shared') ones_result = shared_layer(ones) zeros_result = shared_layer(zeros) zeros_result = layers.Layer(name='blank')(zeros_result) m = training_lib.Model( inputs=[inp], outputs=[zeros_result, ones_result]) m2 = models.Model.from_config(m.get_config()) self.assertAllClose( m2.predict_on_batch(tf.zeros([1, 5])), m.predict_on_batch(tf.zeros([1, 5]))) @combinations.generate(combinations.keras_mode_combinations()) def test_explicit_training_argument(self): # A `training` value passed when calling a sub-model should override the # outer training phase (Dropout active/inactive). a = layers.Input(shape=(2,)) b = layers.Dropout(0.5)(a) base_model = training_lib.Model(a, b) a = layers.Input(shape=(2,)) b = base_model(a, training=False) model = training_lib.Model(a, b) x = np.ones((100, 2)) y = np.ones((100, 2)) model.compile( optimizer='sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) loss = model.train_on_batch(x, y) self.assertEqual(loss, 0) # In inference mode, output is equal to input. a = layers.Input(shape=(2,)) b = base_model(a, training=True) model = training_lib.Model(a, b) preds = model.predict(x) self.assertEqual(np.min(preds), 0.) # At least one unit was dropped. @combinations.generate(combinations.keras_mode_combinations()) def test_mask_derived_from_keras_layer(self): inputs = input_layer_lib.Input((5, 10)) mask = input_layer_lib.Input((5,)) outputs = layers.RNN(layers.LSTMCell(100))(inputs, mask=mask) model = training_lib.Model([inputs, mask], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[np.ones((10, 5, 10)), np.zeros((10, 5))], y=np.zeros((10, 100)), batch_size=2) # All data is masked, returned values are 0's. self.assertEqual(history.history['loss'][0], 0.0) history = model.fit( x=[np.ones((10, 5, 10)), np.ones((10, 5))], y=np.zeros((10, 100)), batch_size=2) # Data is not masked, returned values are random. self.assertGreater(history.history['loss'][0], 0.0) # The mask wiring must survive a get_config/from_config round-trip. model = training_lib.Model.from_config(model.get_config()) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[np.ones((10, 5, 10)), np.zeros((10, 5))], y=np.zeros((10, 100)), batch_size=2) # All data is masked, returned values are 0's. self.assertEqual(history.history['loss'][0], 0.0) history = model.fit( x=[np.ones((10, 5, 10)), np.ones((10, 5))], y=np.zeros((10, 100)), batch_size=2) # Data is not masked, returned values are random. self.assertGreater(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations()) def test_call_arg_derived_from_keras_layer(self): class MyAdd(layers.Layer): def call(self, x1, x2): return x1 + x2 input1 = input_layer_lib.Input(10) input2 = input_layer_lib.Input(10) outputs = MyAdd()(input1, input2) model = training_lib.Model([input1, input2], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) # Check serialization. model = training_lib.Model.from_config( model.get_config(), custom_objects={'MyAdd': MyAdd}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations(mode='eager'),) def test_only_some_in_first_arg_derived_from_keras_layer_keras_tensors(self): # This functionality is unsupported in v1 graphs class MyAddAll(layers.Layer): def call(self, inputs): x = inputs[0] for inp in inputs[1:]: if inp is not None: x = x + inp return x input1 = input_layer_lib.Input(10) input2 = input_layer_lib.Input(10) layer = MyAddAll() # The first argument mixes constants, KerasTensors and Nones; only the # KerasTensors should be tracked as model inputs. outputs = layer([0.0, input1, None, input2, None]) model = training_lib.Model([input1, input2], outputs) self.assertIn(layer, model.layers) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) # Check serialization. model = training_lib.Model.from_config( model.get_config(), custom_objects={'MyAddAll': MyAddAll}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. 
self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate( combinations.times( combinations.keras_mode_combinations(), combinations.combine(share_already_used_layer=[True, False]))) def test_call_kwarg_derived_from_keras_layer(self, share_already_used_layer): # A KerasTensor passed as a *keyword* argument to `call` must be tracked # as a model input and must survive serialization. class MaybeAdd(layers.Layer): def call(self, x1, x2=None): if x2 is not None: return x1 + x2 return x1 class IdentityLayer(layers.Layer): def call(self, x): return x input1 = input_layer_lib.Input(10) input2 = input_layer_lib.Input(10) identity_layer = IdentityLayer() if share_already_used_layer: # We have had model serialization/deserialization break in the past: # when a layer was previously used to construct other functional models # and had a non-empty list of inbound nodes before being used to define # the model being serialized/deserialized. # (The serialization/deserialization was not correctly adjusting # the node_index serialization/deserialization). # So, we explicitly test this case. training_lib.Model([input1], identity_layer(input1)) outputs = MaybeAdd()(input1, x2=identity_layer(input2)) model = training_lib.Model([input1, input2], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) model = training_lib.Model.from_config( model.get_config(), custom_objects={ 'MaybeAdd': MaybeAdd, 'IdentityLayer': IdentityLayer }) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))], y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations()) def test_call_kwarg_dtype_serialization(self): class Double(layers.Layer): def call(self, x1, dtype=None): return tf.cast(x1 + x1, dtype=dtype) input1 = input_layer_lib.Input(10) outputs = Double()(input1, dtype=tf.float16) model = training_lib.Model([input1], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10))], y=6 * np.ones((10, 10)), batch_size=2) # Check that input was correctly doubled. self.assertEqual(history.history['loss'][0], 0.0) # Check the output dtype self.assertEqual(model(tf.ones((3, 10))).dtype, tf.float16) # The `dtype` call kwarg must also survive serialization. model = training_lib.Model.from_config( model.get_config(), custom_objects={'Double': Double}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10))], y=6 * np.ones((10, 10)), batch_size=2) # Check that input was correctly doubled. self.assertEqual(history.history['loss'][0], 0.0) # Check the output dtype self.assertEqual(model(tf.ones((3, 10))).dtype, tf.float16) @combinations.generate(combinations.keras_mode_combinations()) def test_call_kwarg_nonserializable(self): # A non-JSON-serializable call kwarg is fine at run time but must raise a # clear TypeError when the model config is requested. class Double(layers.Layer): def call(self, x1, kwarg=None): return x1 + x1 class NonSerializable: def __init__(self, foo=None): self.foo = foo input1 = input_layer_lib.Input(10) outputs = Double()(input1, kwarg=NonSerializable()) model = training_lib.Model([input1], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[3 * np.ones((10, 10))], y=6 * np.ones((10, 10)), batch_size=2) # Check that input was correctly doubled. self.assertEqual(history.history['loss'][0], 0.0) with self.assertRaisesRegex( TypeError, 'Layer double was passed non-JSON-serializable arguments.'): model.get_config() @combinations.generate( combinations.times( combinations.keras_mode_combinations(), combinations.combine(share_already_used_layer=[True, False]))) def test_call_kwarg_derived_from_keras_layer_and_first_arg_is_constant( self, share_already_used_layer): # Same as above, but here the *first* positional argument is a plain # constant and only the kwarg is a KerasTensor. class IdentityLayer(layers.Layer): def call(self, x): return x class MaybeAdd(layers.Layer): def call(self, x1, x2=None): if x2 is not None: return x1 + x2 return x1 input2 = input_layer_lib.Input(10) identity_layer = IdentityLayer() if share_already_used_layer: # We have had model serialization/deserialization break in the past: # when a layer was previously used to construct other functional models # and had a non-empty list of inbound nodes before being used to define # the model being serialized/deserialized. # (The serialization/deserialization was not correctly adjusting # the node_index serialization/deserialization). # So, we explicitly test this case. training_lib.Model([input2], identity_layer(input2)) outputs = MaybeAdd()(3., x2=identity_layer(input2)) model = training_lib.Model([input2], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=7 * np.ones((10, 10)), y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) model = training_lib.Model.from_config( model.get_config(), custom_objects={ 'MaybeAdd': MaybeAdd, 'IdentityLayer': IdentityLayer }) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=7 * np.ones((10, 10)), y=10 * np.ones((10, 10)), batch_size=2) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations()) def test_composite_call_kwarg_derived_from_keras_layer(self): # Create a test layer that accepts composite tensor inputs. class MaybeAdd(layers.Layer): def call(self, x1, x2=None): # We need to convert this to a tensor for loss calculations - # losses don't play nicely with ragged tensors yet. if x2 is not None: return (x1 + x2).to_tensor(default_value=0) return x1.to_tensor(default_value=0) input1 = input_layer_lib.Input((None,), ragged=True) input2 = input_layer_lib.Input((None,), ragged=True) outputs = MaybeAdd()(input1, x2=input2) model = training_lib.Model([input1, input2], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) input_data = [ tf.ragged.constant([[3.0, 3.0], [3.0, 3.0], [3.0]]), tf.ragged.constant([[7.0, 7.0], [7.0, 7.0], [7.0]]) ] expected_data = np.array([[10.0, 10.0], [10.0, 10.0], [10.0, 0.0]]) history = model.fit(x=input_data, y=expected_data) # Check that second input was correctly added to first. self.assertEqual(history.history['loss'][0], 0.0) model = training_lib.Model.from_config( model.get_config(), custom_objects={'MaybeAdd': MaybeAdd}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit(x=input_data, y=expected_data) # Check that second input was correctly added to first. 
self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations(mode='eager')) def test_call_some_not_all_nested_in_first_arg_derived_from_keras_layer(self): # This functionality is unsupported in v1 graphs class AddAll(layers.Layer): def call(self, x1_x2, x3): x1, x2 = x1_x2 out = x1 + x2 if x3 is not None: for t in x3.values(): out += t return out input1 = input_layer_lib.Input(10) input2 = input_layer_lib.Input(10) input3 = input_layer_lib.Input(10) layer = AddAll() # The first positional argument mixes a KerasTensor with a constant; the # keyword argument is a dict mixing KerasTensors and a constant. outputs = layer( [input1, 4 * tf.ones((1, 10))], x3={ 'a': input2, 'b': input3, 'c': 5 * tf.ones((1, 10)) }) model = training_lib.Model([input1, input2, input3], outputs) self.assertIn(layer, model.layers) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))], y=15 * np.ones((10, 10)), batch_size=2) # Check that all inputs were correctly added. self.assertEqual(history.history['loss'][0], 0.0) model = training_lib.Model.from_config( model.get_config(), custom_objects={'AddAll': AddAll}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))], y=15 * np.ones((10, 10)), batch_size=2) # Check that all inputs were correctly added. self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations()) def test_call_nested_arg_derived_from_keras_layer(self): class AddAll(layers.Layer): def call(self, x1, x2, x3=None): out = x1 + x2 if x3 is not None: for t in x3.values(): out += t return out input1 = input_layer_lib.Input(10) input2 = input_layer_lib.Input(10) input3 = input_layer_lib.Input(10) outputs = AddAll()( input1, 4 * tf.ones((1, 10)), x3={ 'a': input2, 'b': input3, 'c': 5 * tf.ones((1, 10)) }) model = training_lib.Model([input1, input2, input3], outputs) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))], y=15 * np.ones((10, 10)), batch_size=2) # Check that all inputs were correctly added. self.assertEqual(history.history['loss'][0], 0.0) model = training_lib.Model.from_config( model.get_config(), custom_objects={'AddAll': AddAll}) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) history = model.fit( x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))], y=15 * np.ones((10, 10)), batch_size=2) # Check that all inputs were correctly added. self.assertEqual(history.history['loss'][0], 0.0) @combinations.generate(combinations.keras_mode_combinations()) def test_multi_output_model_with_none_masking(self): def func(x): return [x * 0.2, x * 0.3] def output_shape(input_shape): return [input_shape, input_shape] i = layers.Input(shape=(3, 2, 1)) o = layers.Lambda(function=func, output_shape=output_shape)(i) self.assertEqual(backend.int_shape(o[0]), (None, 3, 2, 1)) self.assertEqual(backend.int_shape(o[1]), (None, 3, 2, 1)) o = layers.add(o) model = training_lib.Model(i, o) model.run_eagerly = testing_utils.should_run_eagerly() i2 = layers.Input(shape=(3, 2, 1)) o2 = model(i2) model2 = training_lib.Model(i2, o2) model2.run_eagerly = testing_utils.should_run_eagerly() x = np.random.random((4, 3, 2, 1)) out = model2.predict(x) assert out.shape == (4, 3, 2, 1) self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4) @combinations.generate(combinations.keras_mode_combinations()) def test_constant_initializer_with_numpy(self): # A numpy array given to constant_initializer must not break to_json. initializer = tf.compat.v1.constant_initializer(np.ones((3, 2))) model = sequential.Sequential() model.add(layers.Dense(2, input_shape=(3,), kernel_initializer=initializer)) model.add(layers.Dense(3)) model.compile( loss='mse', optimizer='sgd', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly()) json_str = model.to_json() models.model_from_json(json_str) def test_subclassed_error_if_init_not_called(self): class MyNetwork(training_lib.Model): def __init__(self): # Deliberately omits super().__init__() to trigger the error below. self._foo = [layers.Dense(10), layers.Dense(10)] with self.assertRaisesRegex(RuntimeError, 'forgot to call'): MyNetwork() @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_int_input_shape(self): # A bare int for `shape` is accepted and treated as a 1-tuple. inputs = input_layer_lib.Input(10) self.assertEqual([None, 10], inputs.shape.as_list()) inputs_with_batch = input_layer_lib.Input(batch_size=20, shape=5) self.assertEqual([20, 5], inputs_with_batch.shape.as_list()) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_model_initialization(self): # Functional model inputs = input_layer_lib.Input(shape=(32,)) outputs = layers.Dense(4)(inputs) # Functional models reject subclassed-model-only constructor kwargs. with self.assertRaisesRegex(TypeError, 'Keyword argument not understood'): model = training_lib.Model( inputs, outputs, name='m', trainable=False, dtype='int64') with self.assertRaisesRegex(TypeError, 'Keyword argument not understood'): model = training_lib.Model( inputs, outputs, name='m', trainable=False, dynamic=False) model = training_lib.Model(inputs, outputs, name='m', trainable=False) self.assertEqual('m', model.name) self.assertFalse(model.trainable) self.assertFalse(model.dynamic) class SubclassModel(training_lib.Model): pass # Subclassed model model = SubclassModel( name='subclassed', trainable=True, dtype='int64', dynamic=True) self.assertEqual('subclassed', model.name) self.assertTrue(model.dynamic) self.assertTrue(model.trainable) w = model.add_weight('w', [], initializer=tf.compat.v1.constant_initializer(1)) self.assertEqual(tf.int64, w.dtype) def test_disconnected_inputs(self): # An input that does not reach any output is still kept through a # get_config/from_config round-trip. input_tensor1 = input_layer_lib.Input(shape=[200], name='a') input_tensor2 = input_layer_lib.Input(shape=[10], name='b') output_tensor1 = layers.Dense(units=10)(input_tensor1) net = functional.Functional( inputs=[input_tensor1, input_tensor2], outputs=[output_tensor1]) net2 = functional.Functional.from_config(net.get_config()) self.assertLen(net2.inputs, 2) self.assertEqual('a', net2.layers[0].name) self.assertEqual('b', net2.layers[1].name) @combinations.generate(combinations.keras_model_type_combinations()) def test_dependency_tracking(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) model.trackable = Checkpoint() self.assertIn('trackable', model._unconditional_dependency_names) self.assertEqual(model.trackable, model._lookup_dependency('trackable')) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_model_construction_in_tf_function(self): d = {'model': None} @tf.function def fn(x): if d['model'] is None: 
# Check that Functional can be built in a `tf.function`. inputs = input_layer_lib.Input(10) outputs = layers.Dense(1)(inputs) model = functional.Functional(inputs, outputs) d['model'] = model else: model = d['model'] return model(x) x = tf.ones((10, 10)) y = fn(x) self.assertEqual(y.shape.as_list(), [10, 1]) class DeferredModeTest(keras_parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testSimpleNetworkBuilding(self): inputs = input_layer_lib.Input(shape=(32,)) if tf.executing_eagerly(): self.assertEqual(inputs.dtype.name, 'float32') self.assertEqual(inputs.shape.as_list(), [None, 32]) x = layers.Dense(2)(inputs) if tf.executing_eagerly(): self.assertEqual(x.dtype.name, 'float32') self.assertEqual(x.shape.as_list(), [None, 2]) outputs = layers.Dense(4)(x) network = functional.Functional(inputs, outputs) self.assertIsInstance(network, functional.Functional) if tf.executing_eagerly(): # It should be possible to call such a network on EagerTensors. 
inputs = tf.constant( np.random.random((10, 32)).astype('float32')) outputs = network(inputs) self.assertEqual(outputs.shape.as_list(), [10, 4]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testMultiIONetworkBuilding(self): input_a = input_layer_lib.Input(shape=(32,)) input_b = input_layer_lib.Input(shape=(16,)) a = layers.Dense(16)(input_a) class AddLayer(layers.Layer): def call(self, inputs): return inputs[0] + inputs[1] c = AddLayer()([a, input_b]) # pylint: disable=not-callable c = layers.Dense(2)(c) network = functional.Functional([input_a, input_b], [a, c]) if tf.executing_eagerly(): a_val = tf.constant( np.random.random((10, 32)).astype('float32')) b_val = tf.constant( np.random.random((10, 16)).astype('float32')) outputs = network([a_val, b_val]) self.assertEqual(len(outputs), 2) self.assertEqual(outputs[0].shape.as_list(), [10, 16]) self.assertEqual(outputs[1].shape.as_list(), [10, 2]) class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase): def _testShapeInference(self, model, input_shape, expected_output_shape): input_value = np.random.random(input_shape) output_value = model.predict(input_value) self.assertEqual(output_value.shape, expected_output_shape) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testSingleInputCase(self): class LayerWithOneInput(layers.Layer): def build(self, input_shape): self.w = tf.ones(shape=(3, 4)) def call(self, inputs): return backend.dot(inputs, self.w) inputs = input_layer_lib.Input(shape=(3,)) layer = LayerWithOneInput() if tf.executing_eagerly(): self.assertEqual( layer.compute_output_shape((None, 3)).as_list(), [None, 4]) # As a side-effect, compute_output_shape builds the layer. self.assertTrue(layer.built) # We can still query the layer's compute_output_shape with compatible # input shapes. 
self.assertEqual( layer.compute_output_shape((6, 3)).as_list(), [6, 4]) outputs = layer(inputs) model = training_lib.Model(inputs, outputs) self._testShapeInference(model, (2, 3), (2, 4)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testMultiInputOutputCase(self): class MultiInputOutputLayer(layers.Layer): def build(self, input_shape): self.w = tf.ones(shape=(3, 4)) def call(self, inputs): a = backend.dot(inputs[0], self.w) b = a + inputs[1] return [a, b] input_a = input_layer_lib.Input(shape=(3,)) input_b = input_layer_lib.Input(shape=(4,)) output_a, output_b = MultiInputOutputLayer()([input_a, input_b]) model = training_lib.Model([input_a, input_b], [output_a, output_b]) output_a_val, output_b_val = model.predict( [np.random.random((2, 3)), np.random.random((2, 4))]) self.assertEqual(output_a_val.shape, (2, 4)) self.assertEqual(output_b_val.shape, (2, 4)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testTrainingArgument(self): class LayerWithTrainingArg(layers.Layer): def build(self, input_shape): self.w = tf.ones(shape=(3, 4)) def call(self, inputs, training): return backend.dot(inputs, self.w) inputs = input_layer_lib.Input(shape=(3,)) outputs = LayerWithTrainingArg()(inputs, training=False) model = training_lib.Model(inputs, outputs) self._testShapeInference(model, (2, 3), (2, 4)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoneInShape(self): class Model(training_lib.Model): def __init__(self): super(Model, self).__init__() self.conv1 = layers.Conv2D(8, 3) self.pool = layers.GlobalAveragePooling2D() self.fc = layers.Dense(3) def call(self, x): x = self.conv1(x) x = self.pool(x) x = self.fc(x) return x model = Model() model.build(tf.TensorShape((None, None, None, 1))) self.assertTrue(model.built, 'Model should be built') self.assertTrue(model.weights, 'Model should have its weights created as it ' 'has been built') sample_input = tf.ones((1, 10, 10, 1)) output = 
model(sample_input) self.assertEqual(output.shape, (1, 3)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoneInShapeWithCompoundModel(self): class BasicBlock(training_lib.Model): def __init__(self): super(BasicBlock, self).__init__() self.conv1 = layers.Conv2D(8, 3) self.pool = layers.GlobalAveragePooling2D() self.dense = layers.Dense(3) def call(self, x): x = self.conv1(x) x = self.pool(x) x = self.dense(x) return x class CompoundModel(training_lib.Model): def __init__(self): super(CompoundModel, self).__init__() self.block = BasicBlock() def call(self, x): x = self.block(x) # pylint: disable=not-callable return x model = CompoundModel() model.build(tf.TensorShape((None, None, None, 1))) self.assertTrue(model.built, 'Model should be built') self.assertTrue(model.weights, 'Model should have its weights created as it ' 'has been built') sample_input = tf.ones((1, 10, 10, 1)) output = model(sample_input) # pylint: disable=not-callable self.assertEqual(output.shape, (1, 3)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def testNoneInShapeWithFunctionalAPI(self): class BasicBlock(training_lib.Model): # Inheriting from layers.Layer since we are calling this layer # inside a model created using functional API. 
def __init__(self): super(BasicBlock, self).__init__() self.conv1 = layers.Conv2D(8, 3) def call(self, x): x = self.conv1(x) return x input_layer = layers.Input(shape=(None, None, 1)) x = BasicBlock()(input_layer) x = layers.GlobalAveragePooling2D()(x) output_layer = layers.Dense(3)(x) model = training_lib.Model(inputs=input_layer, outputs=output_layer) model.build(tf.TensorShape((None, None, None, 1))) self.assertTrue(model.built, 'Model should be built') self.assertTrue(model.weights, 'Model should have its weights created as it ' 'has been built') sample_input = tf.ones((1, 10, 10, 1)) output = model(sample_input) self.assertEqual(output.shape, (1, 3)) @combinations.generate(combinations.keras_mode_combinations()) def test_sequential_as_downstream_of_masking_layer(self): inputs = layers.Input(shape=(3, 4)) x = layers.Masking(mask_value=0., input_shape=(3, 4))(inputs) s = sequential.Sequential() s.add(layers.Dense(5, input_shape=(4,))) x = layers.wrappers.TimeDistributed(s)(x) model = training_lib.Model(inputs=inputs, outputs=x) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly()) model_input = np.random.randint( low=1, high=5, size=(10, 3, 4)).astype('float32') for i in range(4): model_input[i, i:, :] = 0. model.fit(model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6) if not tf.executing_eagerly(): # Note: this doesn't work in eager due to DeferredTensor/ops compatibility # issue. 
mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)] mask_outputs += [model.layers[2].compute_mask( model.layers[2].input, mask_outputs[-1])] func = backend.function([model.input], mask_outputs) mask_outputs_val = func([model_input]) self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1)) self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_external_keras_serialization_compat_input_layers(self): inputs = input_layer_lib.Input(shape=(10,)) outputs = layers.Dense(1)(inputs) model = training_lib.Model(inputs, outputs) config = model.get_config() # Checks that single inputs and outputs are still saved as 1-element lists. # Saving as 1-element lists or not is equivalent in TF Keras, but only the # 1-element list format is supported in TF.js and keras-team/Keras. self.assertLen(config['input_layers'], 1) self.assertLen(config['output_layers'], 1) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_external_keras_serialization_compat_inbound_nodes(self): # Check single Tensor input. inputs = input_layer_lib.Input(shape=(10,), name='in') outputs = layers.Dense(1)(inputs) model = training_lib.Model(inputs, outputs) config = model.get_config() self.assertEqual(config['layers'][1]['inbound_nodes'], [[['in', 0, 0, {}]]]) # Check multiple Tensor input. inputs1 = input_layer_lib.Input(shape=(10,), name='in1') inputs2 = input_layer_lib.Input(shape=(10,), name='in2') outputs = layers.Add()([inputs1, inputs2]) model = training_lib.Model([inputs1, inputs2], outputs) config = model.get_config() self.assertEqual(config['layers'][2]['inbound_nodes'], [[['in1', 0, 0, {}], ['in2', 0, 0, {}]]]) @combinations.generate(combinations.combine(mode=['eager'])) def test_dict_inputs_tensors(self): # Note that this test is running with v2 eager only, since the v1 # will behave differently wrt to dict input for training. 
inputs = { 'sentence2': input_layer_lib.Input( shape=(), name='a', dtype=tf.string), 'sentence1': input_layer_lib.Input( shape=(), name='b', dtype=tf.string), } strlen = layers.Lambda(tf.strings.length) diff = layers.Subtract()( [strlen(inputs['sentence1']), strlen(inputs['sentence2'])]) diff = tf.cast(diff, tf.float32) model = training_lib.Model(inputs, diff) extra_keys = { 'sentence1': tf.constant(['brown fox', 'lazy dog']), 'sentence2': tf.constant(['owl', 'cheeky cat']), 'label': tf.constant([0, 1]), } with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') model(extra_keys) self.assertIn('ignored by the model', str(w[-1].message)) model.compile('sgd', 'mse') with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') model.fit(extra_keys, y=tf.constant([0, 1]), steps_per_epoch=1) self.assertIn('ignored by the model', str(w[-1].message)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') model.evaluate(extra_keys, tf.constant([0, 1])) self.assertIn('ignored by the model', str(w[-1].message)) # Make sure the model inputs are sorted with the dict keys. 
self.assertEqual(model.inputs[0]._keras_history.layer.name, 'b') self.assertEqual(model.inputs[1]._keras_history.layer.name, 'a') class GraphUtilsTest(tf.test.TestCase): def testGetReachableFromInputs(self): with tf.Graph().as_default(), self.cached_session(): pl_1 = tf.compat.v1.placeholder(shape=None, dtype='float32') pl_2 = tf.compat.v1.placeholder(shape=None, dtype='float32') pl_3 = tf.compat.v1.placeholder(shape=None, dtype='float32') x_1 = pl_1 + pl_2 x_2 = pl_2 * 2 x_3 = pl_3 + 1 x_4 = x_1 + x_2 x_5 = x_3 * pl_1 self.assertEqual( tf_utils.get_reachable_from_inputs([pl_1]), {pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op}) self.assertEqual( tf_utils.get_reachable_from_inputs([pl_1, pl_2]), {pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op}) self.assertEqual( tf_utils.get_reachable_from_inputs([pl_3]), {pl_3, x_3, x_5, x_3.op, x_5.op}) self.assertEqual( tf_utils.get_reachable_from_inputs([x_3]), {x_3, x_5, x_5.op}) class NestedNetworkTest(keras_parameterized.TestCase): @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_nested_inputs_network(self): inputs = { 'x1': input_layer_lib.Input(shape=(1,)), 'x2': input_layer_lib.Input(shape=(1,)) } outputs = layers.Add()([inputs['x1'], inputs['x2']]) network = functional.Functional(inputs, outputs) network = functional.Functional.from_config(network.get_config()) result_tensor = network({ 'x1': tf.ones((1, 1), 'float32'), 'x2': tf.ones((1, 1), 'float32') }) result = self.evaluate(result_tensor) self.assertAllEqual(result, [[2.]]) # TODO(b/122726584): Investigate why concrete batch is flaky in some builds. 
output_shape = network.compute_output_shape({ 'x1': (None, 1), 'x2': (None, 1) }) self.assertListEqual(output_shape.as_list(), [None, 1]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_nested_outputs_network(self): inputs = input_layer_lib.Input(shape=(1,)) outputs = { 'x+x': layers.Add()([inputs, inputs]), 'x*x': layers.Multiply()([inputs, inputs]) } network = functional.Functional(inputs, outputs) network = functional.Functional.from_config(network.get_config()) result_tensor = network(tf.ones((1, 1), 'float32')) result = self.evaluate(result_tensor) self.assertAllEqual(result['x+x'], [[2.]]) self.assertAllEqual(result['x*x'], [[1.]]) output_shape = network.compute_output_shape((None, 1)) self.assertListEqual(output_shape['x+x'].as_list(), [None, 1]) self.assertListEqual(output_shape['x*x'].as_list(), [None, 1]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_nested_network_inside_network(self): inner_inputs = { 'x1': input_layer_lib.Input(shape=(1,)), 'x2': input_layer_lib.Input(shape=(1,)) } inner_outputs = { 'x1+x2': layers.Add()([inner_inputs['x1'], inner_inputs['x2']]), 'x1*x2': layers.Multiply()([inner_inputs['x1'], inner_inputs['x2']]) } inner_network = functional.Functional( inner_inputs, inner_outputs) inputs = [ input_layer_lib.Input(shape=(1,)), input_layer_lib.Input(shape=(1,)) ] middle = inner_network({'x1': inputs[0], 'x2': inputs[1]}) outputs = layers.Add()([middle['x1+x2'], middle['x1*x2']]) network = functional.Functional(inputs, outputs) network = functional.Functional.from_config(network.get_config()) # Computes: `(x1+x2) + (x1*x2)` result_tensor = network( [tf.ones((1, 1), 'float32'), tf.ones((1, 1), 'float32')]) result = self.evaluate(result_tensor) self.assertAllEqual(result, [[3.]]) output_shape = network.compute_output_shape([(None, 1), (None, 1)]) self.assertListEqual(output_shape.as_list(), [None, 1]) @combinations.generate(combinations.combine(mode=['graph'])) def 
test_updates_with_direct_call(self): inputs = input_layer_lib.Input(shape=(10,)) x = layers.BatchNormalization()(inputs) x = layers.Dense(10)(x) model = training_lib.Model(inputs, x) ph = backend.placeholder(shape=(10, 10)) model(ph) self.assertLen(model.updates, 4) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_dict_mapping_input(self): class ReturnFirst(layers.Layer): def call(self, inputs): b, _ = inputs return b # Checks that inputs are put in same order as the # Model was constructed with. b = input_layer_lib.Input(shape=(10,), name='b') a = input_layer_lib.Input(shape=(10,), name='a') outputs = ReturnFirst()([b, a]) b_val = tf.ones((10, 10)) a_val = tf.zeros((10, 10)) model = training_lib.Model([b, a], outputs) res = model({'a': a_val, 'b': b_val}) self.assertAllClose(self.evaluate(res), self.evaluate(b_val)) reversed_model = training_lib.Model([a, b], outputs) res = reversed_model({'a': a_val, 'b': b_val}) self.assertAllClose(self.evaluate(res), self.evaluate(b_val)) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_dict_mapping_single_input(self): b = input_layer_lib.Input(shape=(1,), name='b') outputs = b * 2 model = training_lib.Model(b, outputs) b_val = tf.ones((1, 1)) extra_val = tf.ones((1, 10)) inputs = {'a': extra_val, 'b': b_val} res = model(inputs) # Check that 'b' was used and 'a' was ignored. 
self.assertEqual(res.shape.as_list(), [1, 1]) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) def test_nested_dict_mapping(self): a = input_layer_lib.Input(shape=(1,), dtype='int32', name='a') b = input_layer_lib.Input(shape=(1,), dtype='int32', name='b') c = input_layer_lib.Input(shape=(1,), dtype='int32', name='c') d = input_layer_lib.Input(shape=(1,), dtype='int32', name='d') inputs = {'a': (a, b), 'c': (c, d)} outputs = 1000 * a + 100 * b + 10 * c + d model = training_lib.Model(inputs, outputs) a_val = tf.ones((1, 1), dtype='int32') b_val = 2 * tf.ones((1, 1), dtype='int32') c_val = 3 * tf.ones((1, 1), dtype='int32') d_val = 4 * tf.ones((1, 1), dtype='int32') inputs_val = {'a': (a_val, b_val), 'c': (c_val, d_val)} res = model(inputs_val) # Check that inputs were flattened in the correct order. self.assertFalse(model._enable_dict_to_input_mapping) self.assertEqual(self.evaluate(res), [1234]) @combinations.generate(combinations.keras_mode_combinations()) class AddLossTest(keras_parameterized.TestCase): def test_add_loss_outside_call_only_loss(self): inputs = input_layer_lib.Input((10,)) mid = layers.Dense(10)(inputs) outputs = layers.Dense(1)(mid) model = training_lib.Model(inputs, outputs) model.add_loss(tf.reduce_mean(outputs)) self.assertLen(model.losses, 1) initial_weights = model.get_weights() x = np.ones((10, 10)) model.compile( 'sgd', run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, batch_size=2, epochs=1) model2 = model.from_config(model.get_config()) model2.compile( 'sgd', run_eagerly=testing_utils.should_run_eagerly()) model2.set_weights(initial_weights) model2.fit(x, batch_size=2, epochs=1) # The TFOpLayer and the AddLoss layer are serialized. 
self.assertLen(model2.layers, 5) self.assertAllClose(model.get_weights(), model2.get_weights()) def test_add_loss_outside_call_multiple_losses(self): inputs = input_layer_lib.Input((10,)) x1 = layers.Dense(10)(inputs) x2 = layers.Dense(10)(x1) outputs = layers.Dense(1)(x2) model = training_lib.Model(inputs, outputs) model.add_loss(tf.reduce_sum(x1 * x2)) model.add_loss(tf.reduce_mean(outputs)) self.assertLen(model.losses, 2) initial_weights = model.get_weights() x, y = np.ones((10, 10)), np.ones((10, 1)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) model.fit(x, y, batch_size=2, epochs=1) model2 = model.from_config(model.get_config()) model2.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly()) model2.set_weights(initial_weights) model2.fit(x, y, batch_size=2, epochs=1) self.assertAllClose(model.get_weights(), model2.get_weights()) def test_add_loss_crossentropy_backtracking(self): inputs = input_layer_lib.Input((2,)) labels = input_layer_lib.Input((1,)) outputs = layers.Dense(1, activation='sigmoid')(inputs) model = functional.Functional([inputs, labels], outputs) model.add_loss(losses.binary_crossentropy(labels, outputs)) model.compile('adam') x = np.random.random((2, 2)) y = np.random.random((2, 1)) model.fit([x, y]) inputs = input_layer_lib.Input((2,)) labels = input_layer_lib.Input((2,)) outputs = layers.Dense(2, activation='softmax')(inputs) model = functional.Functional([inputs, labels], outputs) model.add_loss(losses.categorical_crossentropy(labels, outputs)) model.compile('adam') x = np.random.random((2, 2)) y = np.random.random((2, 2)) model.fit([x, y]) inputs = input_layer_lib.Input((2,)) labels = input_layer_lib.Input((1,), dtype='int32') outputs = layers.Dense(2, activation='softmax')(inputs) model = functional.Functional([inputs, labels], outputs) model.add_loss(losses.sparse_categorical_crossentropy(labels, outputs)) model.compile('adam') x = np.random.random((2, 2)) y = np.random.randint(0, 2, size=(2, 
1)) model.fit([x, y]) @combinations.generate(combinations.keras_mode_combinations()) class WeightAccessTest(keras_parameterized.TestCase): def test_functional_model(self): inputs = input_layer_lib.Input((10,)) x1 = layers.Dense(10)(inputs) x2 = layers.Dense(10)(x1) outputs = layers.Dense(1)(x2) model = training_lib.Model(inputs, outputs) self.assertEqual(len(model.weights), 6) def test_sequential_model_with_input_shape(self): x1 = layers.Dense(10, input_shape=(10,)) x2 = layers.Dense(10) x3 = layers.Dense(1) model = sequential.Sequential([x1, x2, x3]) self.assertEqual(len(model.weights), 6) def test_sequential_model_without_input_shape(self): x1 = layers.Dense(10) x2 = layers.Dense(10) x3 = layers.Dense(1) model = sequential.Sequential([x1, x2, x3]) with self.assertRaisesRegex( ValueError, 'Weights for model .* have not yet been created'): _ = model.weights def test_subclass_model_with_build_method(self): class SubclassModel(models.Model): def build(self, input_shape): self.w = self.add_weight(shape=input_shape[-1], initializer='ones') def call(self, inputs): return inputs * self.w model = SubclassModel() with self.assertRaisesRegex( ValueError, 'Weights for model .* have not yet been created'): _ = model.weights model(input_layer_lib.Input((10,))) self.assertEqual(len(model.weights), 1) def test_subclass_model_without_build_method(self): class SubclassModel(models.Model): def __init__(self): super(SubclassModel, self).__init__() self.w = self.add_weight(shape=(), initializer='ones') def call(self, inputs): return inputs * self.w model = SubclassModel() self.assertEqual(len(model.weights), 1) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class DTypeTest(keras_parameterized.TestCase): @testing_utils.enable_v2_dtype_behavior def test_graph_network_dtype(self): inputs = input_layer_lib.Input((10,)) outputs = layers.Dense(10)(inputs) network = functional.Functional(inputs, outputs) self.assertEqual(network.dtype, 'float32') 
@testing_utils.enable_v2_dtype_behavior def test_subclassed_network_dtype(self): class IdentityNetwork(training_lib.Model): def call(self, inputs): return inputs network = IdentityNetwork() self.assertEqual(network.dtype, 'float32') self.assertEqual(network(tf.constant(1, 'float64')).dtype, 'float32') network = IdentityNetwork(dtype='float16') self.assertEqual(network.dtype, 'float16') self.assertEqual(network(tf.constant(1, 'float64')).dtype, 'float16') network = IdentityNetwork(autocast=False) self.assertEqual(network.dtype, 'float32') self.assertEqual(network(tf.constant(1, 'float64')).dtype, 'float64') class AttrTrackingLayer(base_layer.Layer): """Count how many times `dynamic` and `stateful` are called. These counts are used to test that the attribute cache behaves as expected. """ def __init__(self, *args, **kwargs): self.stateful_count = 0 self.dynamic_count = 0 super(AttrTrackingLayer, self).__init__(*args, **kwargs) @base_layer.Layer.stateful.getter def stateful(self): self.stateful_count += 1 return super(AttrTrackingLayer, self).stateful @property def dynamic(self): self.dynamic_count += 1 return super(AttrTrackingLayer, self).dynamic @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class CacheCorrectnessTest(keras_parameterized.TestCase): def layer_and_network_test(self): # Top level layer network = functional.Functional() layer_0 = AttrTrackingLayer() sub_network = functional.Functional() layer_1 = AttrTrackingLayer(dynamic=True) layer_2 = AttrTrackingLayer() sub_network.sub_layers = [layer_1, layer_2] network.sub_layer = layer_0 for _ in range(2): self.assertEqual(network.dynamic, False) self.assertEqual(network.stateful, False) # The second pass should be a cache hit. self.assertEqual(layer_0.dynamic_count, 1) self.assertEqual(layer_0.stateful_count, 1) # Mutations of the sub-layer should force recalculation of the network's # stateful attribute. (mutations bubble up.) 
layer_0.stateful = True self.assertEqual(network.stateful, True) self.assertEqual(layer_0.stateful_count, 2) layer_0.stateful = False self.assertEqual(network.stateful, False) self.assertEqual(layer_0.stateful_count, 3) # But changing stateful should not affect dynamic. self.assertEqual(network.dynamic, False) self.assertEqual(layer_0.dynamic_count, 1) network.sub_network = sub_network # Adding to the topology should invalidate the cache and reflect in the top # level network. self.assertEqual(network.dynamic, True) self.assertEqual(layer_0.dynamic_count, 2) self.assertEqual(layer_1.dynamic_count, 1) # Still dynamic, but we need to recompute. sub_network.sub_layers.pop() self.assertEqual(network.dynamic, True) self.assertEqual(layer_0.dynamic_count, 3) self.assertEqual(layer_1.dynamic_count, 2) # Now that we've removed the dynamic layer deep in the layer hierarchy, we # need to make sure that that bubbles up through all the levels. sub_network.sub_layers.pop() self.assertEqual(network.dynamic, False) self.assertEqual(layer_0.dynamic_count, 4) self.assertEqual(layer_1.dynamic_count, 2) # Now check with a tracked dict. sub_network.sub_layers = { "layer_1": layer_1, "layer_2": layer_2, } self.assertEqual(network.dynamic, True) self.assertEqual(layer_0.dynamic_count, 5) self.assertEqual(layer_1.dynamic_count, 3) # In-place assignment should still invalidate the cache. sub_network.sub_layers["layer_1"] = layer_1 self.assertEqual(network.dynamic, True) self.assertEqual(layer_0.dynamic_count, 6) self.assertEqual(layer_1.dynamic_count, 4) sub_network.sub_layers["layer_1"] = None for _ in range(2): self.assertEqual(network.dynamic, False) self.assertEqual(layer_0.dynamic_count, 7) self.assertEqual(layer_1.dynamic_count, 4) layer_3 = AttrTrackingLayer() layer_3.stateful = True sub_network.sub_layers = None self.assertEqual(network.dynamic, False) self.assertEqual(network.stateful, False) # Test duplicate layers. 
sub_network.sub_layers = [layer_1, layer_1, layer_1, layer_3] self.assertEqual(network.dynamic, True) self.assertEqual(network.stateful, True) for _ in range(3): sub_network.sub_layers.pop() self.assertEqual(network.dynamic, True) self.assertEqual(network.stateful, False) sub_network.sub_layers.pop() self.assertEqual(network.dynamic, False) self.assertEqual(network.stateful, False) def test_compute_output_shape_cache(self): # See https://github.com/tensorflow/tensorflow/issues/32029. x = input_layer_lib.Input(shape=(None, 32)) dense = layers.Dense(2) y = dense(x) network = functional.Functional(x, y, name='dense_network') for i in range(999, 1024): self.assertEqual(network.compute_output_shape((1, i, 32)), (1, i, 2)) def test_2d_inputs_squeezed_to_1d(self): input_1d = input_layer_lib.Input(shape=()) outputs = input_1d * 2. net = functional.Functional(input_1d, outputs) x = np.ones((10, 1)) y = net(x) self.assertEqual(y.shape.rank, 1) def test_1d_inputs_expanded_to_2d(self): input_1d = input_layer_lib.Input(shape=(1,)) outputs = input_1d * 2. net = functional.Functional(input_1d, outputs) x = np.ones((10,)) y = net(x) self.assertEqual(y.shape.rank, 2) def test_training_passed_during_construction(self): def _call(inputs, training): if training is None: return inputs * -1.0 elif training: return inputs else: return inputs * 0.0 class MyLayer(base_layer.Layer): def call(self, inputs, training=True): return _call(inputs, training) my_layer = MyLayer() x = np.ones((1, 10)) # Hard-coded `true` value passed during construction is respected. inputs = input_layer_lib.Input(10) outputs = my_layer(inputs, training=True) network = functional.Functional(inputs, outputs) self.assertAllEqual(network(x, training=True), _call(x, True)) self.assertAllEqual(network(x, training=False), _call(x, True)) self.assertAllEqual(network(x), _call(x, True)) # Hard-coded `false` value passed during construction is respected. 
inputs = input_layer_lib.Input(10) outputs = my_layer(inputs, training=False) network = functional.Functional(inputs, outputs) self.assertAllEqual(network(x, training=True), _call(x, False)) self.assertAllEqual(network(x, training=False), _call(x, False)) self.assertAllEqual(network(x), _call(x, False)) if tf.executing_eagerly(): # In v2, construction still works when no `training` is specified # When no value passed during construction, it uses the local default. inputs = input_layer_lib.Input(10) outputs = my_layer(inputs) network = functional.Functional(inputs, outputs) self.assertAllEqual(network(x, training=True), _call(x, True)) self.assertAllEqual(network(x, training=False), _call(x, False)) self.assertAllEqual(network(x), _call(x, True)) # Use local default # `None` value passed positionally during construction is ignored at runtime inputs = input_layer_lib.Input(10) outputs = my_layer(inputs, None) network = functional.Functional(inputs, outputs) self.assertAllEqual(network(x, training=True), _call(x, True)) self.assertAllEqual(network(x, training=False), _call(x, False)) if tf.executing_eagerly(): self.assertAllEqual(network(x), _call(x, True)) # Use local default else: # in v1 training would have defaulted to using the `None` inside the layer # if training is not passed at runtime self.assertAllEqual(network(x), _call(x, None)) # `None` value passed as kwarg during construction is ignored at runtime. 
inputs = input_layer_lib.Input(10) outputs = my_layer(inputs, training=None) network = functional.Functional(inputs, outputs) self.assertAllEqual(network(x, training=True), _call(x, True)) self.assertAllEqual(network(x, training=False), _call(x, False)) if tf.executing_eagerly(): self.assertAllEqual(network(x), _call(x, True)) # Use local default else: # in v1 training would have defaulted to using the `None` inside the layer # if training is not passed at runtime self.assertAllEqual(network(x), _call(x, None)) class InputsOutputsErrorTest(keras_parameterized.TestCase): @testing_utils.enable_v2_dtype_behavior def test_input_error(self): inputs = input_layer_lib.Input((10,)) outputs = layers.Dense(10)(inputs) with self.assertRaisesRegex( TypeError, "('Keyword argument not understood:', 'input')"): models.Model(input=inputs, outputs=outputs) @testing_utils.enable_v2_dtype_behavior def test_output_error(self): inputs = input_layer_lib.Input((10,)) outputs = layers.Dense(10)(inputs) with self.assertRaisesRegex( TypeError, "('Keyword argument not understood:', 'output')"): models.Model(inputs=inputs, output=outputs) def test_input_spec(self): if not tf.executing_eagerly(): return inputs = input_layer_lib.Input((10,)) outputs = layers.Dense(10)(inputs) model = models.Model(inputs, outputs) with self.assertRaisesRegex( ValueError, r'.*expected shape=.*'): model(np.zeros((3, 11))) def test_input_spec_list_of_inputs(self): if not tf.executing_eagerly(): return input_1 = input_layer_lib.Input((10,), name='1') input_2 = input_layer_lib.Input((5,), name='2') x = layers.Concatenate()([input_1, input_2]) outputs = layers.Dense(10)(x) model = models.Model([input_1, input_2], outputs) with self.assertRaisesRegex( ValueError, r'.*expects 2 input.*'): model(np.zeros((3, 10))) with self.assertRaisesRegex( ValueError, r'.*expects 2 input.*'): model([np.zeros((3, 10)), np.zeros((3, 5)), np.zeros((3, 10))]) with self.assertRaisesRegex( ValueError, r'.*expected shape=.*'): 
model([np.zeros((3, 10)), np.zeros((3, 6))]) # Test passing data via dict keyed by input name with self.assertRaisesRegex( ValueError, r'Missing data for input.*'): model({'1': np.zeros((3, 10))}) with self.assertRaisesRegex( ValueError, r'.*expected shape=.*'): model({'1': np.zeros((3, 10)), '2': np.zeros((3, 6))}) def test_input_spec_dict(self): if not tf.executing_eagerly(): return input_1 = input_layer_lib.Input((10,)) input_2 = input_layer_lib.Input((5,)) x = layers.Concatenate()([input_1, input_2]) outputs = layers.Dense(10)(x) model = models.Model({'1': input_1, '2': input_2}, outputs) with self.assertRaisesRegex( ValueError, r'Missing data for input.*'): model({'1': np.zeros((3, 10))}) with self.assertRaisesRegex( ValueError, r'.*expected shape=.*'): model({'1': np.zeros((3, 10)), '2': np.zeros((3, 6))}) class FunctionalSubclassModel(training_lib.Model): def __init__(self, *args, **kwargs): self.foo = {'foo': 'bar'} # Make sure users can assign dict attributes my_input = input_layer_lib.Input(shape=(16,)) dense = layers.Dense(32, activation='relu') output = dense(my_input) outputs = {'output': output} super().__init__(inputs=[my_input], outputs=outputs, *args, **kwargs) class MixinClass: def __init__(self, foo, **kwargs): self._foo = foo super().__init__(**kwargs) def get_foo(self): return self._foo class SubclassedModel(training_lib.Model): def __init__(self, bar, **kwargs): self._bar = bar super().__init__(**kwargs) def get_bar(self): return self._bar class MultipleInheritanceModelTest(keras_parameterized.TestCase): def testFunctionalSubclass(self): m = FunctionalSubclassModel() # Some smoke test for the weights and output shape of the model self.assertLen(m.weights, 2) self.assertEqual(m.outputs[0].shape.as_list(), [None, 32]) def testFunctionalSubclassPreMixin(self): class MixedFunctionalSubclassModel(MixinClass, FunctionalSubclassModel): pass m = MixedFunctionalSubclassModel(foo='123') self.assertTrue(m._is_graph_network) self.assertLen(m.weights, 2) 
self.assertEqual(m.outputs[0].shape.as_list(), [None, 32]) self.assertEqual(m.get_foo(), '123') def testFunctionalSubclassPostMixin(self): # Make sure the the mixin class is also init correct when the order changed. class MixedFunctionalSubclassModel(FunctionalSubclassModel, MixinClass): pass m = MixedFunctionalSubclassModel(foo='123') self.assertTrue(m._is_graph_network) self.assertLen(m.weights, 2) self.assertEqual(m.outputs[0].shape.as_list(), [None, 32]) self.assertEqual(m.get_foo(), '123') def testSubclassModelPreMixin(self): class MixedSubclassModel(MixinClass, SubclassedModel): pass m = MixedSubclassModel(foo='123', bar='456') self.assertFalse(m._is_graph_network) self.assertEqual(m.get_foo(), '123') self.assertEqual(m.get_bar(), '456') if __name__ == '__main__': tf.test.main()
89,643
34.771748
83
py
keras
keras-master/keras/engine/training_distributed_v1.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Part of the Keras training engine related to distributed training.""" import tensorflow.compat.v2 as tf # pylint: disable=protected-access import numpy as np from tensorflow.python.distribute import input_lib from keras import backend from keras import callbacks as cbks from keras.distribute import distribute_coordinator_utils as dc from keras.distribute import distributed_training_utils_v1 as dist_utils from keras.engine import partial_batch_padding_handler as padding_util from keras.engine import training_arrays_v1 from keras.engine import training_utils_v1 from keras.utils.generic_utils import Progbar from keras.utils.mode_keys import ModeKeys from tensorflow.python.platform import tf_logging as logging def _per_replica_execution_function(model, mode): exec_func = model._make_execution_function(mode) return (exec_func.inputs, exec_func.outputs, exec_func.updates_op, exec_func.session_kwargs) def _build_model(strategy, model, mode, inputs, targets=None): if model._compile_distribution: dist_utils.clone_model_on_replicas( model, strategy, mode, inputs=inputs, targets=targets) else: dist_utils._build_distributed_network(model, strategy, mode, inputs, targets) def _make_train_step_fn(model, mode, strategy, output_labels): """Create step fn. Args: model: a Keras Model instance. 
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. strategy: a `tf.distribute.Strategy` instance. output_labels: the output labels for the step function. Returns: A step function to run by `tf.distribute.Strategy`. """ def _step_fn(ctx, inputs): """A step fn that returns update ops.""" if isinstance(inputs, (tuple, list)) and len(inputs) == 2: inputs, targets = inputs else: targets = None # When input feature is a dictionary of tensors, dictionary is flattended # to an array and passed as a model input. This results in input mismatch # when model input layer names are not sorted in alphabetical order as # `nest.flatten()`sorts dictionary elements by keys. As so, transform input # tensors into an array and order it along `model._feed_input_names`. if isinstance(inputs, dict): inputs = [inputs[input_name] for input_name in model._feed_input_names] _build_model(strategy, model, mode, inputs, targets) (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) = strategy.extended.call_for_each_replica( _per_replica_execution_function, args=(dist_utils.get_distributed_model(model, mode), mode)) (all_inputs, all_outputs, all_updates, all_session_args) = dist_utils.unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) combined_fn = backend.function( all_inputs, all_outputs, updates=all_updates, name='distributed_' + str(mode) + '_function', **all_session_args) for label, output in zip(output_labels, combined_fn.outputs): if label == 'loss': reduce_op = tf.distribute.ReduceOp.SUM else: # We reduce all other metrics using mean for now. This is temporary # workaround until new metrics are in place. reduce_op = tf.distribute.ReduceOp.MEAN ctx.set_last_step_output(label, output, reduce_op) # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn: # feed_dict, session kwargs, run options, run_metadata for now. 
These should # be handled appropriately return combined_fn.updates_op return _step_fn def experimental_tpu_fit_loop(model, dataset, epochs=100, verbose=1, callbacks=None, initial_epoch=0, steps_per_epoch=None, val_dataset=None, validation_steps=None, validation_freq=1): """Fit loop for training with TPU tf.distribute.Strategy. Args: model: Keras Model instance. dataset: Dataset that returns inputs and targets epochs: Number of times to iterate over the data verbose: Integer, Verbosity mode, 0, 1 or 2 callbacks: List of callbacks to be called during training initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. val_dataset: Dataset for validation data. validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with the default value of `None`. validation_freq: Only relevant if validation data is provided. Integer or `collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. Returns: Returns `None`. Raises: ValueError: in case of invalid arguments. """ mode = ModeKeys.TRAIN current_strategy = model._distribution_strategy iteration_value = min(steps_per_epoch, current_strategy.extended.steps_per_run) steps_per_run = backend.variable( value=iteration_value, dtype='int32', name='steps_per_run') # TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops. 
iterator = dist_utils.get_iterator(dataset, current_strategy) scope = dist_utils.distributed_scope( strategy=current_strategy, learning_phase=1) scope.__enter__() out_labels = model.metrics_names or [] step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy, out_labels) # Add initial dummy values for loss and other metric tensors. initial_loop_values = {} initial_loop_values['loss'] = tf.constant(1e7) for m in model._get_training_eval_metrics(): tensor = m.result() initial_loop_values[m.name] = tf.zeros(tensor.shape, tensor.dtype) ctx = current_strategy.extended.experimental_run_steps_on_iterator( step_fn, iterator, iterations=steps_per_run, initial_loop_values=initial_loop_values) train_op = ctx.run_op output_tensors = ctx.last_step_outputs do_validation = bool(validation_steps) if model._compile_distribution: dist_utils._copy_weights_to_distributed_model(model, mode) callbacks = cbks.configure_callbacks( callbacks, model, do_validation=do_validation, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose, count_mode='steps', mode=mode) # Calculate the steps each time on the device. 
steps_to_run = ([current_strategy.extended.steps_per_run] * (steps_per_epoch // current_strategy.extended.steps_per_run)) if steps_per_epoch % current_strategy.extended.steps_per_run: steps_to_run.append( steps_per_epoch % current_strategy.extended.steps_per_run) target_steps = len(steps_to_run) callbacks._call_begin_hook(mode) initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode) for epoch in range(initial_epoch, epochs): dist_utils._reset_metrics(model) callbacks.on_epoch_begin(epoch) epoch_logs = {} step_index = 0 prev_step_count = None current_step = 0 while current_step < target_steps: step_count = steps_to_run[current_step] batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count} callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs) if prev_step_count is None or step_count != prev_step_count: backend.get_session().run(steps_per_run.assign(step_count)) prev_step_count = step_count try: _, outputs = backend.batch_get_value([train_op, output_tensors]) except tf.errors.OutOfRangeError: logging.warning('Your dataset iterator ran out of data; ' 'interrupting training. Make sure that your dataset ' 'can generate at least `steps_per_epoch * epochs` ' 'batches (in this case, %d batches).' % steps_per_epoch * epochs) break batch_logs.update(outputs) callbacks._call_batch_hook(mode, 'end', step_index, batch_logs) step_index = step_index + step_count current_step += 1 if callbacks.model.stop_training: break if (do_validation and training_utils_v1.should_run_validation(validation_freq, epoch)): logging.info('Running validation at fit epoch: %s', epoch) if model._compile_distribution: # Since we create a new clone from the original model we need to copy # the weights back to the original model before we can run validation. 
dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN) val_outs = experimental_tpu_test_loop( # pylint: disable=undefined-variable model, val_dataset, steps=validation_steps, verbose=verbose, callbacks=callbacks) if not isinstance(val_outs, list): val_outs = [val_outs] # Same labels assumed. for label, val_out in zip(out_labels, val_outs): epoch_logs['val_' + label] = val_out callbacks.on_epoch_end(epoch, epoch_logs) if callbacks.model.stop_training: break model._successful_loop_finish = True callbacks._call_end_hook(mode) if model._compile_distribution: # Copy the weights back from the replicated model to the original model. dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN) scope.__exit__(None, None, None) return model.history def experimental_tpu_test_loop(model, dataset, verbose=0, steps=None, callbacks=None): """Test loop for evaluating with TPU tf.distribute.Strategy. Args: model: Keras Model instance. dataset: Dataset for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring predictions finished. Ignored with the default value of `None`. callbacks: List of callbacks to be called during training Returns: Scalar loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the outputs. 
""" mode = ModeKeys.TEST current_strategy = model._distribution_strategy iterator = dist_utils.get_iterator(dataset, current_strategy) scope = dist_utils.distributed_scope( strategy=current_strategy, learning_phase=0) scope.__enter__() out_labels = model.metrics_names def _test_step_fn(inputs): """A fn that returns output of single test step.""" if isinstance(inputs, (tuple, list)) and len(inputs) == 2: inputs, targets = inputs else: targets = None (tf.distribute.get_replica_context().merge_call( _build_model, args=(model, mode, inputs, targets))) (_, outputs, updates, _) = _per_replica_execution_function( dist_utils.get_distributed_model(model, mode), mode) with tf.control_dependencies([updates]): return [tf.identity(out) for out in outputs] test_input_data = iterator.get_next() per_replica_outputs = current_strategy.run( _test_step_fn, args=(test_input_data,)) output_tensors = {} for label, output in zip(out_labels, per_replica_outputs): if label == 'loss': reduce_op = tf.distribute.ReduceOp.SUM else: # We reduce all other metrics using mean for now. This is temporary # workaround until new metrics are in place. reduce_op = tf.distribute.ReduceOp.MEAN output_tensors[label] = current_strategy.reduce(reduce_op, output, axis=None) test_op = tf.group(list(output_tensors.values())) if verbose >= 1: progbar = Progbar(target=steps) if model._compile_distribution: dist_utils._copy_weights_to_distributed_model(model, mode) dist_utils._reset_metrics(model) callbacks = cbks.configure_callbacks( callbacks, model, do_validation=False, epochs=1, steps_per_epoch=steps, verbose=verbose, count_mode='steps', mode=ModeKeys.TEST) callbacks._call_begin_hook(mode) outs = [0.] 
* len(model.metrics_names) if steps is not None: target_steps = steps else: raise ValueError('Number of steps could not be inferred from the data, ' 'please pass the steps argument.') current_step = 0 while current_step < target_steps: batch_logs = {'batch': current_step, 'size': 1} callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs) try: _, batch_outs = backend.batch_get_value([test_op, output_tensors]) except tf.errors.OutOfRangeError: warning_msg = ( 'Make sure that your dataset can generate at least ' '`steps` batches (in this case, {} batches).'.format(steps)) logging.warning('Your dataset iterator ran out of data; ' 'interrupting evaluation. ' + warning_msg) target_steps = current_step break for i, label in enumerate(model.metrics_names): if i == 0: # Loss is stateless metrics. outs[i] += batch_outs[label] else: # For all stateful metrics, the aggregation is handled by mirrored vars. outs[i] = batch_outs[label] batch_logs = cbks.make_logs(model, batch_logs, outs, mode) callbacks._call_batch_hook(mode, 'end', current_step, batch_logs) if verbose == 1: progbar.update(current_step + 1) current_step += 1 if verbose >= 1: # Progress bar finishes at the end. progbar.update(target_steps) callbacks._call_end_hook(mode) scope.__exit__(None, None, None) if len(outs) >= 0: outs[0] /= (target_steps) if len(outs) == 1: return outs[0] return outs def experimental_tpu_predict_loop(model, dataset, verbose=0, steps=None, callbacks=None): """Predict loop for predicting with TPU tf.distribute.Strategy. Args: model: Keras Model instance. dataset: Dataset for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. callbacks: List of callbacks to be called during training Returns: Array of predictions (if the model has a single output) or list of arrays of predictions (if the model has multiple outputs). 
""" mode = ModeKeys.PREDICT dataset_fully_shaped = dist_utils.is_dataset_shape_fully_defined(dataset) padding_handler = None if not dataset_fully_shaped: # TODO(hongjunchoi): Investigate whether operations from # PartialBatchPaddingHandler are unnecessarily pruned out # during graph optimization. padding_handler = padding_util.PartialBatchPaddingHandler( model._feed_output_shapes) batch_size, _, prefetch_buffer = input_lib._get_dataset_attributes(dataset) padding_handler.padded_batch_size = batch_size padding_handler.padding_mask = dataset.reduce(padding_handler.padding_mask, padding_handler.update_mask) dataset = dataset.map(padding_handler.pad_batch) dataset = dataset.unbatch() # Upon this point, it is guaranteed that the dataset does not # have partial batches. Thus, we set `drop_remainder=True` to # get static shape information about the elements in the dataset. dataset = dataset.batch(batch_size, drop_remainder=True) if prefetch_buffer is not None: dataset = dataset.prefetch(prefetch_buffer) current_strategy = model._distribution_strategy iterator = dist_utils.get_iterator(dataset, current_strategy) scope = dist_utils.distributed_scope( strategy=current_strategy, learning_phase=0) scope.__enter__() def _predict_step_fn(inputs): """A fn that returns output of single prediction step.""" (tf.distribute.get_replica_context().merge_call( _build_model, args=(model, mode, inputs))) (_, outputs, updates, _) = _per_replica_execution_function( dist_utils.get_distributed_model(model, mode), mode) with tf.control_dependencies([updates]): return [tf.identity(out) for out in outputs] # TODO(hongjunchoi): When numpy array is passed as an input to `predict()` # use numpy arrays directly to avoid cumulating unnecessary input pipeline # ops. 
predict_input_data = iterator.get_next() per_replica_outputs = current_strategy.run( _predict_step_fn, args=(predict_input_data,)) output_tensors = dist_utils.flatten_per_replica_values( current_strategy, per_replica_outputs) if verbose >= 1: progbar = Progbar(target=steps) if model._compile_distribution: dist_utils._copy_weights_to_distributed_model(model, mode) dist_utils._reset_metrics(model) callbacks = cbks.configure_callbacks( callbacks, model, do_validation=False, epochs=1, steps_per_epoch=steps, verbose=verbose, count_mode='steps', mode=mode) callbacks._call_begin_hook(mode) # Since we do not know how many samples we will see, we cannot pre-allocate # the returned Numpy arrays. Instead, we store one array per batch seen # and concatenate them upon returning. num_model_outputs = len(model.output_names) unconcatenated_outs = [[] for _ in range(num_model_outputs)] if steps is not None: target_steps = steps else: raise ValueError('Number of steps could not be inferred from the data, ' 'please pass the steps argument.') current_step = 0 while current_step < target_steps: batch_logs = {'batch': current_step, 'size': 1} callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs) try: predict_ops = tf.group(output_tensors) _, batch_outs = backend.batch_get_value([predict_ops, output_tensors]) except tf.errors.OutOfRangeError: warning_msg = ( 'Make sure that your dataset can generate at least ' '`steps` batches (in this case, {} batches).'.format(steps)) logging.warning('Your dataset iterator ran out of data; ' 'interrupting evaluation. ' + warning_msg) break # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy. 
for i in range(num_model_outputs): output_start_index = i * current_strategy.num_replicas_in_sync output_end_index = ( output_start_index + current_strategy.num_replicas_in_sync) single_model_output = batch_outs[output_start_index:output_end_index] unconcatenated_outs[i].extend(single_model_output) batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode) callbacks._call_batch_hook(mode, 'end', current_step, batch_logs) if verbose == 1: progbar.update(current_step + 1) current_step += 1 if verbose >= 1: # Progress bar finishes at the end. progbar.update(current_step) callbacks._call_end_hook(mode) scope.__exit__(None, None, None) if len(unconcatenated_outs) == 1: prediction_result = np.concatenate(unconcatenated_outs[0], axis=0) else: prediction_result = [ np.concatenate(out, axis=0) for out in unconcatenated_outs ] if padding_handler: prediction_result = padding_handler.apply_mask(prediction_result) return prediction_result class DistributionSingleWorkerTrainingLoop(training_utils_v1.TrainingLoop): """Training loop for distribution strategy with single worker.""" def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs): """Fit loop for Distribution Strategies.""" dist_utils.validate_callbacks(input_callbacks=callbacks, optimizer=model.optimizer) dist_utils.validate_inputs(x, y) batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size( model._distribution_strategy, x, batch_size, steps_per_epoch, ModeKeys.TRAIN, validation_split=validation_split) batch_size = model._validate_or_infer_batch_size( batch_size, steps_per_epoch, x) dataset = model._distribution_standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, validation_split=validation_split, shuffle=shuffle, epochs=epochs) 
if not dist_utils.is_distributing_by_cloning(model): with model._distribution_strategy.scope(): (dataset, _, _) = model._standardize_user_data( dataset, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, validation_split=validation_split, shuffle=shuffle) val_dataset = None if validation_data: val_x, val_y, val_sample_weights = ( training_utils_v1.unpack_validation_data(validation_data)) dist_utils.validate_inputs(val_x, val_y) _, validation_steps = dist_utils.process_batch_and_step_size( model._distribution_strategy, val_x, batch_size, validation_steps, ModeKeys.TEST) val_dataset = model._distribution_standardize_user_data( val_x, val_y, sample_weight=val_sample_weights, class_weight=None, batch_size=batch_size, validation_split=validation_split, shuffle=shuffle, allow_partial_batch=True) elif validation_split: raise ValueError('validation_split argument is not supported with ' 'distribution strategies.') if backend.is_tpu_strategy(model._distribution_strategy): steps_per_epoch = training_utils_v1.infer_steps_for_dataset( model, dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch') if steps_per_epoch is None: raise ValueError('Number of steps could not be inferred from the data, ' 'please pass the steps_per_epoch argument.') if not tf.executing_eagerly(): # Run TPU training in a custom loop in graph mode. 
return experimental_tpu_fit_loop( model, dataset, epochs=epochs, verbose=verbose, callbacks=callbacks, val_dataset=val_dataset, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq) return training_arrays_v1.fit_loop( model, dataset, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_dataset, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq, steps_name='steps_per_epoch') def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs): """Evaluate loop for Distribution Strategies.""" dist_utils.validate_inputs(x, y) batch_size, steps = dist_utils.process_batch_and_step_size( model._distribution_strategy, x, batch_size, steps, ModeKeys.TEST) batch_size = model._validate_or_infer_batch_size(batch_size, steps, x) dataset = model._distribution_standardize_user_data( x, y, sample_weight=sample_weight, batch_size=batch_size, allow_partial_batch=True) if backend.is_tpu_strategy(model._distribution_strategy): steps = training_utils_v1.infer_steps_for_dataset( model, dataset, steps, steps_name='steps') if steps is None: raise ValueError('Number of steps could not be inferred from the data, ' 'please pass the steps argument.') if not tf.executing_eagerly(): # Run TPU evaluation in a custom loop in graph mode. 
return experimental_tpu_test_loop( model, dataset, verbose=verbose, steps=steps, callbacks=callbacks) return training_arrays_v1.test_loop( model, inputs=dataset, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks) def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, **kwargs): """Predict loop for Distribution Strategies.""" dist_utils.validate_inputs(x=x, y=None) batch_size, steps = dist_utils.process_batch_and_step_size( model._distribution_strategy, x, batch_size, steps, ModeKeys.PREDICT) batch_size = model._validate_or_infer_batch_size(batch_size, steps, x) dataset = model._distribution_standardize_user_data( x, batch_size=batch_size, allow_partial_batch=True) if backend.is_tpu_strategy(model._distribution_strategy): steps = training_utils_v1.infer_steps_for_dataset( model, dataset, steps, steps_name='steps') if steps is None: raise ValueError('Number of steps could not be inferred from the data, ' 'please pass the steps argument.') if not tf.executing_eagerly(): return experimental_tpu_predict_loop( model, dataset, verbose=verbose, steps=steps, callbacks=callbacks) return training_arrays_v1.predict_loop( model, dataset, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks) def _train_with_multi_worker(method): """Decorator that handles multi worker training with distribution strategy.""" def wrapper(model, **kwargs): def _worker_fn(_): callbacks = kwargs.pop('callbacks', None) filtered_callbacks = dist_utils.filter_distributed_callbacks( callbacks, model) kwargs['callbacks'] = filtered_callbacks return method(model, **kwargs) return dc.run_distribute_coordinator( _worker_fn, model._distribution_strategy) return wrapper class DistributionMultiWorkerTrainingLoop(training_utils_v1.TrainingLoop): """Training loop for distribution strategy with multiple worker.""" def __init__(self, single_worker_loop): self._single_worker_loop = single_worker_loop def fit(self, *args, **kwargs): return 
_train_with_multi_worker(self._single_worker_loop.fit)( *args, **kwargs) def evaluate(self, *args, **kwargs): return _train_with_multi_worker(self._single_worker_loop.evaluate)( *args, **kwargs) def predict(self, *args, **kwargs): # Currently predict is still using the single worker implementation. return self._single_worker_loop.predict(*args, **kwargs)
29,172
35.974651
82
py
keras
keras-master/keras/engine/keras_tensor_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """InputSpec tests.""" import tensorflow.compat.v2 as tf from keras import layers from keras.engine import keras_tensor class KerasTensorTest(tf.test.TestCase): def test_repr_and_string(self): kt = keras_tensor.KerasTensor( type_spec=tf.TensorSpec(shape=(1, 2, 3), dtype=tf.float32)) expected_str = ("KerasTensor(type_spec=TensorSpec(shape=(1, 2, 3), " "dtype=tf.float32, name=None))") expected_repr = "<KerasTensor: shape=(1, 2, 3) dtype=float32>" self.assertEqual(expected_str, str(kt)) self.assertEqual(expected_repr, repr(kt)) kt = keras_tensor.KerasTensor( type_spec=tf.TensorSpec(shape=(2,), dtype=tf.int32), inferred_value=[2, 3]) expected_str = ("KerasTensor(type_spec=TensorSpec(shape=(2,), " "dtype=tf.int32, name=None), inferred_value=[2, 3])") expected_repr = ( "<KerasTensor: shape=(2,) dtype=int32 inferred_value=[2, 3]>") self.assertEqual(expected_str, str(kt)) self.assertEqual(expected_repr, repr(kt)) kt = keras_tensor.KerasTensor( type_spec=tf.SparseTensorSpec( shape=(1, 2, 3), dtype=tf.float32)) expected_str = ("KerasTensor(type_spec=SparseTensorSpec(" "TensorShape([1, 2, 3]), tf.float32))") expected_repr = ( "<KerasTensor: type_spec=SparseTensorSpec(" "TensorShape([1, 2, 3]), tf.float32)>") self.assertEqual(expected_str, str(kt)) self.assertEqual(expected_repr, repr(kt)) inp = 
layers.Input(shape=(3, 5)) kt = layers.Dense(10)(inp) expected_str = ( "KerasTensor(type_spec=TensorSpec(shape=(None, 3, 10), " "dtype=tf.float32, name=None), name='dense/BiasAdd:0', " "description=\"created by layer 'dense'\")") expected_repr = ( "<KerasTensor: shape=(None, 3, 10) dtype=float32 (created " "by layer 'dense')>") self.assertEqual(expected_str, str(kt)) self.assertEqual(expected_repr, repr(kt)) kt = tf.reshape(kt, shape=(3, 5, 2)) expected_str = ( "KerasTensor(type_spec=TensorSpec(shape=(3, 5, 2), dtype=tf.float32, " "name=None), name='tf.reshape/Reshape:0', description=\"created " "by layer 'tf.reshape'\")") expected_repr = ("<KerasTensor: shape=(3, 5, 2) dtype=float32 (created " "by layer 'tf.reshape')>") self.assertEqual(expected_str, str(kt)) self.assertEqual(expected_repr, repr(kt)) kts = tf.unstack(kt) for i in range(3): expected_str = ( "KerasTensor(type_spec=TensorSpec(shape=(5, 2), dtype=tf.float32, " "name=None), name='tf.unstack/unstack:%s', description=\"created " "by layer 'tf.unstack'\")" % (i,)) expected_repr = ("<KerasTensor: shape=(5, 2) dtype=float32 " "(created by layer 'tf.unstack')>") self.assertEqual(expected_str, str(kts[i])) self.assertEqual(expected_repr, repr(kts[i])) if __name__ == "__main__": tf.compat.v1.enable_eager_execution() tf.compat.v1.enable_v2_tensorshape() tf.test.main()
3,758
40.307692
80
py
keras
keras-master/keras/engine/functional_utils.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for keras functional model.""" from keras import backend from keras.engine import input_layer as input_layer_module from keras.engine import keras_tensor from keras.engine import node as node_module import tensorflow.compat.v2 as tf _KERAS_TENSOR_TYPE_CHECK_ERROR_MSG = ( 'Found unexpected instance while processing input tensors for keras ' 'functional model. Expecting KerasTensor which is from tf.keras.Input() ' 'or output from keras layer call(). Got: {}') def is_input_keras_tensor(tensor): """Check if tensor is directly generated from `tf.keras.Input`. This check is useful when constructing the functional model, since we will need to clone Nodes and KerasTensors if the model is building from non input tensor. Args: tensor: A `KerasTensor` as inputs to the functional model. Returns: bool. Whether the tensor is directly generated from `tf.keras.Input`. Raises: ValueError: if the tensor is not a KerasTensor instance. """ if not node_module.is_keras_tensor(tensor): raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(tensor)) return tensor.node.is_input def find_nodes_by_inputs_and_outputs(inputs, outputs): """Fetch all Nodes in the graph defined by "inputs" and "outputs". 
This method is used to find and then clone Nodes when creating a new sub-model from an existing functional model. Args: inputs: A nested structure of KerasTensor to use as model inputs. outputs: A nested structure of KerasTensor to use as model outputs. Returns: A list of Nodes that are connected to the inputs and outputs. Raises: ValueError: when inputs and outputs are disconnected or in case of unexpected objects in the inputs/outputs. """ # We walk the graph bottom up, starting from output nodes, and keep tracing # the upstream node, until we find all the inputs nodes. We don't use top # down search here since we don't know whether a certain node is in the graph # between inputs and outputs, e.g. a functional graph could have multiple # outputs, and the user could choose a subset of them to build the model. # The bottom up approach will ensure all the nodes we visit are actually # in use. If we reach the top and didn't find the nodes in the `inputs`, # that's an error, since the user didn't specify the correct inputs. start_keras_tensors = tf.nest.flatten(outputs) end_keras_tensors = tf.nest.flatten(inputs) for t in start_keras_tensors + end_keras_tensors: if not node_module.is_keras_tensor(t): raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(t)) end_ids = set([id(kt) for kt in end_keras_tensors]) # Track all the end tensors we found so far, if we didn't reach all the # user-specified keras inputs after we finish the search, then that's an # error since the inputs are disconnected from the outputs. end_ids_found = set() nodes_to_visit = [] nodes_in_graph = [] node_id_visited = set() for t in start_keras_tensors: nodes_to_visit.append(t.node) while nodes_to_visit: node = nodes_to_visit.pop(0) if id(node) in node_id_visited: continue node_id_visited.add(id(node)) nodes_in_graph.append(node) # Any input keras_tensor that produce the current node. 
for kt in node.keras_inputs: if id(kt) in end_ids: # We found the inputs of the model, stop tracing upstream nodes end_ids_found.add(id(kt)) continue inbound_node = kt.node # In case this is the tf.keras.Input node, we have reached the end of the # tracing of upstream nodes. Any further tracing will just be an # infinite loop. we should raise an error here since we didn't find the # input in the user-specified inputs. if inbound_node.is_input: raise ValueError('Found input tensor cannot be reached given provided ' 'output tensors. Please make sure the tensor {} is ' 'included in the model inputs when building ' 'functional model.'.format(kt)) nodes_to_visit.append(inbound_node) # Do a final check and make sure we have reached all the user-specified inputs if end_ids != end_ids_found: unvisited_inputs = [kt for kt in end_keras_tensors if id(kt) not in end_ids_found] raise ValueError('Found unvisited input tensors that are disconnected from ' 'the outputs: {}'.format(unvisited_inputs)) return nodes_in_graph def clone_graph_nodes(inputs, outputs): """Clone the `Node` between the inputs and output tensors. This function is used to create a new functional model from any intermediate keras tensors. The clone of the nodes mimic the behavior of reconstructing the functional graph network by re-executing all the __call__ methods. The cloned nodes will be appended to the layers. Note that a new tf.keras.Inputs will be created for any items in the `inputs` Args: inputs: A nested structure of keras_tensors. outputs: A nested structure of keras_tensors. Returns: A pair of inputs and outputs, with cloned keras_tensors. They can be used to create a new functional model. """ nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) cloned_inputs = [] cloned_outputs = [] # We not only need to create copies of Nodes (mimic the calls), also need to # clone keras_tensors to avoid the override of _keras_history attached on the # keras_tensor. 
The following dict is used to track any keras tensor we cloned # The key is the string ID of the original keras tensor, and value is the # cloned keras_tensor instance. kt_id_mapping = {} for kt_input in tf.nest.flatten(inputs): if kt_input.node.is_input: # For any existing keras_tensor from tf.keras.Input, we leave them as is. cloned_inputs.append(kt_input) kt_id_mapping[id(kt_input)] = kt_input else: # We need to create a new tf.keras.Input for any intermediate keras_tensor cpy = _clone_keras_tensor(kt_input) cloned_input = input_layer_module.Input(tensor=cpy) cloned_inputs.append(cloned_input) kt_id_mapping[id(kt_input)] = cloned_input cloned_inputs = tf.nest.pack_sequence_as(inputs, cloned_inputs) for kt_output in tf.nest.flatten(outputs): cpy = _clone_keras_tensor(kt_output) # We reuse the _keras_history here, which contains the old information. It # is used in the Node constructor to check if the tensor "is_keras_tensor()" # The history will be override by the Node constructor anyway for the # corresponding layer output anyway. cpy._keras_history = kt_output._keras_history # pylint: disable=protected-access cloned_outputs.append(cpy) kt_id_mapping[id(kt_output)] = cpy cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs) for node in nodes_to_clone: # Clone any keras_tensors to avoid override of _keras_history # Or reuse an existing keras_tensor if it has already been cloned. output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping) call_args_copy = clone_keras_tensors(node.call_args, kt_id_mapping) call_kwargs_copy = clone_keras_tensors(node.call_kwargs, kt_id_mapping) # Creating new nodes based on the existing node information. # Node wires itself to inbound and outbound layers. # The Node constructor actually updates this layer's self._inbound_nodes, # sets _keras_history on the outputs, and adds itself to the # `_outbound_nodes` of the layers that produced the inputs to this # layer call. 
node_module.Node(node.layer, call_args=call_args_copy, call_kwargs=call_kwargs_copy, outputs=output_copy) return cloned_inputs, cloned_outputs def clone_keras_tensors(args, keras_tensor_mapping): """Clone the keras tensors from the inputs. For any KerasTensor instance in the `args`, a new copy of KerasTensor will be created if it has not been cloned yet (by checking the `keras_tensor_mapping`). For any other types, the instance will be unchanged. This function is useful for cloning the Nodes since KerasTensor can't be reused across the models. Args: args: A nested structure of objects, which could contain KerasTensor. keras_tensor_mapping: A dict contains the ID of original KerasTensor, and the cloned KerasTensor instance. The dict will be updated with newly copied KerasTensor instances within this method. Returns: Same structure as inputs, with KerasTensor cloned. """ result = [] for obj in tf.nest.flatten(args): if node_module.is_keras_tensor(obj): if id(obj) in keras_tensor_mapping: cpy = keras_tensor_mapping[id(obj)] else: # Create copy of keras_tensor if we haven't done it before cpy = _clone_keras_tensor(obj) cpy._keras_history = obj._keras_history # pylint: disable=protected-access keras_tensor_mapping[id(obj)] = cpy result.append(cpy) else: result.append(obj) return tf.nest.pack_sequence_as(args, result) def _clone_keras_tensor(kt): """Create an identical keras_tensor based on the input. We use keras_tensor_to_placeholder and keras_tensor_from_tensor to make sure inferred shape are not lost during the copy. Args: kt: the input KerasTensor. Returns: An identical copy of the input KerasTensor. """ # Create a scratch graph since we don't intend to use the placeholders. with backend._scratch_graph() as scratch_graph: # pylint: disable=protected-access with scratch_graph.as_default(): placeholder = keras_tensor.keras_tensor_to_placeholder(kt) return keras_tensor.keras_tensor_from_tensor(placeholder)
10,492
41.140562
85
py
keras
keras-master/keras/engine/base_layer_utils_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import numpy as np import tensorflow.compat.v2 as tf import keras from keras import backend from keras import combinations from keras import keras_parameterized from keras.engine import base_layer_utils @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class TrackableWeightHandlerTest(keras_parameterized.TestCase): def get_table_handler(self): # Note: There is some repetition in these tests' setup. However, Tensorflow # does not play nicely with a separate setUp() call (causing errors related # to graph building), so we have to use a called setup instead of a setUp() # call. 
table = tf.lookup.experimental.MutableHashTable( key_dtype=tf.string, value_dtype=tf.int32, default_value=0) return base_layer_utils.TrackableWeightHandler(table) def test_get_num_tensors(self): table_handler = self.get_table_handler() self.assertEqual(2, table_handler.num_tensors) def test_get_and_set_weights(self): table_handler = self.get_table_handler() table_data = {b'a': 1, b'b': 2, b'c': 3} table_handler.set_weights( [list(table_data.keys()), list(table_data.values())]) weights = backend.batch_get_value(table_handler.get_tensors()) weight_data = {key: value for key, value in zip(weights[0], weights[1])} self.assertDictEqual(table_data, weight_data) def test_get_and_set_weights_does_not_add_ops(self): table_handler = self.get_table_handler() table_data = {b'a': 1, b'b': 2, b'c': 3} table_handler.set_weights( [list(table_data.keys()), list(table_data.values())]) _ = backend.batch_get_value(table_handler.get_tensors()) backend.get_session().graph.finalize() table_handler.set_weights( [list(table_data.keys()), list(table_data.values())]) _ = backend.batch_get_value(table_handler.get_tensors()) @combinations.generate(combinations.combine(mode=['eager'])) class OpLayerTest(keras_parameterized.TestCase): def test_tensor_op_layer(self): int_values = keras.Input(shape=(2,), dtype=tf.int32) float_values = tf.cast(int_values, tf.float32) model = keras.Model(int_values, float_values) model.compile(loss='mse') input_data = np.array([[1, 2], [3, 4]], dtype=np.int32) expected = [[1.0, 2.0], [3.0, 4.0]] output = model.predict(input_data) self.assertAllClose(expected, output) def test_ragged_op_layer_keras_tensors(self): int_values = keras.Input(shape=(None,), dtype=tf.int32, ragged=True) float_values = tf.cast(int_values, tf.float32) model = keras.Model(int_values, float_values) model.compile(loss='mse') input_data = tf.ragged.constant( [[1, 2], [3, 4]], dtype=np.int32) expected = [[1.0, 2.0], [3.0, 4.0]] output = model.predict(input_data) self.assertIsInstance(output, 
tf.RaggedTensor) self.assertAllClose(expected, output) def test_sparse_op_layer_keras_tensors(self): int_values = keras.Input(shape=(None,), dtype=tf.int32, sparse=True) float_values = tf.cast(int_values, tf.float32) _ = keras.Model(int_values, float_values) model = keras.Model(int_values, float_values) model.compile(loss='mse') input_data = tf.sparse.from_dense( np.array([[1, 2], [3, 4]], dtype=np.int32)) expected = [[1.0, 2.0], [3.0, 4.0]] output = model.predict(input_data) self.assertIsInstance(output, tf.SparseTensor) self.assertAllClose(expected, tf.sparse.to_dense(output)) if __name__ == '__main__': tf.test.main()
4,217
36.660714
80
py
keras
keras-master/keras/type/types.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-classes-have-attributes """Python module for Keras base types. All the classes in this module is abstract classes that contains none or minimal implementations. It is designed be used as base class for other concrete classes, type checks, and python3 type hints. """ import abc # TODO(scottzhu): Export all the types under this module with API symbol. class Layer(object, metaclass=abc.ABCMeta): """This is the class from which all layers inherit. A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves *computation*, defined in the `call()` method, and a *state* (weight variables), defined either in the constructor `__init__()` or in the `build()` method. Users will just instantiate a layer and then treat it as a callable. We recommend that descendants of `Layer` implement the following methods: * `__init__()`: Defines custom layer attributes, and creates layer state variables that do not depend on input shapes, using `add_weight()`. * `build(self, input_shape)`: This method can be used to create weights that depend on the shape(s) of the input(s), using `add_weight()`. `__call__()` will automatically build the layer (if it has not been built yet) by calling `build()`. 
* `call(self, *args, **kwargs)`: Called in `__call__` after making sure `build()` has been called. `call()` performs the logic of applying the layer to the input tensors (which should be passed in as argument). Two reserved keyword arguments you can optionally use in `call()` are: - `training` (boolean, whether the call is in inference mode or training mode) - `mask` (boolean tensor encoding masked timesteps in the input, used in RNN layers) * `get_config(self)`: Returns a dictionary containing the configuration used to initialize this layer. If the keys differ from the arguments in `__init__`, then override `from_config(self)` as well. This method is used when saving the layer or a model that contains this layer. Examples: Here's a basic example: a layer with two variables, `w` and `b`, that returns `y = w . x + b`. It shows how to implement `build()` and `call()`. Variables set as attributes of a layer are tracked as weights of the layers (in `layer.weights`). ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): # Create the state of the layer (weights) w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(input_shape[-1], self.units), dtype='float32'), trainable=True) b_init = tf.zeros_initializer() self.b = tf.Variable( initial_value=b_init(shape=(self.units,), dtype='float32'), trainable=True) def call(self, inputs): # Defines the computation from inputs to outputs return tf.matmul(inputs, self.w) + self.b # Instantiates the layer. linear_layer = SimpleDense(4) # This will also call `build(input_shape)` and create the weights. 
y = linear_layer(tf.ones((2, 2))) assert len(linear_layer.weights) == 2 # These weights are trainable, so they're listed in `trainable_weights`: assert len(linear_layer.trainable_weights) == 2 ``` Note that the method `add_weight()` offers a shortcut to create weights: ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b ``` Besides trainable weights, updated via backpropagation during training, layers can also have non-trainable weights. These weights are meant to be updated manually during `call()`. Here's a example layer that computes the running sum of its inputs: ```python class ComputeSum(Layer): def __init__(self, input_dim): super(ComputeSum, self).__init__() # Create a non-trainable weight. self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False) def call(self, inputs): self.total.assign_add(tf.reduce_sum(inputs, axis=0)) return self.total my_sum = ComputeSum(2) x = tf.ones((2, 2)) y = my_sum(x) print(y.numpy()) # [2. 2.] y = my_sum(x) print(y.numpy()) # [4. 4.] assert my_sum.weights == [my_sum.total] assert my_sum.non_trainable_weights == [my_sum.total] assert my_sum.trainable_weights == [] ``` For more information about creating layers, see the guide [Making new Layers and Models via subclassing]( https://www.tensorflow.org/guide/keras/custom_layers_and_models) Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type of the first input in TensorFlow 1). 
dynamic: Set this to `True` if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If `False`, we assume that the layer can safely be used to generate a static computation graph. Attributes: name: The name of the layer (string). dtype: The dtype of the layer's computations and weights. If mixed precision is used with a `tf.keras.mixed_precision.Policy`, this is instead just the dtype of the layer's weights, as the computations are done in a different dtype. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. Each layer has a dtype, which is typically the dtype of the layer's computations and variables. A layer's dtype can be queried via the `Layer.dtype` property. The dtype is specified with the `dtype` constructor argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()` if no dtype is passed. `floatx()` itself defaults to "float32". Additionally, layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed precision is used, layers may have different computation and variable dtypes. See `tf.keras.mixed_precision.Policy` for details on layer dtypes. """ pass
8,159
40.42132
80
py
keras
keras-master/keras/distribute/keras_save_load_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for saving and loading using keras save/load APIs with DS.""" import tensorflow.compat.v2 as tf from keras import testing_utils from keras.distribute import saved_model_test_base as test_base from keras.saving import save @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class KerasSaveLoadTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = 'keras_save_load' super(KerasSaveLoadTest, self).setUp() def _save_model(self, model, saved_dir): model.save(saved_dir, save_format='tf') def _load_and_run_model(self, distribution, saved_dir, predict_dataset, output_name='output_1'): restored_keras_model = save.load_model(saved_dir) return restored_keras_model.predict( predict_dataset, steps=test_base.PREDICT_STEPS) @tf.__internal__.distribute.combinations.generate(test_base.simple_models_with_strategies()) def test_save_no_strategy_restore_strategy(self, model_and_input, distribution): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_no_strategy(self, model_and_input, 
distribution, save_in_scope): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_strategy(self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope): self.run_test_save_strategy_restore_strategy(model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope) if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
3,294
44.136986
97
py
keras
keras-master/keras/distribute/keras_metrics_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras metrics.""" from absl.testing import parameterized from keras import metrics from keras.engine import base_layer import tensorflow.compat.v2 as tf combinations = tf.__internal__.distribute.combinations def _labeled_dataset_fn(): # First four batches of x: labels, predictions -> (labels == predictions) # 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False # 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False # 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True return tf.data.Dataset.range(1000).map( lambda x: {"labels": x % 5, "predictions": x % 3}).batch( 4, drop_remainder=True) def _boolean_dataset_fn(): # First four batches of labels, predictions: {TP, FP, TN, FN} # with a threshold of 0.5: # T, T -> TP; F, T -> FP; T, F -> FN # F, F -> TN; T, T -> TP; F, T -> FP # T, F -> FN; F, F -> TN; T, T -> TP # F, T -> FP; T, F -> FN; F, F -> TN return tf.data.Dataset.from_tensor_slices({ "labels": [True, False, True, False], "predictions": [True, True, False, False]}).repeat().batch( 3, drop_remainder=True) def _threshold_dataset_fn(): # First four batches of labels, predictions: {TP, FP, TN, FN} # with a threshold of 0.5: # True, 1.0 -> TP; False, 
.75 -> FP; True, .25 -> FN # False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP # True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP # False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN return tf.data.Dataset.from_tensor_slices({ "labels": [True, False, True, False], "predictions": [1.0, 0.75, 0.25, 0.]}).repeat().batch( 3, drop_remainder=True) def _regression_dataset_fn(): return tf.data.Dataset.from_tensor_slices({ "labels": [1., .5, 1., 0.], "predictions": [1., .75, .25, 0.]}).repeat() def all_combinations(): return tf.__internal__.test.combinations.combine( distribution=[ combinations.default_strategy, combinations.one_device_strategy, combinations.mirrored_strategy_with_gpu_and_cpu, combinations.mirrored_strategy_with_two_gpus ], mode=["graph", "eager"]) def tpu_combinations(): return tf.__internal__.test.combinations.combine( distribution=[ combinations.tpu_strategy, ], mode=["graph"]) class KerasMetricsTest(tf.test.TestCase, parameterized.TestCase): def _test_metric(self, distribution, dataset_fn, metric_init_fn, expected_fn): with tf.Graph().as_default(), distribution.scope(): metric = metric_init_fn() iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn()) updates = distribution.experimental_local_results( distribution.run(metric, args=(iterator.get_next(),))) batches_per_update = distribution.num_replicas_in_sync self.evaluate(iterator.initializer) self.evaluate([v.initializer for v in metric.variables]) batches_consumed = 0 for i in range(4): batches_consumed += batches_per_update self.evaluate(updates) self.assertAllClose(expected_fn(batches_consumed), self.evaluate(metric.result()), 0.001, msg="After update #" + str(i+1)) if batches_consumed >= 4: # Consume 4 input batches in total. 
break @combinations.generate(all_combinations() + tpu_combinations()) def testMean(self, distribution): def _dataset_fn(): return tf.data.Dataset.range(1000).map(tf.compat.v1.to_float).batch( 4, drop_remainder=True) def _expected_fn(num_batches): # Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc. return num_batches * 2 - 0.5 self._test_metric(distribution, _dataset_fn, metrics.Mean, _expected_fn) @combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ combinations.mirrored_strategy_with_one_cpu, combinations.mirrored_strategy_with_gpu_and_cpu, combinations.mirrored_strategy_with_two_gpus, combinations.tpu_strategy_packed_var, combinations.parameter_server_strategy_1worker_2ps_cpu, combinations.parameter_server_strategy_1worker_2ps_1gpu, ], mode=["eager"], jit_compile=[False]) + tf.__internal__.test.combinations.combine( distribution=[combinations.mirrored_strategy_with_two_gpus], mode=["eager"], jit_compile=[True])) def testAddMetric(self, distribution, jit_compile): if not tf.__internal__.tf2.enabled(): self.skipTest("Skip test since tf2 is not enabled. Pass " " --test_env=TF2_BEHAVIOR=1 to enable tf2 behavior.") class MetricLayer(base_layer.Layer): def __init__(self): super(MetricLayer, self).__init__(name="metric_layer") self.sum = metrics.Sum(name="sum") # Using aggregation for jit_compile results in failure. Thus only set # aggregation for PS Strategy for multi-gpu tests. 
if isinstance(distribution, tf.distribute.experimental.ParameterServerStrategy): self.sum_var = tf.Variable( 1.0, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) else: self.sum_var = tf.Variable(1.0) def call(self, inputs): self.add_metric(self.sum(inputs)) self.add_metric( tf.reduce_mean(inputs), name="mean", aggregation="mean") self.sum_var.assign(self.sum.result()) return inputs with distribution.scope(): layer = MetricLayer() def func(): return layer(tf.ones(())) if jit_compile: func = tf.function(jit_compile=True)(func) @tf.function def run(): return distribution.run(func) if distribution._should_use_with_coordinator: coord = tf.distribute.experimental.coordinator.ClusterCoordinator( distribution) coord.schedule(run) coord.join() else: run() self.assertEqual(layer.metrics[0].result().numpy(), 1.0 * distribution.num_replicas_in_sync) self.assertEqual(layer.metrics[1].result().numpy(), 1.0) self.assertEqual(layer.sum_var.read_value().numpy(), 1.0 * distribution.num_replicas_in_sync) @combinations.generate(all_combinations()) def test_precision(self, distribution): # True positive is 2, false positive 1, precision is 2/3 = 0.6666667 label_prediction = ([0, 1, 1, 1], [1, 0, 1, 1]) with distribution.scope(): precision = metrics.Precision() self.evaluate([v.initializer for v in precision.variables]) updates = distribution.run(precision, args=label_prediction) self.evaluate(updates) self.assertAllClose(precision.result(), 0.6666667) @combinations.generate(all_combinations()) def test_recall(self, distribution): # True positive is 2, false negative 1, precision is 2/3 = 0.6666667 label_prediction = ([0, 1, 1, 1], [1, 0, 1, 1]) with distribution.scope(): recall = metrics.Recall() self.evaluate([v.initializer for v in recall.variables]) updates = distribution.run(recall, args=label_prediction) self.evaluate(updates) self.assertAllClose(recall.result(), 0.6666667) @combinations.generate(all_combinations()) def test_SensitivityAtSpecificity(self, distribution): 
label_prediction = ([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) with distribution.scope(): metric = metrics.SensitivityAtSpecificity(0.5) self.evaluate([v.initializer for v in metric.variables]) updates = distribution.run(metric, args=label_prediction) self.evaluate(updates) self.assertAllClose(metric.result(), 0.5) @combinations.generate(all_combinations()) def test_SpecificityAtSensitivity(self, distribution): label_prediction = ([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) with distribution.scope(): metric = metrics.SpecificityAtSensitivity(0.5) self.evaluate([v.initializer for v in metric.variables]) updates = distribution.run(metric, args=label_prediction) self.evaluate(updates) self.assertAllClose(metric.result(), 0.66666667) @combinations.generate(all_combinations()) def test_PrecisionAtRecall(self, distribution): label_prediction = ([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) with distribution.scope(): metric = metrics.PrecisionAtRecall(0.5) self.evaluate([v.initializer for v in metric.variables]) updates = distribution.run(metric, args=label_prediction) self.evaluate(updates) self.assertAllClose(metric.result(), 0.5) @combinations.generate(all_combinations()) def test_RecallAtPrecision(self, distribution): label_prediction = ([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) with distribution.scope(): metric = metrics.RecallAtPrecision(0.8) self.evaluate([v.initializer for v in metric.variables]) updates = distribution.run(metric, args=label_prediction) self.evaluate(updates) self.assertAllClose(metric.result(), 0.5) @combinations.generate(all_combinations()) def test_auc(self, distribution): label_prediction = ([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) with distribution.scope(): metric = metrics.AUC(num_thresholds=3) self.evaluate([v.initializer for v in metric.variables]) updates = distribution.run(metric, args=label_prediction) self.evaluate(updates) self.assertAllClose(metric.result(), 0.75) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
10,486
38.573585
80
py
keras
keras-master/keras/distribute/keras_premade_models_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for keras premade models using tf.distribute.Strategy.""" from absl.testing import parameterized from keras.engine import sequential from keras.layers import core from keras.optimizer_v2 import adagrad from keras.optimizer_v2 import gradient_descent from keras.premade import linear from keras.premade import wide_deep from keras.utils import dataset_creator import numpy as np import tensorflow.compat.v2 as tf def strategy_combinations_eager_data_fn(): return tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.default_strategy, tf.__internal__.distribute.combinations.one_device_strategy, tf.__internal__.distribute.combinations.one_device_strategy_gpu, tf.__internal__.distribute.combinations .mirrored_strategy_with_gpu_and_cpu, tf.__internal__.distribute.combinations .mirrored_strategy_with_two_gpus, tf.__internal__.distribute.combinations .mirrored_strategy_with_two_gpus_no_merge_call, tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, tf.__internal__.distribute.combinations.multi_worker_mirrored_2x2_gpu, tf.__internal__.distribute.combinations .parameter_server_strategy_1worker_2ps_cpu, tf.__internal__.distribute.combinations 
.parameter_server_strategy_1worker_2ps_1gpu, # NOTE: TPUStrategy not tested because the models in this test are # sparse and do not work with TPUs. ], use_dataset_creator=[True, False], mode=['eager'], data_fn=['numpy', 'dataset']) INPUT_SIZE = 64 BATCH_SIZE = 10 def get_numpy(): inputs = np.random.uniform( low=-5., high=5., size=(INPUT_SIZE, 2)).astype(np.float32) output = .3 * inputs[:, 0] + .2 * inputs[:, 1] return inputs, output def get_dataset(input_context=None, batch_size=None): inputs, output = get_numpy() dataset = tf.data.Dataset.from_tensor_slices((inputs, output)) if input_context: dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) if batch_size is None: batch_size = BATCH_SIZE dataset = dataset.batch(batch_size).repeat(200) return dataset # A `dataset_fn` is required for `Model.fit` to work across all strategies. def dataset_fn(input_context): batch_size = input_context.get_per_replica_batch_size( global_batch_size=BATCH_SIZE) return get_dataset(input_context, batch_size) class KerasPremadeModelsTest(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( strategy_combinations_eager_data_fn()) def test_linear_model(self, distribution, use_dataset_creator, data_fn): if ((not use_dataset_creator) and isinstance( distribution, tf.distribute.experimental.ParameterServerStrategy)): self.skipTest( 'Parameter Server strategy requires dataset creator to be used in ' 'model.fit.') with distribution.scope(): model = linear.LinearModel() opt = gradient_descent.SGD(learning_rate=0.1) model.compile(opt, 'mse') if use_dataset_creator: x = dataset_creator.DatasetCreator(dataset_fn) hist = model.fit(x, epochs=5, steps_per_epoch=INPUT_SIZE) else: if data_fn == 'numpy': inputs, output = get_numpy() hist = model.fit(inputs, output, epochs=5) else: hist = model.fit(get_dataset(), epochs=5) self.assertLess(hist.history['loss'][4], 0.2) @tf.__internal__.distribute.combinations.generate( 
strategy_combinations_eager_data_fn()) def test_wide_deep_model(self, distribution, use_dataset_creator, data_fn): if ((not use_dataset_creator) and isinstance( distribution, tf.distribute.experimental.ParameterServerStrategy)): self.skipTest( 'Parameter Server strategy requires dataset creator to be used in ' 'model.fit.') with distribution.scope(): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_opt = gradient_descent.SGD(learning_rate=0.05) dnn_opt = adagrad.Adagrad(learning_rate=0.1) wide_deep_model.compile(optimizer=[linear_opt, dnn_opt], loss='mse') if use_dataset_creator: x = dataset_creator.DatasetCreator(dataset_fn) hist = wide_deep_model.fit(x, epochs=5, steps_per_epoch=INPUT_SIZE) else: if data_fn == 'numpy': inputs, output = get_numpy() hist = wide_deep_model.fit(inputs, output, epochs=5) else: hist = wide_deep_model.fit(get_dataset(), epochs=5) self.assertLess(hist.history['loss'][4], 0.2) if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main()
5,721
39.013986
80
py
keras
keras-master/keras/distribute/multi_worker_callback_tf2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras callbacks in multi-worker training with TF2.""" import tensorflow.compat.v2 as tf import json import os from absl.testing import parameterized from keras import callbacks from keras.distribute import distributed_file_utils from keras.distribute import multi_worker_testing_utils def checkpoint_exists(filepath): """Returns whether the checkpoint `filepath` refers to exists.""" if filepath.endswith('.h5'): return tf.io.gfile.exists(filepath) tf_saved_model_exists = tf.io.gfile.exists(filepath) tf_weights_only_checkpoint_exists = tf.io.gfile.exists( filepath + '.index') return tf_saved_model_exists or tf_weights_only_checkpoint_exists def _model_setup(test_obj, file_format): """Set up a MNIST Keras model for testing purposes. This function builds a MNIST Keras model and returns relevant information for testing. Args: test_obj: The `TestCase` testing object. file_format: File format for checkpoints. 'tf' or 'h5'. Returns: A tuple of (model, saving_filepath, train_ds, steps) where train_ds is the training dataset. """ batch_size = 64 steps = 2 with tf.distribute.MultiWorkerMirroredStrategy().scope(): # TODO(b/142509827): In rare cases this errors out at C++ level with the # "Connect failed" error message. 
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) model = multi_worker_testing_utils.get_mnist_model((28, 28, 1)) # Pass saving_filepath from the parent thread to ensure every worker has the # same filepath to save. saving_filepath = os.path.join(test_obj.get_temp_dir(), 'checkpoint.' + file_format) return model, saving_filepath, train_ds, steps def get_tf_config_task(): return json.loads(os.environ['TF_CONFIG'])['task'] def get_tf_config_cluster_spec(): return json.loads(os.environ['TF_CONFIG'])['cluster'] def get_task_type(): return get_tf_config_task()['type'] def get_task_index(): return get_tf_config_task()['index'] def is_chief(): return ('chief' not in get_tf_config_cluster_spec() and get_task_type() == 'worker' and get_task_index() == 0) class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( mode=['eager'], file_format=['h5', 'tf'], save_weights_only=[True, False])) def test_model_checkpoint_saves_on_chief_but_not_otherwise( self, file_format, mode, save_weights_only): def proc_model_checkpoint_saves_on_chief_but_not_otherwise( test_obj, file_format): model, saving_filepath, train_ds, steps = _model_setup( test_obj, file_format) num_epoch = 2 extension = os.path.splitext(saving_filepath)[1] # Incorporate type/index information and thread id in saving_filepath to # ensure every worker has a unique path. Note that in normal use case the # saving_filepath will be the same for all workers, but we use different # ones here just to test out chief saves checkpoint but non-chief doesn't. task_config = get_tf_config_task() saving_filepath = os.path.join( test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' % (task_config['type'], task_config['index'], extension)) # The saving_filepath shouldn't exist at the beginning (as it's unique). 
test_obj.assertFalse(checkpoint_exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, validation_data=train_ds, validation_steps=steps, callbacks=[ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=save_weights_only) ]) # If it's chief, the model should be saved; if not, the model shouldn't. test_obj.assertEqual(checkpoint_exists(saving_filepath), is_chief()) # If it's chief, the model should be saved (`write_filepath` should # simply return `saving_filepath`); if not, i.e. for non-chief workers, # the temporary path generated by `write_filepath` should no longer # contain the checkpoint that has been deleted. test_obj.assertEqual( checkpoint_exists( distributed_file_utils.write_filepath( saving_filepath, model._distribution_strategy)), is_chief()) tf.__internal__.distribute.multi_process_runner.run( proc_model_checkpoint_saves_on_chief_but_not_otherwise, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self, file_format)) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager'])) def test_model_checkpoint_works_with_same_file_path(self, mode): def proc_model_checkpoint_works_with_same_file_path( test_obj, saving_filepath): model, _, train_ds, steps = _model_setup(test_obj, file_format='') num_epoch = 2 # The saving_filepath shouldn't exist at the beginning (as it's unique). 
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)]) test_obj.assertTrue(tf.io.gfile.exists(saving_filepath)) saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint') tf.__internal__.distribute.multi_process_runner.run( proc_model_checkpoint_works_with_same_file_path, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self, saving_filepath)) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager'])) def test_backupandrestore_checkpoint_works_with_interruption(self, mode): class InterruptingCallback(callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): if epoch == 2: raise RuntimeError('Interrupting!') class AssertCallback(callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): # the interruption happened on epoch 2 as specified in # InterruptingCallback, so the initial epoch after restart will begin # at 2. assert epoch > 1 def proc_model_checkpoint_works_with_same_file_path(test_obj, saving_filepath): model, _, train_ds, steps = _model_setup(test_obj, file_format='') num_epoch = 4 # The saving_filepath shouldn't exist at the beginning (as it's unique). test_obj.assertFalse(tf.io.gfile.exists(saving_filepath)) bar_dir = os.path.join(os.path.dirname(saving_filepath), 'backup') try: model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[ callbacks.ModelCheckpoint(filepath=saving_filepath), callbacks.BackupAndRestore(backup_dir=bar_dir), InterruptingCallback() ]) except RuntimeError as e: if 'Interrupting!' 
not in str(e): raise tf.__internal__.distribute.multi_process_runner.get_barrier().wait() backup_filepath = os.path.join(bar_dir, 'chief', 'checkpoint') test_obj.assertTrue(tf.io.gfile.exists(backup_filepath)) test_obj.assertTrue(tf.io.gfile.exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[ callbacks.ModelCheckpoint(filepath=saving_filepath), callbacks.BackupAndRestore(backup_dir=bar_dir), AssertCallback() ]) tf.__internal__.distribute.multi_process_runner.get_barrier().wait() test_obj.assertFalse(tf.io.gfile.exists(backup_filepath)) test_obj.assertTrue(tf.io.gfile.exists(saving_filepath)) saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint') tf.__internal__.distribute.multi_process_runner.run( proc_model_checkpoint_works_with_same_file_path, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self, saving_filepath)) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(mode=['eager'])) def test_profiler_saves_on_both_chief_and_non_chief(self, mode): def proc_profiler_saves_on_both_chief_and_non_chief(test_obj): model, _, train_ds, steps = _model_setup(test_obj, file_format='') num_epoch = 2 task_config = get_tf_config_task() saving_filepath = os.path.join( test_obj.get_temp_dir(), 'logfile_%s_%d' % (task_config['type'], task_config['index'])) # The saving_filepath shouldn't exist at the beginning (as it's unique). 
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[ callbacks.TensorBoard( log_dir=saving_filepath, profile_batch=[2, 4]) ]) # Profiler dir should be created on both chief and non-chief node profiler_dir_path = os.path.join(saving_filepath, 'plugins', 'profile') test_obj.assertTrue(tf.io.gfile.exists(profiler_dir_path)) tf.__internal__.distribute.multi_process_runner.run( proc_profiler_saves_on_both_chief_and_non_chief, cluster_spec= tf.__internal__.distribute.multi_process_runner.create_cluster_spec( num_workers=2), args=(self,)) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager'])) def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode): def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj): model, _, train_ds, steps = _model_setup(test_obj, file_format='') num_epoch = 2 # Incorporate type/index information and thread id in saving_filepath to # ensure every worker has a unique path. Note that in normal use case the # saving_filepath will be the same for all workers, but we use different # ones here just to test out chief saves summaries but non-chief doesn't. task_config = get_tf_config_task() saving_filepath = os.path.join( test_obj.get_temp_dir(), 'logfile_%s_%d' % (task_config['type'], task_config['index'])) # The saving_filepath shouldn't exist at the beginning (as it's unique). test_obj.assertFalse(tf.io.gfile.exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, # disabling profiler by setting profile_batch to zero callbacks=[ callbacks.TensorBoard(log_dir=saving_filepath, profile_batch=0) ]) # If it's chief, the summaries should be saved in the filepath; if not, # the directory should be empty (although created). Using # `file_io.list_directory()` since the directory may be created at this # point. 
test_obj.assertEqual( bool(tf.io.gfile.listdir(saving_filepath)), is_chief()) tf.__internal__.distribute.multi_process_runner.run( proc_tensorboard_saves_on_chief_but_not_otherwise, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self,)) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager'])) def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode): def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj): model, _, train_ds, steps = _model_setup(test_obj, file_format='') num_epoch = 2 saving_filepath = os.path.join( test_obj.get_temp_dir(), 'logfile_%s' % (get_tf_config_task()['type'])) saving_filepath_for_temp = os.path.join(saving_filepath, 'workertemp_1') os.mkdir(saving_filepath) os.mkdir(saving_filepath_for_temp) # Verifies that even if `saving_filepath_for_temp` exists, tensorboard # can still save to temporary directory. test_obj.assertTrue(tf.io.gfile.exists(saving_filepath_for_temp)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)]) tf.__internal__.distribute.multi_process_runner.run( proc_tensorboard_can_still_save_to_temp_even_if_it_exists, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self,)) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager'])) def test_tensorboard_works_with_same_file_path(self, mode): def proc_tensorboard_works_with_same_file_path(test_obj, saving_filepath): model, _, train_ds, steps = _model_setup(test_obj, file_format='') num_epoch = 2 # The saving_filepath shouldn't exist at the beginning (as it's unique). 
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath)) tf.__internal__.distribute.multi_process_runner.get_barrier().wait() model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)]) tf.__internal__.distribute.multi_process_runner.get_barrier().wait() test_obj.assertTrue(tf.io.gfile.listdir(saving_filepath)) saving_filepath = os.path.join(self.get_temp_dir(), 'logfile') tf.__internal__.distribute.multi_process_runner.run( proc_tensorboard_works_with_same_file_path, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self, saving_filepath)) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager'])) def test_early_stopping(self, mode): def proc_early_stopping(test_obj): class EpochCounterCallback(callbacks.Callback): def on_epoch_begin(self, epoch, logs): self.last_epoch = epoch model, _, train_ds, steps = _model_setup(test_obj, file_format='') epoch_counter_cbk = EpochCounterCallback() cbks = [ callbacks.EarlyStopping( monitor='loss', min_delta=0.05, patience=1, verbose=1), epoch_counter_cbk ] # Empirically, it is expected that `model.fit()` terminates around the # 22th epoch. Asserting that it should have been stopped before the 50th # epoch to avoid flakiness and be more predictable. model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks) test_obj.assertLess(epoch_counter_cbk.last_epoch, 50) tf.__internal__.distribute.multi_process_runner.run( proc_early_stopping, cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2), args=(self,)) if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main()
16,091
38.831683
110
py
keras
keras-master/keras/distribute/custom_training_loop_metrics_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for custom training loops.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np from tensorflow.python.framework import test_util from keras import metrics from keras.distribute import strategy_combinations class KerasMetricsTest(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies + strategy_combinations.multiworker_strategies, mode=["eager"] )) def test_multiple_keras_metrics_experimental_run(self, distribution): with distribution.scope(): loss_metric = metrics.Mean("loss", dtype=np.float32) loss_metric_2 = metrics.Mean("loss_2", dtype=np.float32) @tf.function def train_step(): def step_fn(): loss = tf.constant(5.0, dtype=np.float32) loss_metric.update_state(loss) loss_metric_2.update_state(loss) distribution.run(step_fn) train_step() self.assertEqual(loss_metric.result().numpy(), loss_metric_2.result().numpy()) self.assertEqual(loss_metric.result().numpy(), 5.0) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies+ strategy_combinations.multiworker_strategies, mode=["eager"] )) def 
test_update_keras_metric_declared_in_strategy_scope(self, distribution): with distribution.scope(): metric = metrics.Mean("test_metric", dtype=np.float32) dataset = tf.data.Dataset.range(10).batch(2) dataset = distribution.experimental_distribute_dataset(dataset) @tf.function def step_fn(i): metric.update_state(i) for i in dataset: distribution.run(step_fn, args=(i,)) # This should be the mean of integers 0-9 which has a sum of 45 and a count # of 10 resulting in mean of 4.5. self.assertEqual(metric.result().numpy(), 4.5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies, mode=["eager"] )) def test_update_keras_metric_outside_strategy_scope_cross_replica( self, distribution): metric = metrics.Mean("test_metric", dtype=np.float32) with distribution.scope(): for i in range(10): metric.update_state(i) # This should be the mean of integers 0-9 which has a sum of 45 and a count # of 10 resulting in mean of 4.5. self.assertEqual(metric.result().numpy(), 4.5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies, mode=["eager"])) @test_util.disable_mlir_bridge("TODO(b/168036682): Support dynamic padder") def test_update_keras_metrics_dynamic_shape(self, distribution): with distribution.scope(): metric = metrics.Mean("test_metric", dtype=np.float32) dataset = tf.data.Dataset.range(10).batch(2, drop_remainder=False) @tf.function def train_fn(dataset): weights = tf.constant([0.1, 0.1]) def step_fn(i): metric.update_state(i, weights) for i in dataset: distribution.run(step_fn, args=(i,)) train_fn(dataset) # This should be the mean of integers 0-9 which has a sum of 45 and a count # of 10 resulting in mean of 4.5. self.assertEqual(metric.result().numpy(), 4.5) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
4,349
34.365854
80
py
keras
keras-master/keras/distribute/keras_rnn_model_correctness_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras RNN models using DistributionStrategy.""" import tensorflow.compat.v2 as tf import numpy as np import keras from keras import testing_utils from keras.distribute import keras_correctness_test_base from keras.layers import recurrent as rnn_v1 from keras.layers import recurrent_v2 as rnn_v2 from keras.mixed_precision import policy from keras.optimizer_v2 import gradient_descent as gradient_descent_keras class _DistributionStrategyRnnModelCorrectnessTest( keras_correctness_test_base .TestDistributionStrategyEmbeddingModelCorrectnessBase): def _get_layer_class(self): raise NotImplementedError def get_model(self, max_words=10, initial_weights=None, distribution=None, input_shapes=None): del input_shapes rnn_cls = self._get_layer_class() with keras_correctness_test_base.MaybeDistributionScope(distribution): word_ids = keras.layers.Input( shape=(max_words,), dtype=np.int32, name='words') word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids) rnn_embed = rnn_cls(units=4, return_sequences=False)(word_embed) dense_output = keras.layers.Dense(2)(rnn_embed) preds = keras.layers.Softmax(dtype='float32')(dense_output) model = keras.Model(inputs=[word_ids], outputs=[preds]) if initial_weights: model.set_weights(initial_weights) optimizer_fn = 
gradient_descent_keras.SGD model.compile( optimizer=optimizer_fn(learning_rate=0.1), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) return model @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class DistributionStrategyGruModelCorrectnessTest( _DistributionStrategyRnnModelCorrectnessTest): def _get_layer_class(self): if tf.__internal__.tf2.enabled(): if not tf.executing_eagerly(): self.skipTest("GRU v2 and legacy graph mode don't work together.") return rnn_v2.GRU else: return rnn_v1.GRU @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.test_combinations_for_embedding_model() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_gru_model_correctness(self, distribution, use_numpy, use_validation_data): self.run_correctness_test(distribution, use_numpy, use_validation_data) @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class DistributionStrategyLstmModelCorrectnessTest( _DistributionStrategyRnnModelCorrectnessTest): def _get_layer_class(self): if tf.__internal__.tf2.enabled(): if not tf.executing_eagerly(): self.skipTest("LSTM v2 and legacy graph mode don't work together.") return rnn_v2.LSTM else: return rnn_v1.LSTM @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.test_combinations_for_embedding_model() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_lstm_model_correctness(self, distribution, use_numpy, use_validation_data): self.run_correctness_test(distribution, use_numpy, use_validation_data) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.test_combinations_for_embedding_model() + keras_correctness_test_base.multi_worker_mirrored_eager()) @testing_utils.enable_v2_dtype_behavior def test_lstm_model_correctness_mixed_precision(self, distribution, use_numpy, use_validation_data): if isinstance(distribution, 
(tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): self.skipTest('CentralStorageStrategy is not supported by ' 'mixed precision.') if isinstance(distribution, (tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy)): policy_name = 'mixed_bfloat16' else: policy_name = 'mixed_float16' with policy.policy_scope(policy_name): self.run_correctness_test(distribution, use_numpy, use_validation_data) if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main()
5,155
38.358779
110
py
keras
keras-master/keras/distribute/model_combinations.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Strategy and optimizer combinations for combinations.combine().""" import tensorflow.compat.v2 as tf from keras.distribute import simple_models simple_functional_model = tf.__internal__.test.combinations.NamedObject( "SimpleFunctionalModel", simple_models.SimpleFunctionalModel()) simple_sequential_model = tf.__internal__.test.combinations.NamedObject( "SimpleSequentialModel", simple_models.SimpleSequentialModel()) simple_subclass_model = tf.__internal__.test.combinations.NamedObject( "SimpleSubclassModel", simple_models.SimpleSubclassModel()) simple_tfmodule_model = tf.__internal__.test.combinations.NamedObject( "SimpleTFModuleModel", simple_models.SimpleTFModuleModel())
1,393
43.967742
80
py
keras
keras-master/keras/distribute/test_example.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A simple network to use in tests and examples.""" import tensorflow.compat.v2 as tf from keras.optimizer_v2 import optimizer_v2 def minimize_loss_example(optimizer, use_bias=False, use_callable_loss=True): """Example of non-distribution-aware legacy code.""" def dataset_fn(): dataset = tf.data.Dataset.from_tensors([[1.]]).repeat() # TODO(isaprykin): batch with drop_remainder causes shapes to be # fully defined for TPU. Remove this when XLA supports dynamic shapes. return dataset.batch(1, drop_remainder=True) layer = tf.compat.v1.layers.Dense(1, use_bias=use_bias) def model_fn(x): """A very simple model written by the user.""" def loss_fn(): y = tf.reshape(layer(x), []) - tf.constant(1.) return y * y if isinstance(optimizer, optimizer_v2.OptimizerV2): return optimizer.minimize(loss_fn, lambda: layer.trainable_variables) elif use_callable_loss: return optimizer.minimize(loss_fn) else: return optimizer.minimize(loss_fn()) return model_fn, dataset_fn, layer def batchnorm_example(optimizer_fn, batch_per_epoch=1, momentum=0.9, renorm=False, update_ops_in_replica_mode=False): """Example of non-distribution-aware legacy code with batch normalization.""" def dataset_fn(): # input shape is [16, 8], input values are increasing in both dimensions. 
return tf.data.Dataset.from_tensor_slices( [[[float(x * 8 + y + z * 100) for y in range(8)] for x in range(16)] for z in range(batch_per_epoch)]).repeat() optimizer = optimizer_fn() batchnorm = tf.compat.v1.layers.BatchNormalization( renorm=renorm, momentum=momentum, fused=False) layer = tf.compat.v1.layers.Dense(1, use_bias=False) def model_fn(x): """A model that uses batchnorm.""" def loss_fn(): y = batchnorm(x, training=True) with tf.control_dependencies( tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) if update_ops_in_replica_mode else []): loss = tf.reduce_mean( tf.reduce_sum(layer(y)) - tf.constant(1.)) # `x` and `y` will be fetched by the gradient computation, but not `loss`. return loss if isinstance(optimizer, optimizer_v2.OptimizerV2): return optimizer.minimize(loss_fn, lambda: layer.trainable_variables) # Callable loss. return optimizer.minimize(loss_fn) return model_fn, dataset_fn, batchnorm
3,219
35.179775
80
py
keras
keras-master/keras/distribute/saved_model_save_load_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for saving and loading using tf's saved_model APIs with DS.""" import tensorflow.compat.v2 as tf import os from keras import testing_utils from keras.distribute import model_combinations from keras.distribute import saved_model_test_base as test_base @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class SavedModelKerasModelTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = 'saved_model_save_load' super(SavedModelKerasModelTest, self).setUp() def _save_model(self, model, saved_dir): tf.saved_model.save(model, saved_dir) def _load_and_run_model(self, distribution, saved_dir, predict_dataset, output_name='output_1'): return test_base.load_and_run_with_saved_model_api(distribution, saved_dir, predict_dataset, output_name) @tf.__internal__.distribute.combinations.generate(test_base.simple_models_with_strategies()) def test_save_no_strategy_restore_strategy(self, model_and_input, distribution): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_no_strategy(self, 
model_and_input, distribution, save_in_scope): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_strategy(self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope): self.run_test_save_strategy_restore_strategy(model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_no_variable_device_placement(self, model_and_input, distribution, save_in_scope): saved_dir = self.run_test_save_strategy(model_and_input, distribution, save_in_scope) func = tf.saved_model.load(saved_dir) concrete_function = func.signatures[test_base._DEFAULT_FUNCTION_KEY] for f in concrete_function.graph.as_graph_def().library.function: for n in f.node_def: if n.op == 'ReadVariableOp': self.assertEmpty(n.device) class SavedModelTFModuleTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = 'saved_model_save_load' super(SavedModelTFModuleTest, self).setUp() def _train_model(self, model, x_train, y_train, batch_size): pass def _predict_with_model(self, distribution, model, predict_dataset): if distribution: dist_predict_dataset = distribution.experimental_distribute_dataset( predict_dataset) per_replica_predict_data = next(iter(dist_predict_dataset)) result = distribution.run(model, args=(per_replica_predict_data,)) # Convert the per_replica value to a list, then concatenate them reduced = distribution.experimental_local_results(result) concat = tf.concat(reduced, 0) return concat else: return model(next(iter(predict_dataset))) def 
_save_model(self, model, saved_dir): call = model.__call__.get_concrete_function(tf.TensorSpec(None)) tf.saved_model.save(model, saved_dir, signatures=call) def _load_and_run_model(self, distribution, saved_dir, predict_dataset, output_name='output_1'): del output_name model = tf.saved_model.load(saved_dir) return self._predict_with_model(distribution, model, predict_dataset) @tf.__internal__.distribute.combinations.generate(test_base.tfmodule_models_with_strategies()) def test_save_no_strategy_restore_strategy(self, model_and_input, distribution): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.tfmodule_models_with_strategies(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_no_strategy( self, model_and_input, distribution, save_in_scope): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.tfmodule_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_strategy(self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope): self.run_test_save_strategy_restore_strategy(model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( model_and_input=[model_combinations.simple_tfmodule_model], distribution=test_base.strategies + [tf.__internal__.distribute.combinations.cloud_tpu_strategy])) def test_save_load_io_device(self, model_and_input, distribution): saved_dir = os.path.join(self.get_temp_dir(), 'io_device') with distribution.scope(): model = model_and_input.get_model() x_train, y_train, _ = 
model_and_input.get_data() batch_size = model_and_input.get_batch_size() self._train_model(model, x_train, y_train, batch_size) call = model.__call__.get_concrete_function(tf.TensorSpec(None)) save_options = tf.saved_model.SaveOptions( experimental_io_device='/job:localhost') tf.saved_model.save(model, saved_dir, signatures=call, options=save_options) load_options = tf.saved_model.LoadOptions( experimental_io_device='/job:localhost') # Check that the model can be loaded and training continued without error. with distribution.scope(): loaded_model = tf.saved_model.load(saved_dir, options=load_options) self._train_model(loaded_model, x_train, y_train, batch_size) if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
8,370
47.109195
97
py
keras
keras-master/keras/distribute/saved_model_mixed_api_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for saving and loading with mixed APIs with distribution strategies. For saving, Keras's export_saved_model() API is used; and for loading, saved_model's load() API is used. Keras's export_save_model() when used with `serving_only` parameter equals to True should be the same as using tf.saved_model.save(). 
""" import tensorflow.compat.v2 as tf from keras import testing_utils from keras.distribute import saved_model_test_base as test_base from keras.saving import save _DEFAULT_FUNCTION_KEY = 'serving_default' @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = 'saved_model_save_load' super(SavedModelSaveAndLoadTest, self).setUp() def _save_model(self, model, saved_dir): save.save_model(model, saved_dir, save_format='tf') def _load_and_run_model(self, distribution, saved_dir, predict_dataset, output_name='output_1'): return test_base.load_and_run_with_saved_model_api(distribution, saved_dir, predict_dataset, output_name) @tf.__internal__.distribute.combinations.generate(test_base.simple_models_with_strategies()) def test_save_no_strategy_restore_strategy(self, model_and_input, distribution): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_no_strategy(self, model_and_input, distribution, save_in_scope): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(test_base.simple_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine(save_in_scope=[True, False]))) def test_save_strategy_restore_strategy(self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope): self.run_test_save_strategy_restore_strategy(model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope) if __name__ == '__main__': tf.compat.v1.enable_eager_execution() tf.test.main()
3,691
44.580247
97
py
keras
keras-master/keras/distribute/mirrored_strategy_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for MirroredStrategy.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from tensorflow.python.eager import backprop from keras.engine import training as keras_training from keras.layers import core as keras_core from keras.optimizer_v2 import rmsprop from keras.utils import kpl_test_utils from tensorflow.python.training import optimizer as optimizer_lib class MiniModel(keras_training.Model): """Minimal model for mnist. Useful for testing and debugging on slow TPU simulators. 
""" def __init__(self): super(MiniModel, self).__init__(name="") self.fc = keras_core.Dense(1, name="fc", kernel_initializer="ones", bias_initializer="ones") def call(self, inputs, training=True): inputs = tf.ones([1, 10]) return self.fc(inputs) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=["eager"])) class MirroredStrategyDefunTest(tf.test.TestCase, parameterized.TestCase): def testTrain(self, distribution): with distribution.scope(): mock_model = MiniModel() mock_model.call = tf.function(mock_model.call) def loss_fn(ctx): del ctx return mock_model(tf.ones([1, 10])) gradients_fn = backprop.implicit_grad(loss_fn) gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn) grads_and_vars = distribution.extended.call_for_each_replica( gradients_fn, args=(None,)) optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.25) update_ops = optimizer._distributed_apply(distribution, grads_and_vars) # pylint: disable=protected-access if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(update_ops) updated_var_values = self.evaluate(mock_model.variables) # All variables start at 1.0 and get two updates of 0.25. 
self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0]) self.assertAllEqual([0.5], updated_var_values[1]) def testTrainAndServeWithKPL(self, distribution): use_adapt = False test_utils_obj = kpl_test_utils.DistributeKplTestUtils() with distribution.scope(): feature_mapper, label_mapper = test_utils_obj.define_kpls_for_training( use_adapt) model = test_utils_obj.define_model() optimizer = rmsprop.RMSprop(learning_rate=0.1) accuracy = keras.metrics.Accuracy() def dataset_fn(_): return test_utils_obj.dataset_fn(feature_mapper, label_mapper) @tf.function def train_step(iterator): """The step function for one training step.""" def step_fn(inputs): """The computation to run on each replica(GPU).""" features, labels = inputs with tf.GradientTape() as tape: pred = model(features, training=True) loss = keras.losses.binary_crossentropy(labels, pred) loss = tf.nn.compute_average_loss(loss) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(list(zip(grads, model.trainable_variables))) actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64) accuracy.update_state(labels, actual_pred) distribution.run(step_fn, args=(next(iterator),)) distributed_dataset = distribution.distribute_datasets_from_function( dataset_fn) distributed_iterator = iter(distributed_dataset) num_epochs = 4 num_steps = 7 for _ in range(num_epochs): accuracy.reset_state() for _ in range(num_steps): train_step(distributed_iterator) self.assertGreater(accuracy.result().numpy(), 0.5) self.assertEqual(optimizer.iterations.numpy(), num_epochs * num_steps) # Test save/load/serving the trained model. test_utils_obj.test_save_load_serving_model( model, feature_mapper, test_utils_obj.define_reverse_lookup_layer()) if __name__ == "__main__": tf.test.main()
4,881
35.706767
113
py
keras
keras-master/keras/distribute/simple_models.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A simple functional keras model with one layer.""" import tensorflow.compat.v2 as tf import numpy as np import keras from keras.distribute import model_collection_base from keras.optimizer_v2 import gradient_descent _BATCH_SIZE = 10 def _get_data_for_simple_models(): x_train = tf.constant(np.random.rand(1000, 3), dtype=tf.float32) y_train = tf.constant(np.random.rand(1000, 5), dtype=tf.float32) x_predict = tf.constant( np.random.rand(1000, 3), dtype=tf.float32) return x_train, y_train, x_predict class SimpleFunctionalModel(model_collection_base.ModelAndInput): """A simple functional model and its inputs.""" def get_model(self, **kwargs): output_name = 'output_1' x = keras.layers.Input(shape=(3,), dtype=tf.float32) y = keras.layers.Dense(5, dtype=tf.float32, name=output_name)(x) model = keras.Model(inputs=x, outputs=y) optimizer = gradient_descent.SGD(learning_rate=0.001) model.compile( loss='mse', metrics=['mae'], optimizer=optimizer) return model def get_data(self): return _get_data_for_simple_models() def get_batch_size(self): return _BATCH_SIZE class SimpleSequentialModel(model_collection_base.ModelAndInput): """A simple sequential model and its inputs.""" def get_model(self, **kwargs): output_name = 'output_1' model = keras.Sequential() y = keras.layers.Dense( 5, dtype=tf.float32, 
name=output_name, input_dim=3) model.add(y) optimizer = gradient_descent.SGD(learning_rate=0.001) model.compile( loss='mse', metrics=['mae'], optimizer=optimizer) return model def get_data(self): return _get_data_for_simple_models() def get_batch_size(self): return _BATCH_SIZE class _SimpleModel(keras.Model): def __init__(self): super(_SimpleModel, self).__init__() self._dense_layer = keras.layers.Dense(5, dtype=tf.float32) def call(self, inputs): return self._dense_layer(inputs) class SimpleSubclassModel(model_collection_base.ModelAndInput): """A simple subclass model and its data.""" def get_model(self, **kwargs): model = _SimpleModel() optimizer = gradient_descent.SGD(learning_rate=0.001) model.compile( loss='mse', metrics=['mae'], cloning=False, optimizer=optimizer) return model def get_data(self): return _get_data_for_simple_models() def get_batch_size(self): return _BATCH_SIZE class _SimpleModule(tf.Module): def __init__(self): self.v = tf.Variable(3.0) @tf.function def __call__(self, x): return self.v * x class SimpleTFModuleModel(model_collection_base.ModelAndInput): """A simple model based on tf.Module and its data.""" def get_model(self, **kwargs): model = _SimpleModule() return model def get_data(self): return _get_data_for_simple_models() def get_batch_size(self): return _BATCH_SIZE
3,631
24.942857
80
py
keras
keras-master/keras/distribute/keras_models_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras high level APIs, e.g. fit, evaluate and predict.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from keras.distribute.strategy_combinations import all_strategies class KerasModelsTest(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=all_strategies, mode=["eager"])) def test_lstm_model_with_dynamic_batch(self, distribution): input_data = np.random.random([1, 32, 64, 64, 3]) input_shape = tuple(input_data.shape[1:]) def build_model(): model = keras.models.Sequential() model.add( keras.layers.ConvLSTM2D( 4, kernel_size=(4, 4), activation="sigmoid", padding="same", input_shape=input_shape)) model.add(keras.layers.GlobalMaxPooling2D()) model.add(keras.layers.Dense(2, activation="sigmoid")) return model with distribution.scope(): model = build_model() model.compile(loss="binary_crossentropy", optimizer="adam") result = model.predict(input_data) self.assertEqual(result.shape, (1, 2)) if __name__ == "__main__": tf.test.main()
1,984
33.824561
80
py
keras
keras-master/keras/distribute/custom_training_loop_optimizer_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for custom training loops that involves advanced optimizer usage.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized from tensorflow.python.distribute import values from keras.distribute import strategy_combinations as keras_strategy_combinations from keras.optimizer_v2 import gradient_descent class OptimizerTest(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( tf.__internal__.test.combinations.combine( distribution=keras_strategy_combinations.multidevice_strategies, mode=["eager"], ), tf.__internal__.test.combinations.combine( experimental_aggregate_gradients=True, expected=[[[-0.3, -0.3], [-0.3, -0.3]]]) + tf.__internal__.test.combinations.combine( experimental_aggregate_gradients=False, expected=[[[-0.1, -0.1], [-0.2, -0.2]]]) )) def test_custom_aggregation(self, distribution, experimental_aggregate_gradients, expected): with distribution.scope(): v = tf.Variable([0., 0.]) optimizer = gradient_descent.SGD(0.1) class PerReplica(values.DistributedValues): """Holds a map from replica to unsynchronized values.""" @property def values(self): """Returns the per replica values.""" return self._values @tf.function def optimize(): with 
tf.device(distribution.extended.worker_devices[0]): v1 = tf.convert_to_tensor([1., 1.]) with tf.device(distribution.extended.worker_devices[1]): v2 = tf.convert_to_tensor([2., 2.]) grads = PerReplica([v1, v2]) def step_fn(grads): optimizer.apply_gradients( [(grads, v)], experimental_aggregate_gradients=experimental_aggregate_gradients) return v.read_value() return distribution.experimental_local_results( distribution.run(step_fn, args=(grads,))) self.assertAllClose(optimize(), expected) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=tf.__internal__.distribute.combinations.one_device_strategy, mode=["eager"], experimental_aggregate_gradients=[True, False])) def test_custom_aggregation_one_device(self, distribution, experimental_aggregate_gradients): with distribution.scope(): v = tf.Variable([0., 0.]) optimizer = gradient_descent.SGD(0.1) @tf.function def optimize(): grads = tf.convert_to_tensor([1., 1.]) def step_fn(grads): optimizer.apply_gradients( [(grads, v)], experimental_aggregate_gradients=experimental_aggregate_gradients) return v.read_value() return distribution.experimental_local_results( distribution.run(step_fn, args=(grads,))) self.assertAllClose(optimize(), [[-0.1, -0.1]]) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(distribution=[ tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu ])) def test_custom_aggregation_central_storage(self, distribution): with distribution.scope(): v = tf.Variable([0., 0.]) optimizer = gradient_descent.SGD(0.1) grads = tf.convert_to_tensor([1., 1.]) def step_fn(grads): with self.assertRaises(NotImplementedError): optimizer.apply_gradients([(grads, v)], experimental_aggregate_gradients=False) return distribution.run(step_fn, args=(grads,)) if __name__ == "__main__": tf.test.main()
4,483
36.057851
91
py
keras
keras-master/keras/distribute/keras_image_model_correctness_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras CNN models using DistributionStrategy.""" import tensorflow.compat.v2 as tf import numpy as np import keras from keras import testing_utils from keras.distribute import keras_correctness_test_base from keras.optimizer_v2 import gradient_descent @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul. Even if Dense layers run in ' 'float64, the test sometimes fails with TensorFloat-32 enabled for unknown ' 'reasons') class DistributionStrategyCnnCorrectnessTest( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, initial_weights=None, distribution=None, input_shapes=None): del input_shapes with keras_correctness_test_base.MaybeDistributionScope(distribution): image = keras.layers.Input(shape=(28, 28, 3), name='image') c1 = keras.layers.Conv2D( name='conv1', filters=16, kernel_size=(3, 3), strides=(4, 4), kernel_regularizer=keras.regularizers.l2(1e-4))( image) if self.with_batch_norm == 'regular': c1 = keras.layers.BatchNormalization(name='bn1')(c1) elif self.with_batch_norm == 'sync': # Test with parallel batch norms to verify all-reduce works OK. 
bn1 = keras.layers.SyncBatchNormalization(name='bn1')(c1) bn2 = keras.layers.SyncBatchNormalization(name='bn2')(c1) c1 = keras.layers.Add()([bn1, bn2]) c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1) logits = keras.layers.Dense( 10, activation='softmax', name='pred')( keras.layers.Flatten()(c1)) model = keras.Model(inputs=[image], outputs=[logits]) if initial_weights: model.set_weights(initial_weights) model.compile( optimizer=gradient_descent.SGD(learning_rate=0.1), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) return model def _get_data(self, count, shape=(28, 28, 3), num_classes=10): centers = np.random.randn(num_classes, *shape) features = [] labels = [] for _ in range(count): label = np.random.randint(0, num_classes, size=1)[0] offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape)) offset = offset.reshape(shape) labels.append(label) features.append(centers[label] + offset) x = np.asarray(features, dtype=np.float32) y = np.asarray(labels, dtype=np.float32).reshape((count, 1)) return x, y def get_data(self): x_train, y_train = self._get_data( count=keras_correctness_test_base._GLOBAL_BATCH_SIZE * keras_correctness_test_base._EVAL_STEPS) x_predict = x_train return x_train, y_train, x_predict def get_data_with_partial_last_batch_eval(self): x_train, y_train = self._get_data(count=1280) x_eval, y_eval = self._get_data(count=1000) return x_train, y_train, x_eval, y_eval, x_eval @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_cnn_correctness(self, distribution, use_numpy, use_validation_data): if (distribution == tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu): self.skipTest('b/183958183') self.run_correctness_test(distribution, use_numpy, use_validation_data) @tf.__internal__.distribute.combinations.generate( 
keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm='regular') @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_cnn_with_sync_batch_norm_correctness(self, distribution, use_numpy, use_validation_data): if not tf.executing_eagerly(): self.skipTest('SyncBatchNorm is not enabled in graph mode.') self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm='sync') @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base .all_strategy_and_input_config_combinations_eager() + keras_correctness_test_base.multi_worker_mirrored_eager() + keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()) def test_cnn_correctness_with_partial_last_batch_eval(self, distribution, use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch=True, training_epochs=1) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base. all_strategy_and_input_config_combinations_eager() + keras_correctness_test_base.multi_worker_mirrored_eager() + keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()) def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval( self, distribution, use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm='regular', partial_last_batch=True) if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main()
6,649
39.54878
91
py
keras
keras-master/keras/distribute/distribute_strategy_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.keras models using tf.distribute.Strategy.""" import tensorflow.compat.v2 as tf import os from absl.testing import parameterized import numpy as np import keras from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver from keras import backend from keras import testing_utils from keras.distribute import distributed_training_utils from keras.distribute import distributed_training_utils_v1 from keras.distribute import multi_worker_testing_utils from keras.distribute import optimizer_combinations from keras.distribute.strategy_combinations import all_strategies from keras.distribute.strategy_combinations import multi_worker_mirrored_strategies from keras.distribute.strategy_combinations import strategies_minus_default_minus_tpu from keras.distribute.strategy_combinations import strategies_minus_tpu from keras.distribute.strategy_combinations import tpu_strategies from keras.engine import base_layer_utils from keras.mixed_precision import policy from keras.optimizer_v2 import gradient_descent as gradient_descent_keras from keras.utils import losses_utils from keras.utils import np_utils _RANDOM_SEED = 1337 _TRAIN_SIZE = 200 _INPUT_SIZE = (10,) _NUM_CLASS = 2 # Note: Please make sure the tests in this file are also covered in # keras_backward_compat_test for 
features that are supported with both APIs. # TODO(anjalisridhar): Add a decorator that will allow us to run these tests as # part of the tf.keras unit tests suite. def simple_sequential_model(): model = keras.models.Sequential() model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE)) model.add(keras.layers.Dropout(0.1)) model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax')) return model def simple_subclassed_model(num_labels=_NUM_CLASS): class _SimpleMLP(keras.Model): def __init__(self, num_labels): super(_SimpleMLP, self).__init__() self.dense = keras.layers.Dense(num_labels) def call(self, inputs): return self.dense(inputs) return _SimpleMLP(num_labels) def simple_multi_inputs_multi_outputs_model(): input_a = keras.layers.Input(shape=(16,), name='input_a') input_b = keras.layers.Input(shape=(16,), name='input_b') merged = keras.layers.concatenate([input_a, input_b], name='merge') output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged) output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged) model = keras.models.Model( inputs=[input_a, input_b], outputs=[output_c, output_d]) return model def get_multi_inputs_multi_outputs_data(): (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data( train_samples=_TRAIN_SIZE, test_samples=50, input_shape=(16,), num_classes=3, random_seed=_RANDOM_SEED) (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data( train_samples=_TRAIN_SIZE, test_samples=50, input_shape=(16,), num_classes=2, random_seed=_RANDOM_SEED) (m_train, _), (m_test, _) = testing_utils.get_test_data( train_samples=_TRAIN_SIZE, test_samples=50, input_shape=(8,), num_classes=2, random_seed=_RANDOM_SEED) c_train = np_utils.to_categorical(c_train) c_test = np_utils.to_categorical(c_test) d_train = np_utils.to_categorical(d_train) d_test = np_utils.to_categorical(d_test) train_data = { 'input_a': a_train, 'input_b': b_train, 'input_m': m_train, 'output_c': c_train, 
'output_d': d_train } test_data = { 'input_a': a_test, 'input_b': b_test, 'input_m': m_test, 'output_c': c_test, 'output_d': d_test } return (train_data, test_data) def batch_wrapper(dataset, batch_size, distribution, repeat=None): if repeat: dataset = dataset.repeat(repeat) # TPUs currently require fully defined input shapes, drop_remainder ensures # the input will have fully defined shapes. if backend.is_tpu_strategy(distribution): return dataset.batch(batch_size, drop_remainder=True) else: return dataset.batch(batch_size) def get_model(): x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) return model def get_sample_weights_model(): x = keras.layers.Input(shape=(1,), name='input') y = keras.layers.Dense( 1, kernel_initializer='ones', bias_initializer='zeros', name='dense')( x) model = keras.Model(x, y) return model def get_dataset(distribution): inputs = np.zeros((10, 3), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = batch_wrapper(dataset, 10, distribution) return dataset def get_predict_dataset(distribution): inputs = np.zeros((10, 3), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices(inputs) dataset = dataset.repeat(100) dataset = batch_wrapper(dataset, 10, distribution) return dataset def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None): if targets is not None: input_slices = (inputs, targets) dummy_op = (lambda inp, target: True) else: input_slices = inputs dummy_op = (lambda inp: True) original_dataset = (tf.data.Dataset.from_tensor_slices(input_slices)) ds_with_unknown_cardinality = ( original_dataset.filter(dummy_op).batch(10, drop_remainder=True)) return ds_with_unknown_cardinality def multi_input_output_model(): a = keras.layers.Input(shape=(3,), name='input_a') b = keras.layers.Input(shape=(5,), name='input_b') # 
TODO(anjalisridhar): Change the output dimension of the second Dense layer # once the iterator output validation issue has been fixed. dense_1 = keras.layers.Dense(7, name='dense_1') dense_2 = keras.layers.Dense(7, name='dense_2') c = dense_1(a) d = dense_2(b) e = keras.layers.Dropout(0.5, name='dropout')(c) model = keras.models.Model([a, b], [d, e]) return model def strategy_minus_tpu_combinations(): return tf.__internal__.test.combinations.combine( distribution=strategies_minus_tpu, mode=['graph', 'eager']) def tpu_strategy_combinations(): return tf.__internal__.test.combinations.combine( distribution=tpu_strategies, mode=['graph', 'eager']) def tpu_strategy_combinations_graph_only(): return tf.__internal__.test.combinations.combine(distribution=tpu_strategies, mode=['graph']) def multi_worker_strategy_combinations_eager_only(): return tf.__internal__.test.combinations.combine( distribution=multi_worker_mirrored_strategies, mode=['eager']) def all_strategy_combinations(): return strategy_minus_tpu_combinations() + tpu_strategy_combinations( ) + multi_worker_strategy_combinations_eager_only() def all_strategy_minus_default_and_tpu_combinations(): return tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.one_device_strategy, tf.__internal__.distribute.combinations.one_device_strategy_gpu, tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, ], mode=['graph', 'eager']) def all_strategy_combinations_minus_default(): return (all_strategy_minus_default_and_tpu_combinations() + tpu_strategy_combinations() + multi_worker_strategy_combinations_eager_only()) def strategy_and_optimizer_combinations(): non_tpu_strategies = tf.__internal__.test.combinations.times( strategy_minus_tpu_combinations(), tf.__internal__.test.combinations.combine( optimizer=[ optimizer_combinations.adagrad_optimizer_v1_fn, 
optimizer_combinations.adam_optimizer_v1_fn, optimizer_combinations.gradient_descent_optimizer_v1_fn, optimizer_combinations.rmsprop_optimizer_v1_fn, optimizer_combinations.adadelta_optimizer_keras_v2_fn, optimizer_combinations.adagrad_optimizer_keras_v2_fn, optimizer_combinations.adam_optimizer_keras_v2_fn, optimizer_combinations.adamax_optimizer_keras_v2_fn, optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, optimizer_combinations.nadam_optimizer_keras_v2_fn, optimizer_combinations.rmsprop_optimizer_keras_v2_fn, optimizer_combinations.ftrl_optimizer_keras_v2_fn ])) tpu_strategies_graph = tf.__internal__.test.combinations.combine( distribution=tpu_strategies, mode=['graph'], optimizer=[ optimizer_combinations.adagrad_optimizer_v1_fn, optimizer_combinations.adam_optimizer_v1_fn, optimizer_combinations.gradient_descent_optimizer_v1_fn, optimizer_combinations.rmsprop_optimizer_v1_fn, optimizer_combinations.adagrad_optimizer_keras_v2_fn, optimizer_combinations.adam_optimizer_keras_v2_fn, optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, optimizer_combinations.rmsprop_optimizer_keras_v2_fn ]) tpu_strategies_eager = tf.__internal__.test.combinations.combine( distribution=tpu_strategies, mode=['eager'], optimizer=[ optimizer_combinations.adagrad_optimizer_keras_v2_fn, optimizer_combinations.adam_optimizer_keras_v2_fn, optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, optimizer_combinations.rmsprop_optimizer_keras_v2_fn ]) multi_worker_eager = tf.__internal__.test.combinations.combine( distribution=multi_worker_mirrored_strategies, mode=['eager'], optimizer=[ optimizer_combinations.adadelta_optimizer_keras_v2_fn, optimizer_combinations.adagrad_optimizer_keras_v2_fn, optimizer_combinations.adam_optimizer_keras_v2_fn, optimizer_combinations.adamax_optimizer_keras_v2_fn, optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, optimizer_combinations.nadam_optimizer_keras_v2_fn, optimizer_combinations.rmsprop_optimizer_keras_v2_fn, 
optimizer_combinations.ftrl_optimizer_keras_v2_fn ]) return (non_tpu_strategies + tpu_strategies_eager + tpu_strategies_graph + multi_worker_eager) class BatchCountingCB(keras.callbacks.Callback): def __init__(self): super(BatchCountingCB, self).__init__() self.train_begin_batches = [] self.train_end_batches = [] self.test_begin_batches = [] self.test_end_batches = [] self.predict_begin_batches = [] self.predict_end_batches = [] def on_train_batch_begin(self, batch, logs=None): self.train_begin_batches.append(batch) def on_train_batch_end(self, batch, logs=None): self.train_end_batches.append(batch) def on_test_batch_begin(self, batch, logs=None): self.test_begin_batches.append(batch) def on_test_batch_end(self, batch, logs=None): self.test_end_batches.append(batch) def on_predict_batch_begin(self, batch, logs=None): self.predict_begin_batches.append(batch) def on_predict_batch_end(self, batch, logs=None): self.predict_end_batches.append(batch) class TestDistributionStrategyWithNumpyArrays(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calculating_input_params_no_steps_no_batch_size(self, distribution): # Calculate the per_replica_batch_size scaling factor for strategies # that use per_core_batch_size replica_scale_factor = 1.0 if not distributed_training_utils.global_batch_size_supported(distribution): replica_scale_factor = distribution.num_replicas_in_sync with self.cached_session(): # Default global batch size 32 for input with 64 samples run in 2 steps steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 64, steps=None, batch_size=None) self.assertEqual(batch_size, 32 // replica_scale_factor) self.assertEqual(steps, 2) # Computed global batch size 20 is lower than 32 if we pass less samples. 
steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 20, steps=None, batch_size=None) self.assertEqual(batch_size, 20 // replica_scale_factor) self.assertEqual(steps, 1) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calculating_input_params_with_steps_no_batch_size( self, distribution): # Calculate the per_replica_batch_size scaling factor for strategies # that use per_core_batch_size replica_scale_factor = 1.0 if not distributed_training_utils.global_batch_size_supported(distribution): replica_scale_factor = distribution.num_replicas_in_sync with self.cached_session(): # Computed global batch size is correct for number of specified 1 step steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 64, steps=1, batch_size=None) self.assertEqual(batch_size, 64 // replica_scale_factor) self.assertEqual(steps, 1) # Computed global batch size is correct for number of specified 2 steps steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 64, steps=2, batch_size=None) self.assertEqual(batch_size, 32 // replica_scale_factor) self.assertEqual(steps, 2) # All samples can not be consumed in specified number of steps with self.assertRaisesRegex(ValueError, 'not divisible by steps'): distributed_training_utils_v1.get_input_params( distribution, 63, steps=2, batch_size=None) # This cases is different for different strategies due to the # difference in supported batch size being global or per-replica. 
if replica_scale_factor == 1: # Computed global batch size is correct even if not sharadable steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 63, steps=3, batch_size=None) self.assertEqual(batch_size, 21) self.assertEqual(steps, 3) else: # Computed global batch size can not be sharded across replicas with self.assertRaisesRegex( ValueError, 'could not be sharded evenly ' 'across the sync replicas'): distributed_training_utils_v1.get_input_params( distribution, 63, steps=1, batch_size=None) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calculating_input_params_no_steps_with_batch_size( self, distribution): # Calculate the per_replica_batch_size scaling factor for strategies # that use per_core_batch_size replica_scale_factor = 1.0 if not distributed_training_utils.global_batch_size_supported(distribution): replica_scale_factor = distribution.num_replicas_in_sync with self.cached_session(): # Computed steps is correct for specified batch size steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 64, steps=None, batch_size=16) self.assertEqual(batch_size, 16) self.assertEqual(steps, 4 // replica_scale_factor) # Computed steps is correct for specified batch size steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 64, steps=None, batch_size=32) self.assertEqual(batch_size, 32) self.assertEqual(steps, 2 // replica_scale_factor) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calculating_input_params_with_steps_with_batch_size( self, distribution): with self.cached_session(): # No change to steps and batch size if both specified and feasible steps, batch_size = distributed_training_utils_v1.get_input_params( distribution, 64, steps=5, batch_size=3) self.assertEqual(batch_size, 3) self.assertEqual(steps, 5) # Number of samples is less than global batch size * steps with 
self.assertRaisesRegex(ValueError, 'less than samples required'): distributed_training_utils_v1.get_input_params( distribution, 64, steps=10, batch_size=13) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calling_model_with_numpy_arrays(self, distribution): with self.cached_session(): with distribution.scope(): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae'] model.compile( optimizer, loss, metrics=metrics) inputs = np.zeros((64, 3), dtype=np.float32) targets = np.zeros((64, 4), dtype=np.float32) # Call fit with validation data model.fit( inputs, targets, epochs=1, batch_size=2, verbose=0, validation_data=(inputs, targets)) # TODO(anjalisridhar): We need tests for when the batch size and steps # are smaller and results in a 0 batch_size and steps value. model.evaluate(inputs, targets) model.evaluate(inputs, targets, batch_size=8) model.predict(inputs) model.predict(inputs, batch_size=8) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calling_model_with_mixed_precision(self, distribution): if isinstance(distribution, (tf.compat.v1.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): self.skipTest('b/152097775') if backend.is_tpu_strategy(distribution): policy_name = 'mixed_bfloat16' else: policy_name = 'mixed_float16' with self.cached_session(), \ distribution.scope(), \ policy.policy_scope(policy_name): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(0.001) x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) y = keras.layers.Activation('softmax', dtype='float32')(y) model = keras.Model(x, y) loss = 'mse' metrics = ['mae'] model.compile( optimizer, loss, metrics=metrics) # We need to pass float32 
since TPUs do not support float64, even though # these arrays will immediately be casted to bfloat16 on TPUs. We also # cannot pass bfloat16, as Numpy does not support it. inputs = np.zeros((64, 3), dtype='float32') targets = np.zeros((64, 4), dtype='float32') model.fit( inputs, targets, epochs=1, batch_size=2, verbose=0, validation_data=(inputs, targets)) model.evaluate(inputs, targets) model.evaluate(inputs, targets, batch_size=8) model.predict(inputs) model.predict(inputs, batch_size=8) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_operator_overload_mixed_precision(self, distribution): # Regression test that tests a fixed bug does not reoccur. Adding an # AutoCastVariable to a tensor on a TPU, where the variable was the LHS of # the '+' operator, used to cause the gradient w.r.t. the variable to be # None. if isinstance(distribution, (tf.compat.v1.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): self.skipTest('b/152097775') if backend.is_tpu_strategy(distribution): policy_name = 'mixed_bfloat16' else: policy_name = 'mixed_float16' class MyLayer(keras.layers.Layer): def build(self, _): self.v1 = self.add_weight('v', ()) self.v2 = self.add_weight('v', ()) def call(self, inp): inp += self.v1 return self.v2 + inp with self.cached_session(), distribution.scope(): layer = MyLayer(dtype=policy_name) def run_fn(): x = np.array([1.]) with tf.GradientTape() as tape: y = layer(x) grad_v1, grad_v2 = tape.gradient(y, [layer.v1, layer.v2]) return grad_v1, grad_v2 if tf.executing_eagerly(): run_fn = tf.function(run_fn) grad_v1, grad_v2 = distribution.run(run_fn) self.assertIsNotNone(grad_v1) self.assertIsNotNone(grad_v2) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( 
distribution=[tf.__internal__.distribute.combinations.one_device_strategy], mode=['graph', 'eager'])) def test_optimizer_in_cross_replica_context_raises_error(self, distribution): with self.cached_session(), distribution.scope(): model = keras.models.Sequential([keras.layers.Dense(1)]) x = np.array([[1.]]) with tf.GradientTape() as tape: y = model(x) gradients = tape.gradient(y, model.trainable_variables) optimizer = gradient_descent_keras.SGD() with self.assertRaisesRegex(RuntimeError, 'cannot be called in cross-replica context'): optimizer.apply_gradients(zip(gradients, model.trainable_variables)) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calling_model_with_nested_numpy_arrays(self, distribution): with self.cached_session(): with distribution.scope(): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(learning_rate=0.001) model = multi_input_output_model() loss = 'mse' model.compile( optimizer, loss) input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32) input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32) inputs = [input_a_np, input_b_np] output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32) output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32) targets = [output_d_np, output_e_np] # Call fit with validation data model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0) # TODO(anjalisridhar): We need tests for when the batch size and steps are # smaller and results in a 0 batch_size and steps value. 
model.evaluate(inputs, targets) model.evaluate(inputs, targets, batch_size=8) model.predict(inputs) model.predict(inputs, batch_size=8) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategies_minus_tpu, mode=['graph', 'eager']) + tf.__internal__.test.combinations.combine( distribution=multi_worker_mirrored_strategies, mode=['eager'])) def test_numpy_with_sample_weights(self, distribution): with self.cached_session(), distribution.scope(): model = get_sample_weights_model() optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile( optimizer, loss) inputs = np.array([[0], [1], [2], [3]], np.float32) targets = np.array([[2], [4], [6], [8]], np.float32) sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32) result = model.evaluate( inputs, targets, batch_size=2, sample_weight=sample_weights, verbose=1) # The per sample loss is multiplied by the corresponding sample weight. # The average of these weighted losses is the return value of the # `evaluate` call. For example, in the test above the average weighted # loss is calculated in the following manner: # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75 # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5 # final result = (batch_1 + batch_2) / 2 = 10.625. # The first time we divide by number of input samples and the second time # we divide by number of steps/batches that the loss is aggregated over. 
self.assertAllClose(result, 10.625) # We now test without passing sample_weights: # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5 # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5 # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5 result = model.evaluate(inputs, targets, batch_size=2, verbose=1) self.assertAllClose(result, 13.5) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_flatten_predict_outputs(self, distribution): with self.cached_session(): with distribution.scope(): model = multi_input_output_model() optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(learning_rate=0.001) loss = 'mse' model.compile( optimizer, loss) # We take 6 input samples with each input having a dimension of 3 or 5. input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32) input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32) inputs = [input_a_np, input_b_np] outs = model.predict(inputs) # `predict` a list that is equal in length to the number of model outputs. # In this test our model has two outputs and each element of `outs` # corresponds to all the samples of one of the model outputs. self.assertLen(outs, 2) # Each of the output samples have a dimension of 7. We should process all # the available input samples(6). 
self.assertAllEqual([6, 7], outs[0].shape) self.assertAllEqual([6, 7], outs[1].shape) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(tpu_strategy_combinations_graph_only(), tf.__internal__.test.combinations.combine(batch_size=[4, 6]))) def test_evaluate_with_partial_batch(self, distribution, batch_size): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile(optimizer, loss, metrics=metrics) cpu_model = get_model() cpu_model.compile(optimizer, loss, metrics=metrics) x = np.random.random((10, 3)).astype('float32') y = np.random.random((10, 4)).astype('float32') # As sample size is 10, we batch by 4 so that the last batch is # a partial batch. Also `evaluate()` using numpy array as inputs without # distribution strategy uses entire sample as a single batch. As so, # we remove parameters `batch_size` and `steps`. cpu_model.set_weights(model_with_ds_strategy.get_weights()) evaluate_ground_truth = cpu_model.evaluate(x, y) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. steps = np.ceil(10.0 / batch_size) self.assertAllClose( model_with_ds_strategy.evaluate( x, y, batch_size=batch_size, steps=steps)[1:], evaluate_ground_truth[1:], atol=1e-5, rtol=1e-5) # Test that `steps` is inferred correctly when final partial batch exists. 
self.assertAllClose( model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[1:], evaluate_ground_truth[1:], atol=1e-5, rtol=1e-5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( tpu_strategy_combinations_graph_only())) def test_predict_with_partial_batch(self, distribution): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile( optimizer, loss) cpu_model = get_model() cpu_model.compile(optimizer, loss) inputs = np.random.random((10, 3)).astype(np.float32) # As sample size is 10, we batch by 4 so that the last batch is # a partial batch. Also `predict()` using numpy array as inputs without # distribution strategy uses entire sample as a single batch. As so, # we remove parameters `batch_size` and `steps`. cpu_model.set_weights(model_with_ds_strategy.get_weights()) predict_ground_truth = cpu_model.predict(inputs) self.assertAllClose( model_with_ds_strategy.predict(inputs, batch_size=4, steps=3), predict_ground_truth, atol=1e-5, rtol=1e-5) # Test that `steps` is inferred correctly when final partial batch exists. 
self.assertAllClose( model_with_ds_strategy.predict(inputs, batch_size=4), predict_ground_truth, atol=1e-5, rtol=1e-5) @tf.__internal__.distribute.combinations.generate(tpu_strategy_combinations_graph_only()) def test_no_target_model(self, distribution): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) class MyLayer(keras.layers.Layer): def call(self, inputs, training=None): self.add_loss(tf.reduce_sum(inputs), inputs=True) return inputs with distribution.scope(): model = keras.models.Sequential() model.add( keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE)) model.add(MyLayer()) model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax')) model.compile(optimizer) inputs = np.zeros((20, 10), np.float32) model.fit(inputs, epochs=1, steps_per_epoch=2) model.predict(inputs, steps=1) model.evaluate(inputs, steps=1) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( tpu_strategy_combinations_graph_only())) def test_predict_multi_output_model_with_partial_batch( self, distribution): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = simple_multi_inputs_multi_outputs_model() model_with_ds_strategy.compile( optimizer, loss) cpu_model = simple_multi_inputs_multi_outputs_model() cpu_model.compile(optimizer, loss) input_data, _ = get_multi_inputs_multi_outputs_data() input_dict = { 'input_a': input_data['input_a'], 'input_b': input_data['input_b'], } # As sample size is 200, we batch by 18 so that the last batch is # a partial batch. Also `fit()` using numpy array as inputs without # distribution strategy uses entire sample as a single batch. As so, # we remove parameters `batch_size` and `steps`. 
cpu_model.set_weights(model_with_ds_strategy.get_weights()) self.assertAllClose( model_with_ds_strategy.predict(input_dict, batch_size=18, steps=12), cpu_model.predict(input_dict), atol=1e-4, rtol=1e-4) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_gradients_are_none(self, distribution): if not tf.executing_eagerly(): self.skipTest('None gradients are not supported in graph mode') class DenseWithExtraWeight(keras.layers.Dense): def build(self, input_shape): # Gradients w.r.t. extra_weights are None self.extra_weight_1 = self.add_weight('extra_weight_1', shape=(), initializer='ones') super(DenseWithExtraWeight, self).build(input_shape) self.extra_weight_2 = self.add_weight('extra_weight_2', shape=(), initializer='ones') with distribution.scope(): model = keras.Sequential([DenseWithExtraWeight(4, input_shape=(4,))]) model.compile('adam', 'mse') inputs = np.random.normal(size=(64, 4)) targets = np.random.normal(size=(64, 4)) old_kernel = model.get_weights()[1] model.fit(inputs, targets) new_kernel = model.get_weights()[1] self.assertNotAllEqual(old_kernel, new_kernel) class TestDistributionStrategyWithDatasets(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_calling_model_on_same_dataset(self, distribution): with self.cached_session(): with distribution.scope(): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics) dataset = get_dataset(distribution) # Call fit with validation data model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, validation_steps=2) model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, validation_steps=2) model.predict(get_predict_dataset(distribution), steps=2) 
  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
    # Verifies that validation interleaved into fit() produces the same
    # numbers as manually calling evaluate() after each epoch.
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        user_controlled_model = get_model()
        user_controlled_model.compile(
            optimizer_fn(0.001),
            loss='mse',
            metrics=['mae', keras.metrics.CategoricalAccuracy()])

        interleaved_model = get_model()
        interleaved_model.set_weights(user_controlled_model.get_weights())
        interleaved_model.compile(
            optimizer_fn(0.001),
            loss='mse',
            metrics=['mae', keras.metrics.CategoricalAccuracy()])

      dataset = get_dataset(distribution)

      # Call fit with validation interleaved
      interleaved_output = interleaved_model.fit(
          dataset,
          epochs=2,
          steps_per_epoch=2,
          verbose=1,
          validation_data=dataset,
          validation_steps=2,
          shuffle=False)

      # Manually control the validation running after each epoch.
      user_controlled_output = []
      for _ in range(2):
        user_controlled_model.fit(
            dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
        user_controlled_output.append(
            user_controlled_model.evaluate(dataset, steps=2))

      self.assertEqual(interleaved_output.history['val_loss'],
                       [x[0] for x in user_controlled_output])
      val_mean_absolute_error = interleaved_output.history.get(
          'val_mean_absolute_error')
      if not val_mean_absolute_error:
        # The name of the metric changed in TF2.0
        val_mean_absolute_error = interleaved_output.history['val_mae']
      self.assertEqual(val_mean_absolute_error,
                       [x[1] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
                       [x[2] for x in user_controlled_output])

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
    # Verifies fit() accepts multi-input datasets structured both as nested
    # tuples and as dicts keyed by input layer name.
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = multi_input_output_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

      input_a_np = np.random.random((10, 3)).astype('float32')
      input_b_np = np.random.random((10, 5)).astype('float32')
      output_d_np = np.random.random((10, 7)).astype('float32')
      output_e_np = np.random.random((10, 7)).astype('float32')

      # Test with tuples
      dataset_tuple = tf.data.Dataset.from_tensor_slices(
          ((input_a_np, input_b_np), (output_d_np, output_e_np)))
      dataset_tuple = dataset_tuple.repeat(100)
      dataset_tuple = dataset_tuple.batch(10)

      model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)

      # Test with dict
      dataset_dict = tf.data.Dataset.from_tensor_slices(({
          'input_a': input_a_np,
          'input_b': input_b_np
      }, (output_d_np, output_e_np)))
      dataset_dict = dataset_dict.repeat(100)
      dataset_dict = dataset_dict.batch(10)

      model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_with_dictionary_in_the_dataset_b135161171(self, distribution):
    # Regression test (b/135161171): fit() with a dataset whose elements are
    # dicts, where the loss is produced by a Lambda layer via add_loss.
    if backend.is_tpu_strategy(distribution):
      self.skipTest('b/142805125')

    def custom_loss(predict, label, weight):
      bce = keras.losses.binary_crossentropy(label, predict)
      return tf.reduce_mean(bce * weight)

    with self.cached_session():
      with distribution.scope():
        input_img = keras.layers.Input([64, 64, 3], name='img')
        input_lbl = keras.layers.Input([64, 64, 1], name='lbl')
        input_weight = keras.layers.Input([64, 64], name='weight')
        predict = keras.layers.Conv2D(2, [1, 1], padding='same')(input_img)
        loss_lambda = keras.layers.Lambda(
            lambda x: custom_loss(*x), name='my_loss')
        my_loss = loss_lambda([predict, input_lbl, input_weight])
        model = keras.models.Model(
            inputs=[input_img, input_lbl, input_weight],
            outputs=[predict, my_loss])
        model.add_loss(model.get_layer('my_loss').output)
        model.compile(optimizer='adam')

      if tf.executing_eagerly():
        # Eager expects a one-element tuple holding the input dict.

        def map_fn(img, lbl, weight):
          inputs = {'img': img, 'lbl': lbl, 'weight': weight}
          return (inputs,)
      else:
        # Graph mode expects an (inputs, targets) pair; targets are empty.

        def map_fn(img, lbl, weight):
          inputs = {'img': img, 'lbl': lbl, 'weight': weight}
          return inputs, {}

      fake_imgs = np.ones([50, 64, 64, 3], dtype=np.float32)
      fake_lbls = np.ones([50, 64, 64, 1], dtype=np.float32)
      fake_weights = np.ones([50, 64, 64], dtype=np.float32)

      data = tf.data.Dataset.from_tensor_slices(
          (fake_imgs, fake_lbls, fake_weights)).map(map_fn).batch(10)

      model.fit(data)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_eval_and_predict_methods_on_dataset_without_steps(
      self, distribution):
    # Verifies dataset-driven fit/evaluate/predict (no steps args) match the
    # equivalent numpy-driven calls.
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((1000, 3), dtype=np.float32)
      targets = np.zeros((1000, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      fit_with_numpy = model.fit(
          inputs, targets, epochs=1, batch_size=10).history
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.batch(10, drop_remainder=True)
      fit_with_ds = model.fit(dataset, epochs=1).history
      eval_with_ds = model.evaluate(dataset)
      predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)
      predict_dataset = predict_dataset.batch(10, drop_remainder=True)
      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_predict_on_dataset_with_unknown_cardinality_without_steps(
      self, distribution, mode):
    # Verifies predict() over a dataset whose cardinality is UNKNOWN matches
    # numpy-based prediction.
    if mode == 'graph' and backend.is_tpu_strategy(distribution):
      self.skipTest('partial batch not supported with TPU in graph mode.')

    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((20, 3), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      predict_with_numpy = model.predict(inputs, batch_size=10)

      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(
              tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_on_dataset_with_unknown_cardinality_without_steps(
      self, distribution, mode):
    # Verifies fit/evaluate/predict over UNKNOWN-cardinality datasets (no
    # steps args) match the numpy-driven equivalents.
    # TODO(b/155867206): Investigate why this test occasionally segfaults on TPU
    # in eager mode.
    if mode == 'eager' and backend.is_tpu_strategy(distribution):
      self.skipTest('caused segfault with TPU in eager mode.')

    if mode == 'graph' and backend.is_tpu_strategy(distribution):
      self.skipTest('partial batch not supported with TPU in graph mode.')

    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((100, 3), dtype=np.float32)
      targets = np.zeros((100, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
fit_with_numpy = model.fit( inputs, targets, epochs=1, batch_size=10).history fit_with_numpy_multiple_epochs = model.fit( inputs, targets, epochs=2, batch_size=10).history eval_with_numpy = model.evaluate(inputs, targets, batch_size=10) predict_with_numpy = model.predict(inputs, batch_size=10) dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs, targets) predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) eval_with_ds = model.evaluate(dataset) predict_with_ds = model.predict(predict_dataset) self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4) fit_with_ds = model.fit(dataset, epochs=1).history fit_with_ds_multiple_epochs = model.fit(dataset, epochs=2).history self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( fit_with_numpy_multiple_epochs, fit_with_ds_multiple_epochs, atol=1e-4, rtol=1e-4) @tf.__internal__.distribute.combinations.generate(tpu_strategy_combinations_graph_only()) def test_on_dataset_with_unknown_cardinality(self, distribution): with self.cached_session(): with distribution.scope(): model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile( tf.compat.v1.train.GradientDescentOptimizer(0.001), loss, metrics=metrics) inputs = np.zeros((1000, 3), dtype=np.float32) targets = np.zeros((1000, 4), dtype=np.float32) # steps/steps_per_epoch are calculated when using numpy arrays as # input data. 
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10) predict_with_numpy = model.predict(inputs, batch_size=10) dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs, targets) predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) self.assertEqual( keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)), tf.data.experimental.UNKNOWN_CARDINALITY) eval_with_ds = model.evaluate(dataset, steps=100) predict_with_ds = model.predict(predict_dataset, steps=100) self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4) with self.assertRaisesRegex(ValueError, 'Number of steps could not be inferred'): model.fit(dataset, epochs=1) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def test_fit_eval_and_predict_methods_on_dataset( self, distribution): with self.cached_session(): with distribution.scope(): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics) dataset = get_dataset(distribution) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1) model.predict(get_predict_dataset(distribution), steps=2) @tf.__internal__.distribute.combinations.generate(strategy_and_optimizer_combinations()) def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer): with self.cached_session(): with distribution.scope(): model = get_model() loss = 'mse' model.compile( optimizer(), loss) dataset = get_dataset(distribution) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1) 
model.predict(get_predict_dataset(distribution), steps=2) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, tf.__internal__.distribute.combinations.one_device_strategy ], mode=['graph', 'eager'])) def test_dataset_wrong_input_shape(self, distribution, mode): if mode == 'graph': self.skipTest( 'TODO(b/120943676, b/120957836): Re-enable for graph once the ' 'validation code is restored.') with self.cached_session(): with distribution.scope(): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(learning_rate=0.001) model = get_model() loss = 'mse' model.compile( optimizer, loss) # Wrong input shape inputs = np.zeros((10, 5), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) with self.assertRaisesRegex(ValueError, 'is incompatible with'): model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu ], mode=['graph', 'eager'])) def test_dataset_external_batch_input_validation( self, distribution): with self.cached_session(): with distribution.scope(): optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(learning_rate=0.001) model = get_model() loss = 'mse' model.compile( optimizer, loss) # Batching is done outside tf.data's `batch` inputs = np.zeros((100, 10, 3), dtype=np.float32) targets = np.zeros((100, 10, 4), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ 
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus ], mode=['graph', 'eager'])) def test_learning_phase_value(self, distribution): # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare # meaningful values. Currently we don't pass the learning phase if the # Lambda layer uses the learning phase. with self.cached_session(): with distribution.scope(): x = keras.layers.Input(shape=(1,), name='input') y = keras.layers.Dense(1, kernel_initializer='ones')(x) z = keras.layers.Dropout(0.9999)(y) model = keras.Model(x, z) initial_weights = model.get_weights() optimizer_fn = gradient_descent_keras.SGD optimizer = optimizer_fn(0.005) loss = 'mse' metrics = ['acc'] model.compile( optimizer, loss, metrics=metrics) batch_size = 8 if isinstance(distribution, (tf.distribute.MirroredStrategy, tf.compat.v1.distribute.MirroredStrategy)): # MirroredStrategy uses global batch size. batch_size = 8 * distribution.num_replicas_in_sync inputs = np.ones((10, 1), dtype=np.float32) targets = np.ones((10, 1), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat().batch(batch_size) hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1) self.assertAlmostEqual(hist.history['acc'][0], 0, 0) with distribution.scope(): model.set_weights(initial_weights) # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185. 
# evaluate_output = model.evaluate(dataset, steps=20) # self.assertAlmostEqual(evaluate_output[1], 1, 0) inputs = np.ones((10, 1), dtype=np.float32) predict_dataset = tf.data.Dataset.from_tensor_slices(inputs) predict_dataset = predict_dataset.repeat().batch(batch_size) output = model.predict(predict_dataset, steps=10) # `predict` runs for 10 steps ref_output = np.ones((160, 1), dtype=np.float32) self.assertArrayNear(output, ref_output, 1e-1) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations()) def testOptimizerWithCallbacks(self, distribution): with self.cached_session(): with distribution.scope(): model = get_model() optimizer = gradient_descent_keras.SGD(0.01) loss = 'mse' model.compile( optimizer, loss) dataset = get_dataset(distribution) def schedule(_): return 0.001 model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, callbacks=[keras.callbacks.LearningRateScheduler(schedule)]) self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr)) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times(tpu_strategy_combinations_graph_only(), tf.__internal__.test.combinations.combine(batch_size=[4, 6]))) def test_evaluate_with_dataset_with_partial_batch(self, distribution, batch_size): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile(optimizer, loss, metrics=metrics) cpu_model = get_model() cpu_model.compile(optimizer, loss, metrics=metrics) x = np.random.random((10, 3)).astype('float32') y = np.random.random((10, 4)).astype('float32') dataset = tf.data.Dataset.from_tensor_slices((x, y)) # As sample size is 10, we make the last batch a partial batch. 
cpu_model.set_weights(model_with_ds_strategy.get_weights()) dataset_with_partial_batch = dataset.batch(batch_size) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. steps = np.ceil(10.0 / batch_size) self.assertAllClose( model_with_ds_strategy.evaluate( dataset_with_partial_batch, steps=steps)[1:], cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:], atol=1e-5, rtol=1e-5) self.assertAllClose( model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:], cpu_model.evaluate(dataset_with_partial_batch)[1:], atol=1e-5, rtol=1e-5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( tpu_strategy_combinations_graph_only())) def test_predict_with_dataset_with_partial_batch( self, distribution): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile( optimizer, loss) cpu_model = get_model() cpu_model.compile(optimizer, loss) inputs = np.random.random((10, 3)).astype(np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs)) # As sample size is 10, we batch by 4 so that the last batch is # a partial batch. 
dataset_with_partial_batch = dataset.batch(4) cpu_model.set_weights(model_with_ds_strategy.get_weights()) self.assertAllClose( model_with_ds_strategy.predict(dataset_with_partial_batch, steps=3), cpu_model.predict(dataset_with_partial_batch, steps=3), atol=1e-5, rtol=1e-5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( tpu_strategy_combinations_graph_only())) def test_predict_multi_output_model_with_dataset_with_partial_batch( self, distribution): with self.cached_session(): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = simple_multi_inputs_multi_outputs_model() model_with_ds_strategy.compile( optimizer, loss) cpu_model = simple_multi_inputs_multi_outputs_model() cpu_model.compile(optimizer, loss) input_data, _ = get_multi_inputs_multi_outputs_data() input_dict = { 'input_a': input_data['input_a'], 'input_b': input_data['input_b'], } dataset = tf.data.Dataset.from_tensor_slices(input_dict) # As sample size is 200, we batch by 18 using 12 steps per epoch so # that the last batch is a partial batch. 
dataset_with_partial_batch = dataset.batch(18) cpu_model.set_weights(model_with_ds_strategy.get_weights()) self.assertAllClose( model_with_ds_strategy.predict(dataset_with_partial_batch, steps=12), cpu_model.predict(dataset_with_partial_batch, steps=12), atol=1e-4, rtol=1e-4) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations_minus_default()) def test_match_model_input_matches_with_dataset_tensors(self, distribution): def _create_model_input_output_tensors(): input_a = keras.layers.Input(shape=(16,), name='z_input_sorted_last') input_b = keras.layers.Input(shape=(32,), name='a_input_sorted_first') intermediate_a = keras.layers.Dense(10)(input_a) intermediate_b = keras.layers.Dense(10)(input_b) merged = keras.layers.Add()([intermediate_a, intermediate_b]) output = keras.layers.Dense(2)(merged) return input_a, input_b, output input_dict = { 'z_input_sorted_last': np.random.rand(32, 16).astype(np.float32), 'a_input_sorted_first': np.random.rand(32, 32).astype(np.float32) } target = np.ones((32, 2), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((input_dict, target)) dataset = dataset.batch(4, drop_remainder=True) with self.cached_session(): with distribution.scope(): input_a, input_b, output = _create_model_input_output_tensors() # `input_a`, which has input name that comes last in alphanumeric # order, is the first input of the model input layers. If tensors # from `input_dict` is blindly flattened and passed to model # inputs incorrectly, this would result in `input_a` input layer # matching with tensor `a_input_sorted_first` and would result in # shape mismatch. 
        model_with_array_input = keras.models.Model(
            inputs=[input_a, input_b], outputs=output)
        model_with_array_input.compile('sgd', 'mse')
        model_weights = model_with_array_input.get_weights()
        model_with_array_input_fit = model_with_array_input.fit(
            dataset, steps_per_epoch=1, epochs=1).history

        input_a, input_b, output = _create_model_input_output_tensors()
        model_with_dict_input = keras.models.Model(
            inputs={
                'z_input_sorted_last': input_a,
                'a_input_sorted_first': input_b,
            },
            outputs=output)
        model_with_dict_input.compile('sgd', 'mse')
        model_with_dict_input.set_weights(model_weights)
        model_with_dict_input_fit = model_with_dict_input.fit(
            dataset, steps_per_epoch=1, epochs=1).history

      # Identical initial weights + identical data must produce identical
      # training histories if input matching is done by name.
      self.assertAllClose(
          model_with_dict_input_fit,
          model_with_array_input_fit,
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu, mode=['graph', 'eager']) +
      tf.__internal__.test.combinations.combine(
          distribution=multi_worker_mirrored_strategies, mode=['eager']))
  def test_dataset_with_sample_weights(self, distribution):
    # Verifies evaluate() applies per-sample weights from a 3-tuple dataset
    # and falls back to unweighted loss for a 2-tuple dataset.
    with self.cached_session(), distribution.scope():
      model = get_sample_weights_model()
      optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(optimizer, loss)

      inputs = np.array([[0], [1], [2], [3]], np.float32)
      targets = np.array([[2], [4], [6], [8]], np.float32)
      sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
      ds = tf.data.Dataset.from_tensor_slices(
          (inputs, targets, sample_weights)).batch(2)
      result = model.evaluate(ds, verbose=1)

      # The per sample loss is multiplied by the corresponding sample weight.
      # The average of these weighted losses is the return value of the
      # `evaluate` call. For example, in the test above the average weighted
      # loss is calculated in the following manner:
      # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
      # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
      # final result = (batch_1 + batch_2) / 2 = 10.625.
      # The first time we divide by number of input samples and the second time
      # we divide by number of steps/batches that the loss is aggregated over.
      self.assertAllClose(result, 10.625)

      # We now test without passing sample_weights:
      # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5
      # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5
      # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
      ds = tf.data.Dataset.from_tensor_slices((inputs, targets)).batch(2)
      result = model.evaluate(ds, verbose=1)
      self.assertAllClose(result, 13.5)


class TestDistributionStrategyWithDatasetsFile(tf.test.TestCase,
                                               parameterized.TestCase):
  # Tests that read their input dataset from a single TFRecord file on disk.

  def setUp(self):
    # Serialize 20 zero-tensors of shape (3,) into one TFRecord file that the
    # tests below read back.
    super(TestDistributionStrategyWithDatasetsFile, self).setUp()
    self.input_file_name = os.path.join(self.get_temp_dir(), 'input.tfrecord')
    inputs = np.zeros((20, 3), dtype=np.float32)
    input_dataset = tf.data.Dataset.from_tensor_slices(inputs)
    input_dataset = input_dataset.map(tf.io.serialize_tensor)
    writer = tf.data.experimental.TFRecordWriter(self.input_file_name)
    writer.write(input_dataset)

  # TODO(wxinyi): add a multi-worker test for TPU
  @tf.__internal__.distribute.combinations.generate(
      multi_worker_strategy_combinations_eager_only())
  def test_predict_on_dataset_shard_options_file_multi_worker_mirrored(
      self, distribution, mode):
    # This test is to verify if we successfully switch auto_shard_policy of a
    # input dataset inside model.predict with MultiWorkerMirroredStrategy to
    # AutoShardPolicy.DATA. Since there is only one input file for multiple
    # workers, AutoShardPolicy.AUTO or AutoShardPolicy.FILE will lead to an
    # error. However, since we switch to AutoShardPolicy.DATA in model.predict,
    # no error is raised.
    del mode
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      model = get_model()
      loss = 'mse'
      model.compile(optimizer, loss)

    dataset = tf.data.TFRecordDataset(self.input_file_name)
    dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.float32))

    # The filter makes the dataset's cardinality unknown, so file-based
    # sharding cannot simply be inferred away.
    dummy_op = lambda inp: True
    dataset = dataset.filter(dummy_op).batch(8, drop_remainder=True)

    # Force FILE sharding; predict() is expected to override it with DATA.
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = \
        tf.data.experimental.AutoShardPolicy.FILE
    dataset = dataset.with_options(options)

    model.predict(dataset, steps=1)


class TestRegularizerLoss(tf.test.TestCase, parameterized.TestCase):
  # Verifies regularizer losses are scaled correctly across replicas.

  class IdentityRegularizer(keras.regularizers.Regularizer):
    # Regularization loss equals the (scalar) weight value itself.

    def __call__(self, x):
      return tf.identity(x)

  class AddLayer(keras.layers.Layer):
    # Adds a single scalar weight `v` (with the identity regularizer) to its
    # input.

    def build(self, _):
      self.v = self.add_weight(
          'v', (),
          initializer='ones',
          regularizer=TestRegularizerLoss.IdentityRegularizer())

    def call(self, inputs):
      return inputs + self.v

  @staticmethod
  def loss_fn(_, y_pred):
    # Loss is simply the mean prediction; targets are ignored.
    return tf.reduce_mean(y_pred)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default()))
  def test_regularizer_loss(self, distribution):
    batch_size = 2
    if not distributed_training_utils.global_batch_size_supported(distribution):
      batch_size //= distribution.num_replicas_in_sync

    # Given an input x, which is always 1, and variable v, this model computes
    # Loss=x+v+regularizer_loss, where regularizer_loss=v and the variable is
    # initialized to 1. Therefore, this model computes Loss=1+2v, and so the
    # gradient dLoss/dv = 2. This gradient of 2 is averaged over all examples
    # in a batch and then multiplied by the learning rate of 1. As a result,
    # the model update for one batch should subtract 2 from v, resulting in v
    # being -1. If the regularizer loss is not scaled correctly by number of
    # replicas, the variable value will be incorrect when number of replicas
    # >1. For e.g. it will be -2 if num replicas = 2.
    with distribution.scope():
      x = keras.layers.Input(shape=(1,), batch_size=batch_size)
      y = TestRegularizerLoss.AddLayer()(x)
      model = keras.models.Model(inputs=x, outputs=y)
      opt = gradient_descent_keras.SGD(1.)
      model.compile(opt, loss=TestRegularizerLoss.loss_fn)
      model.fit(
          x=np.array([[1.], [1.]], dtype=np.float32),
          y=np.array([[1.], [1.]], dtype=np.float32),
          batch_size=batch_size)
      v = model.get_weights()[0]
      self.assertEqual(-1.0, v)


@testing_utils.run_all_without_tensor_float_32(
    'Uses Dense layers, which call matmul')
class TestDistributionStrategyWithKerasModels(tf.test.TestCase,
                                              parameterized.TestCase):
  # End-to-end model tests (Sequential/functional/custom loops) under
  # distribution strategies.

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_distribution_strategy_on_sequential_model(self, distribution):
    # Smoke test: Sequential model fit/predict/evaluate with numpy inputs.
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = simple_sequential_model()
      loss = 'mse'
      model.compile(optimizer, loss)

      inputs = np.zeros((20, 10), np.float32)
      targets = np.zeros((20, 2), np.float32)

      model.fit(inputs, targets, epochs=1, batch_size=10)
      model.predict(inputs, batch_size=10)
      model.evaluate(inputs, targets, batch_size=10)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_distribution_strategy_on_functional_model(self, distribution):
    # Smoke test: functional model fit/predict/evaluate with numpy inputs.
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = get_model()
      loss = 'mse'
      model.compile(optimizer, loss)

      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)

      model.fit(inputs, targets, epochs=1)
      model.predict(inputs)
      model.evaluate(inputs, targets)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=all_strategies, mode=['eager']))
  def test_distributed_dataset(self, distribution):
    # Verifies fit() with pre-distributed datasets
    # (experimental_distribute_dataset) runs the expected number of batches
    # and epochs, and that steps_per_epoch is required for multi-replica.
    with distribution.scope():

      class CBCounter(keras.callbacks.Callback):
        # Counts epochs and train/test batches seen by callbacks.

        def __init__(self):
          self.epochs = 0
          self.train_batches = 0
          self.test_batches = 0

        def on_epoch_end(self, batch, logs=None):
          self.epochs += 1

        def on_train_batch_end(self, batch, logs=None):
          self.train_batches += 1

        def on_test_batch_end(self, batch, logs=None):
          self.test_batches += 1

      model = keras.Sequential([keras.layers.Dense(1)])
      model.compile('sgd', 'mse')
      cb_counter = CBCounter()

      x, y = np.ones((100, 10)), np.ones((100, 1))
      ds = tf.data.Dataset.from_tensor_slices((x, y))
      ds = ds.batch(10).repeat(2)
      ds = distribution.experimental_distribute_dataset(ds)

      val_ds = tf.data.Dataset.from_tensor_slices((x, y))
      val_ds = val_ds.batch(20)
      val_ds = distribution.experimental_distribute_dataset(val_ds)

      model.fit(
          ds,
          steps_per_epoch=10,
          validation_data=val_ds,
          validation_steps=5,
          epochs=2,
          callbacks=[cb_counter])

      self.assertEqual(cb_counter.train_batches, 20)
      self.assertEqual(cb_counter.test_batches, 10)
      self.assertEqual(cb_counter.epochs, 2)

      # Check for `steps_per_epoch`.
      if distribution.num_replicas_in_sync > 1:
        with self.assertRaisesRegex(ValueError,
                                    'distributed dataset, you must specify'):
          model.fit(ds, epochs=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=all_strategies, mode=['eager']))
  def test_distributed_datasets_from_function(self, distribution):
    # Same batch/epoch accounting as above, but with
    # distribute_datasets_from_function-produced datasets.
    with distribution.scope():

      class CBCounter(keras.callbacks.Callback):
        # Counts epochs and train/test batches seen by callbacks.

        def __init__(self):
          self.epochs = 0
          self.train_batches = 0
          self.test_batches = 0

        def on_epoch_end(self, batch, logs=None):
          self.epochs += 1

        def on_train_batch_end(self, batch, logs=None):
          self.train_batches += 1

        def on_test_batch_end(self, batch, logs=None):
          self.test_batches += 1

      model = keras.Sequential([keras.layers.Dense(1)])
      model.compile('sgd', 'mse')
      cb_counter = CBCounter()

      def make_dataset(_):
        # Per-replica dataset factory; the input context argument is unused.
        x, y = np.ones((100, 10)), np.ones((100, 1))
        ds = tf.data.Dataset.from_tensor_slices((x, y))
        ds = ds.batch(5).repeat()
        return ds

      ds = distribution.distribute_datasets_from_function(make_dataset)
      val_ds = distribution.distribute_datasets_from_function(make_dataset)

      model.fit(
          ds,
          steps_per_epoch=10,
          validation_data=val_ds,
          validation_steps=5,
          epochs=2,
          callbacks=[cb_counter])

      self.assertEqual(cb_counter.train_batches, 20)
      self.assertEqual(cb_counter.test_batches, 10)
      self.assertEqual(cb_counter.epochs, 2)

      # Check for `steps_per_epoch`.
      if distribution.num_replicas_in_sync > 1:
        with self.assertRaisesRegex(ValueError,
                                    'distributed dataset, you must specify'):
          model.fit(ds, epochs=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=all_strategies, mode=['eager']))
  def test_host_training_loop(self, distribution):
    # With steps_per_execution=10 and 50 batches, callbacks should fire once
    # per 10-step execution chunk for fit/evaluate/predict.
    if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input((10, 10, 3))
      x = keras.layers.Conv2D(3, kernel_size=3)(inputs)
      x = keras.layers.Flatten()(x)
      outputs = keras.layers.Dense(1)(x)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=10)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10, 10, 3)), np.ones((100, 1))
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.train_end_batches, [9, 19, 29, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.test_end_batches, [9, 19, 29, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.predict_end_batches, [9, 19, 29, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_last_partial_execution(self, distribution):
    # With steps_per_execution=20 and 50 batches, the final execution chunk is
    # partial (10 steps); batch indices must still line up.
    if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=20)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10)), np.ones((100, 1))
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_dataset_unknown_size(self, distribution):
    # Verifies steps_per_execution > 1 with an UNKNOWN-cardinality dataset:
    # explicit steps are required, and chunked callbacks still fire correctly.
    if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=20)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    ds = ds.filter(lambda *args, **kwargs: True)  # Makes the size UNKNOWN.
bc = BatchCountingCB() with self.assertRaisesRegex(ValueError, 'steps_per_execution'): model.fit(ds, epochs=2, callbacks=[bc]) train_ds = ds.repeat(2) model.fit(train_ds, steps_per_epoch=50, epochs=2, callbacks=[bc]) self.assertEqual(bc.train_begin_batches, [0, 20, 40, 0, 20, 40]) self.assertEqual(bc.train_end_batches, [19, 39, 49, 19, 39, 49]) with self.assertRaisesRegex(ValueError, 'steps_per_execution'): model.evaluate(ds, callbacks=[bc]) test_ds = ds.repeat(2) model.evaluate(test_ds, steps=50, callbacks=[bc]) self.assertEqual(bc.test_begin_batches, [0, 20, 40]) self.assertEqual(bc.test_end_batches, [19, 39, 49]) predict_ds = ds.repeat(2) model.predict(predict_ds, steps=50, callbacks=[bc]) self.assertEqual(bc.predict_begin_batches, [0, 20, 40]) self.assertEqual(bc.predict_end_batches, [19, 39, 49]) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager'])) def test_host_training_loop_truncate_to_epoch(self, distribution): if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy): self.skipTest('b/172032817') with distribution.scope(): inputs = keras.Input(10) outputs = keras.layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) model.compile('sgd', 'mse', steps_per_execution=500) x, y = np.ones((100, 10)), np.ones((100, 1)) bc = BatchCountingCB() model.fit(x, y, batch_size=2, epochs=2, callbacks=[bc]) self.assertEqual(bc.train_begin_batches, [0, 0]) self.assertEqual(bc.train_end_batches, [49, 49]) x, y = np.ones((50, 10)), np.ones((50, 1)) model.evaluate(x, y, batch_size=2, callbacks=[bc]) self.assertEqual(bc.test_begin_batches, [0]) self.assertEqual(bc.test_end_batches, [24]) x = np.ones((50, 10)) model.predict(x, batch_size=2, callbacks=[bc]) self.assertEqual(bc.predict_begin_batches, [0]) self.assertEqual(bc.predict_end_batches, [24]) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(distribution=all_strategies, 
mode=['eager'])) def test_gradient_clipping(self, distribution): class MyLayer(keras.layers.Layer): def build(self, _): self.v1 = tf.Variable(1.) self.v2 = tf.Variable(1.) def call(self, x): return 3 * self.v1 - 3 * self.v2 x, y = np.ones((10, 1)), np.ones((10, 1)) with distribution.scope(): layer = MyLayer() model = keras.Sequential([layer]) optimizer = gradient_descent_keras.SGD(1., clipnorm=2., clipvalue=2.) model.compile(optimizer, 'mae') if isinstance(distribution, (tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): with self.assertRaisesRegex(ValueError, 'not supported'): model.fit(x, y, batch_size=10, epochs=1) else: model.fit(x, y, batch_size=10, epochs=1) self.assertAllClose(self.evaluate(layer.v1), 3.) self.assertAllClose(self.evaluate(layer.v2), -1.) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager'])) def test_custom_gradient_transformation(self, distribution): if isinstance(distribution, (tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy)): self.skipTest('Not supported with `CentralStorageStrategy`') class MyLayer(keras.layers.Layer): def build(self, _): self.v1 = tf.Variable(1.) self.v2 = tf.Variable(-1.) def call(self, x): return x + self.v1 + self.v2 def custom_transform(grads_and_vars): # Always set gradients to 1. return [(tf.ones_like(g), v) for g, v in grads_and_vars] x, y = np.ones((10, 1)), np.ones((10, 1)) with distribution.scope(): layer = MyLayer() model = keras.Sequential([layer]) optimizer = gradient_descent_keras.SGD( 1., gradient_transformers=[custom_transform]) model.compile(optimizer, 'mae') model.fit(x, y, batch_size=10, epochs=1) self.assertAllClose(self.evaluate(layer.v1), 0.) self.assertAllClose(self.evaluate(layer.v2), -2.) 
@tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( all_strategy_combinations_minus_default())) def test_distribution_strategy_one_dimensional(self, distribution): with distribution.scope(): inp = keras.layers.Input(shape=(10,)) out = keras.layers.Dense(3, activation='softmax')(inp) model = keras.Model(inputs=[inp], outputs=[out]) model.compile( optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) x = np.random.random((64, 10)).astype('float32') y = np.random.randint(3, size=64) model.fit(x, y, epochs=1, steps_per_epoch=2) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus ], mode=['graph', 'eager'], reduction=[ losses_utils.ReductionV2.AUTO, losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE, losses_utils.ReductionV2.SUM ])) def test_distribution_strategy_with_loss_reduction_types( self, distribution, reduction): np.random.seed(_RANDOM_SEED) def _get_model(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) return model x = np.random.random((64, 10)) y = np.random.random((64, 1)) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.batch(32) model = _get_model() model.compile( 'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction)) history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False) with distribution.scope(): ds_model = _get_model() ds_model.compile( 'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction)) ds_history = ds_model.fit( dataset, steps_per_epoch=2, epochs=1, shuffle=False) 
self.assertArrayNear(history.history['loss'], ds_history.history['loss'], 1e-5) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( all_strategy_combinations_minus_default())) def test_distribution_strategy_with_symbolic_add_loss( self, mode, distribution): def _make_model_with_add_loss(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) model.add_loss(tf.reduce_mean(x1)) model.add_loss(tf.reduce_mean(outputs)) return model x = np.ones((64, 10)).astype('float32') model = _make_model_with_add_loss() model.compile('sgd') history = model.fit(x, epochs=1) with distribution.scope(): ds_model = _make_model_with_add_loss() ds_model.compile( 'sgd') ds_history = ds_model.fit(x, epochs=1) self.assertAllClose(history.history, ds_history.history) # TODO(omalleyt): Investigate flakiness and re-enable. @tf.__internal__.distribute.combinations.generate(all_strategy_minus_default_and_tpu_combinations()) def DISABLED_test_distribution_strategy_with_callable_add_loss( self, distribution): def _make_model(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1) d = keras.layers.Dense(1, kernel_initializer='zeros') outputs = d(x2) model = keras.Model(inputs, outputs) model.add_loss(lambda: 100. 
* tf.reduce_mean(d.kernel)) return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model() self.assertLen(model.losses, 1) model.compile('sgd', 'mse') history = model.fit(x, y, steps_per_epoch=2, epochs=1) with distribution.scope(): ds_model = _make_model() self.assertLen(ds_model.losses, 1) ds_model.compile('sgd', 'mse') ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1) self.assertAllClose(history.history, ds_history.history) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( all_strategy_minus_default_and_tpu_combinations())) def test_distribution_strategy_with_add_metric_in_call( self, distribution): class Bias(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_weight(name='bias', initializer='zeros', shape=()) def call(self, inputs): self.add_metric( tf.reduce_mean(inputs), name='bias', aggregation='mean') return inputs + self.bias def _make_model_with_add_metric(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = Bias()(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model_with_add_metric() self.assertLen(model.metrics, 1) model.compile('sgd', 'mse') history = model.fit( x, y, validation_data=(x, y), validation_steps=2, epochs=2) with distribution.scope(): ds_model = _make_model_with_add_metric() self.assertLen(ds_model.metrics, 1) ds_model.compile( 'sgd', 'mse') ds_history = ds_model.fit( x, y, validation_data=(x, y), validation_steps=2, epochs=2) # includes stateful loss metric in eager. 
metrics_len = 2 if tf.executing_eagerly() else 1 self.assertLen(ds_model.metrics, metrics_len) self.assertAllClose(history.history, ds_history.history) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.one_device_strategy, tf.__internal__.distribute.combinations.one_device_strategy_gpu, tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus ], mode=['eager'])) def test_distribution_strategy_with_add_metric_object( self, distribution): class Bias(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_weight(name='bias', initializer='zeros', shape=()) self.mean = keras.metrics.Mean(name='mean') def call(self, inputs): self.add_metric(self.mean(inputs)) return inputs + self.bias def _make_model_with_add_metric_object(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = Bias()(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model_with_add_metric_object() self.assertLen(model.metrics, 1) model.compile('sgd', 'mse') history = model.fit( x, y, validation_data=(x, y), validation_steps=2, epochs=2) with distribution.scope(): ds_model = _make_model_with_add_metric_object() self.assertLen(ds_model.metrics, 1) ds_model.compile( 'sgd', 'mse') ds_history = ds_model.fit( x, y, validation_data=(x, y), validation_steps=2, epochs=2) # includes stateful loss metric in eager. metrics_len = 2 if tf.executing_eagerly() else 1 self.assertLen(ds_model.metrics, metrics_len) self.assertAllClose(history.history, ds_history.history) @tf.__internal__.distribute.combinations.generate( # TODO(phillypham): Why does validation_steps > 1 not work on TPUs? 
tf.__internal__.test.combinations.times( all_strategy_minus_default_and_tpu_combinations())) def test_distribution_strategy_with_add_metric_outside_call( self, distribution): def _make_model_with_add_metric(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x1) model = keras.Model(inputs, outputs) model.add_metric( tf.reduce_mean(x1), name='mid_mean', aggregation='mean') return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model_with_add_metric() self.assertLen(model.metrics, 1) model.compile('sgd', 'mse') history = model.fit( x, y, validation_data=(x, y), validation_steps=2, epochs=2) with distribution.scope(): ds_model = _make_model_with_add_metric() self.assertLen(ds_model.metrics, 1) ds_model.compile( 'sgd', 'mse') ds_history = ds_model.fit( x, y, validation_data=(x, y), validation_steps=2, epochs=2) # includes stateful loss metric in eager. metrics_len = 2 if tf.executing_eagerly() else 1 self.assertLen(ds_model.metrics, metrics_len) self.assertAllClose(history.history, ds_history.history) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategies_minus_tpu + multi_worker_mirrored_strategies, mode=['eager'])) def test_sparse_tensor_outputs(self, distribution): class ToSparse(keras.layers.Layer): """Create a sparse tensor based on a given dense tensor.""" def call(self, inputs): indices = tf.where(tf.not_equal(inputs, 0)) values = tf.gather_nd(inputs, indices) shape = tf.shape(inputs, out_type='int64') return tf.SparseTensor(indices, values, dense_shape=shape) model = keras.Sequential([ToSparse()]) # Define some input data with additional padding. 
input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data, batch_size=2) expected_indices = np.array([[0, 0], [1, 0], [1, 1]]) expected_values = np.array([1, 2, 3]) expected_dense_shape = np.array([2, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategies_minus_tpu + multi_worker_mirrored_strategies, mode=['eager'])) def test_ragged_tensor_outputs(self, distribution): class ToRagged(keras.layers.Layer): """Create a ragged tensor based on a given dense tensor.""" def __init__(self, padding, ragged_rank=1, **kwargs): super(ToRagged, self).__init__(**kwargs) self._padding = padding self._ragged_rank = ragged_rank def call(self, inputs): return tf.RaggedTensor.from_tensor( inputs, padding=self._padding, ragged_rank=self._ragged_rank) model = keras.Sequential([ToRagged(padding=0)]) # Define some input data with additional padding. 
input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data, batch_size=2) expected_values = [[1], [2, 3]] self.assertAllEqual(expected_values, output) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategies_minus_default_minus_tpu + tpu_strategies + multi_worker_mirrored_strategies, mode=['eager'])) def test_correctness_of_add_loss_with_merge_call(self, distribution): batch_size = 32 def _get_model(): inputs = keras.layers.Input(shape=(1,)) labels = keras.layers.Input(shape=(1,)) x = keras.layers.Dense(10, activation='relu')(inputs) y = keras.layers.Dense(1)(x) model = keras.models.Model([inputs, labels], y) model.add_loss(keras.losses.mean_squared_error(labels, y)) return model def _get_data(): x_train = np.random.rand(64, 1) y_train = 3 * x_train x_train = x_train.astype('float32') y_train = y_train.astype('float32') dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) dataset = dataset.batch(batch_size) return dataset with distribution.scope(): model = _get_model() optimizer = gradient_descent_keras.SGD(0.2) @tf.function def train_step(dist_inputs): def step_fn(inputs): with tf.GradientTape() as tape: logits = model(inputs) # Invoke a merge_call() tf.distribute.get_replica_context().merge_call( lambda d: None) # Verify that there is only one loss on the model. assert len(model.losses) == 1 loss_from_model = tf.reduce_sum( model.losses) * 1.0 / batch_size # Compute loss in this loop. loss = keras.losses.mean_squared_error(inputs[1], logits) loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size) # Verify that the loss computed in this loop is equivalent to the # loss from the model that was added via add_loss. 
tf.compat.v1.assert_equal(loss, loss_from_model) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) return loss per_replica_losses = distribution.run(step_fn, args=(dist_inputs,)) return distribution.reduce( tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None) dataset = distribution.experimental_distribute_dataset(_get_data()) for _ in range(2): for x in dataset: train_step(x) @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['graph', 'eager'])) def test_unimplemented_parameter_server_strategy(self): cluster_spec = multi_worker_testing_utils.create_in_process_cluster( num_workers=3, num_ps=2) cluster_resolver = SimpleClusterResolver( cluster_spec=tf.train.ClusterSpec(cluster_spec), task_type='worker', task_id=1, num_accelerators={'GPU': 0}) distribution = tf.compat.v1.distribute.experimental.ParameterServerStrategy( cluster_resolver) self.assertIsInstance(distribution, tf.compat.v1.distribute.experimental.ParameterServerStrategy) with self.assertRaisesRegex(NotImplementedError, 'ParameterServerStrategy*'): with distribution.scope(): model = simple_sequential_model() optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss) # Models to exercise inserting ancillary layers with add_loss and add_metric. def _functional_with_add_loss_and_metric(input_shape, num_classes, l1, l2): inputs = keras.Input(input_shape, name='images') x = keras.layers.Conv2D(32, kernel_size=5, activation='relu')(inputs) x = keras.layers.MaxPooling2D(pool_size=2)(x) x = keras.layers.Conv2D(64, kernel_size=5, activation='relu')(x) x = keras.layers.MaxPooling2D(pool_size=2)(x) # Apply L2 regularization to embedding. Use a mix of TensorFlow ops and layers # to exercise all code paths. 
x = keras.layers.Flatten(name='embedding')(x) l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(x), -1)) # Apply L1 regularization to next layer. x = keras.layers.Dense(1024, activation='relu', name='sparse_embedding')(x) l1_loss = keras.layers.Lambda( lambda x: tf.reduce_mean(tf.reduce_sum(x, -1)), name='l1_loss')( x) outputs = keras.layers.Dense(num_classes, name='logits')(x) model = keras.Model(inputs=inputs, outputs=outputs) # Weight regularization terms. model.add_loss(keras.layers.Lambda(lambda x: x * l2)(l2_loss)) model.add_metric(l2_loss, aggregation='mean', name='l2_loss') model.add_loss(l1_loss * l1) model.add_metric(l1_loss, aggregation='mean', name='l1_loss') return model def _sequential_with_add_loss_and_metric(input_shape, num_classes, l1, l2): model = keras.Sequential([ keras.layers.Conv2D( 32, kernel_size=5, activation='relu', input_shape=input_shape), keras.layers.MaxPooling2D(pool_size=2), keras.layers.Conv2D(64, kernel_size=5, activation='relu'), keras.layers.MaxPooling2D(pool_size=2), keras.layers.Flatten(name='embedding'), keras.layers.Dense(1024, activation='relu', name='sparse_embedding'), keras.layers.Dense(num_classes, name='logits'), ]) # Extract layer outputs, add regularization terms, and rescale the metric. # Use a mix of TensorFlow ops and layers to exercise all code paths. 
x = model.get_layer('sparse_embedding').get_output_at(-1) l1_loss = l1 * tf.reduce_mean(tf.reduce_sum(x, -1)) model.add_loss(l1_loss) model.add_metric( keras.layers.Lambda(lambda x: tf.divide(x, l1))(l1_loss), aggregation='mean', name='l1_loss') x = model.get_layer('embedding').get_output_at(-1) l2_loss = keras.layers.Lambda( lambda x: l2 * tf.reduce_mean(tf.reduce_sum(x * x, -1)), name='l2_loss')( x) model.add_loss(l2_loss) model.add_metric(l2_loss / l2, aggregation='mean', name='l2_loss') return model def _functional_with_layer_reuse(input_shape, num_classes, l1, l2): base_model = keras.Sequential([ keras.layers.Conv2D( 32, kernel_size=5, activation='relu', input_shape=input_shape), keras.layers.MaxPooling2D(pool_size=2), keras.layers.Conv2D(64, kernel_size=5, activation='relu'), keras.layers.MaxPooling2D(pool_size=2), keras.layers.Flatten(), keras.layers.Dense(1024, activation='relu'), keras.layers.Dense(num_classes, name='logits'), ]) inputs = keras.Input(input_shape, name='images') logits = base_model(inputs) model = keras.Model(inputs=inputs, outputs=logits) # Reuse sequential layer and create new nodes. zero_logits = base_model(tf.zeros_like(inputs)) one_logits = base_model(tf.ones_like(inputs)) # L2 loss. l2_loss = tf.reduce_mean( tf.reduce_sum(tf.square(logits - zero_logits), -1)) model.add_loss(l2_loss * l2) model.add_metric(l2_loss, aggregation='mean', name='l2_loss') # L1 loss. 
l1_loss = tf.reduce_mean( tf.reduce_sum(tf.abs(logits - one_logits), -1)) model.add_loss(l1_loss * l1) model.add_metric(l1_loss, aggregation='mean', name='l1_loss') return model class TestDistributionStrategyWithMultipleAddLossAndMetricCalls( tf.test.TestCase, parameterized.TestCase): """Tests complex models with multiple add loss and metric calls.""" @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( all_strategy_combinations_minus_default(), tf.__internal__.test.combinations.combine( model_fn=[ _functional_with_add_loss_and_metric, _sequential_with_add_loss_and_metric, _functional_with_layer_reuse, ], l1=[0.01], l2=[0.1]))) def test_fit_and_evaluate(self, distribution, model_fn, l1, l2): # Make fake MNIST-like image data. np.random.seed(_RANDOM_SEED) dataset = tf.data.Dataset.from_tensor_slices( (np.random.uniform(size=(64, 28, 28, 1)).astype(np.float32), np.random.randint(0, 10, size=(64,)))) dataset = dataset.shuffle(64).batch( 8 * distribution.num_replicas_in_sync, drop_remainder=True) # Make model with distribution strategy and initialize with dataset shape. input_shape = tf.data.experimental.get_structure(dataset)[0].shape[1:] with distribution.scope(): model = model_fn(input_shape, 10, l1, l2) model.compile( optimizer=keras.optimizers.adam_v2.Adam(1e-4), loss=keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE), metrics=[ keras.metrics.SparseCategoricalAccuracy(), keras.metrics.SparseCategoricalCrossentropy(from_logits=True), ]) # Non-eager training doesn't support steps_per_epoch=None. for unused_epoch in range(2): model.fit(dataset) results = dict(zip(model.metrics_names, model.evaluate(dataset))) # Sanity checks. self.assertBetween(results['sparse_categorical_accuracy'], 0.02, 1.) self.assertGreater(results['l2_loss'], 0.) self.assertGreater(results['l1_loss'], 0.) # Assert correctness of the loss calculation and updating of metrics. 
self.assertNear( results['l1_loss'] * l1 + results['l2_loss'] * l2 + results['sparse_categorical_crossentropy'], results['loss'], 1e-6) class DeterministicModel(keras.Model): """Deterministic Model that always outputs the same initial result. It verifies the `call` method is run inside the same distribution strategy that the model was initially passed. """ def __init__(self, strategy): super(DeterministicModel, self).__init__() self.x = None self.strategy = strategy def build(self, input_shape): self.x = tf.Variable(tf.ones(shape=())) def call(self, inputs, training=None, mask=None): active_strategy = tf.distribute.get_strategy() if active_strategy is not self.strategy: raise ValueError('Model must execute call w/ the original strategy') return self.x * inputs class TestModelCapturesStrategy(tf.test.TestCase, parameterized.TestCase): """Tests that model creation captures the strategy.""" @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager'])) def test_fit_and_evaluate(self, distribution): dataset = tf.data.Dataset.from_tensor_slices( (tf.ones(shape=(64,)), tf.ones(shape=(64,)))) dataset = dataset.batch(8 * distribution.num_replicas_in_sync) # Make model with distribution strategy with distribution.scope(): model = DeterministicModel(distribution) optimizer = keras.optimizers.adam_v2.Adam(1e-4) # Compile & evaluate the model outside of the distribution strategy scope model.compile( optimizer=optimizer, loss=keras.losses.MeanSquaredError(), metrics=['binary_accuracy']) # Call `optimizer.iterations` out of strategy scope. self.assertEqual(model.optimizer.iterations.numpy(), 0) # Non-eager training doesn't support steps_per_epoch=None. 
for unused_epoch in range(2): model.fit(dataset) results = model.evaluate(dataset) results = dict(zip(model.metrics_names, results)) # Check that the metrics have a result we expect self.assertEqual(results['binary_accuracy'], 1.0) self.assertAllClose(results['loss'], 0.0) # Assert that all metric/optimizer/model variables were made in the # distribution strategy (Test that compile uses the captured # distribution strategy) metric_vars = tf.nest.flatten( [metric.variables for metric in model.metrics]) for var in metric_vars: self.assertTrue(distribution.extended.variable_created_in_scope(var)) for var in model.optimizer._weights: self.assertTrue(distribution.extended.variable_created_in_scope(var)) for var in model.variables: self.assertTrue(distribution.extended.variable_created_in_scope(var)) # Make sure the metric must be created in the same scope as the model: # This shouldn't raise any validation errors with distribution.scope(): metric = keras.metrics.BinaryAccuracy() model.compile( optimizer=optimizer, loss=keras.losses.MeanSquaredError(), metrics=[metric]) # This should raise an error because the metric is constructed # outside of the scope, and not by compile if tf.distribute.has_strategy(): with self.assertRaisesRegex(ValueError, 'All metrics must be created in'): model.compile( optimizer=keras.optimizers.adam_v2.Adam(1e-4), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.BinaryAccuracy()]) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, mode=['eager'])) def test_optimizer(self, distribution): temp_dir = os.path.join(self.get_temp_dir(), 'ckpt') def create_model(): model = keras.models.Sequential([ keras.layers.Dense(1), ]) model.compile(optimizer='adam', loss='mse') model.build([None, 1]) # create weights. 
self.assertEmpty(model.optimizer.weights) return model model = create_model() x = y = tf.ones(shape=(1, 1)) model.fit(x=x, y=y, batch_size=1) model.save_weights(temp_dir) with distribution.scope(): model = create_model() model.load_weights(temp_dir) self.assertNotEmpty(model.optimizer.weights) self.assertTrue( distributed_training_utils.is_distributed_variable( model.optimizer.weights[0])) with distribution.scope(): model = create_model() # create/restore slot variables outside of scope is fine. model.load_weights(temp_dir) self.assertNotEmpty(model.optimizer.weights) self.assertTrue( distributed_training_utils.is_distributed_variable( model.optimizer.weights[0])) if __name__ == '__main__': base_layer_utils.enable_v2_dtype_behavior() tf.__internal__.distribute.multi_process_runner.test_main()
105,909
38.459762
119
py
keras
keras-master/keras/distribute/tpu_strategy_test_utils.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility functions for tests using TPUStrategy.""" import tensorflow.compat.v2 as tf from absl import flags FLAGS = flags.FLAGS flags.DEFINE_string("tpu", "", "Name of TPU to connect to.") flags.DEFINE_string("project", None, "Name of GCP project with TPU.") flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.") def get_tpu_cluster_resolver(): resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project, ) return resolver def get_tpu_strategy(): resolver = get_tpu_cluster_resolver() tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) return tf.distribute.experimental.TPUStrategy(resolver)
1,442
34.195122
80
py
keras
keras-master/keras/distribute/keras_dnn_correctness_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras DNN model using DistributionStrategy.""" import tensorflow.compat.v2 as tf import numpy as np import keras from keras import backend from keras import testing_utils from keras.distribute import keras_correctness_test_base from keras.distribute import strategy_combinations from keras.optimizer_v2 import gradient_descent as gradient_descent_keras def all_strategy_combinations_with_eager_and_graph_modes(): return (tf.__internal__.test.combinations.combine( distribution=strategy_combinations.all_strategies, mode=['graph', 'eager']) + tf.__internal__.test.combinations.combine( distribution=strategy_combinations.multi_worker_mirrored_strategies, mode='eager')) def all_strategy_combinations_with_graph_mode(): return (tf.__internal__.test.combinations.combine( distribution=keras_correctness_test_base.all_strategies, mode=['graph'])) def is_default_strategy(strategy): with strategy.scope(): return not tf.distribute.has_strategy() @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class TestDistributionStrategyDnnCorrectness( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, initial_weights=None, distribution=None, input_shapes=None): with 
keras_correctness_test_base.MaybeDistributionScope(distribution): # We add few non-linear layers to make it non-trivial. model = keras.Sequential() model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,))) model.add( keras.layers.Dense( 10, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-4))) model.add(keras.layers.Dense(10, activation='relu')) model.add(keras.layers.Dense(1)) if initial_weights: model.set_weights(initial_weights) model.compile( loss=keras.losses.mean_squared_error, optimizer=gradient_descent_keras.SGD(0.05), metrics=['mse']) return model def get_data(self): x_train = np.random.rand(9984, 1).astype('float32') y_train = 3 * x_train x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32) return x_train, y_train, x_predict def get_data_with_partial_last_batch(self): x_train = np.random.rand(10000, 1).astype('float32') y_train = 3 * x_train x_eval = np.random.rand(10000, 1).astype('float32') y_eval = 3 * x_eval x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32) return x_train, y_train, x_eval, y_eval, x_predict def get_data_with_partial_last_batch_eval(self): x_train = np.random.rand(9984, 1).astype('float32') y_train = 3 * x_train x_eval = np.random.rand(10000, 1).astype('float32') y_eval = 3 * x_eval x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32) return x_train, y_train, x_eval, y_eval, x_predict @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_dnn_correctness(self, distribution, use_numpy, use_validation_data): self.run_correctness_test(distribution, use_numpy, use_validation_data) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base .test_combinations_with_tpu_strategies_graph() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_dnn_correctness_with_partial_last_batch_eval(self, distribution, 
use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch='eval') @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base .strategy_minus_tpu_and_input_config_combinations_eager() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_dnn_correctness_with_partial_last_batch(self, distribution, use_numpy, use_validation_data): distribution.extended.experimental_enable_get_next_as_optional = True self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch='train_and_eval', training_epochs=1) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations_with_graph_mode()) def test_dnn_with_dynamic_learning_rate(self, distribution): self.run_dynamic_lr_test(distribution) class TestDistributionStrategyDnnMetricCorrectness( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, distribution=None, input_shapes=None): with distribution.scope(): model = keras.Sequential() model.add( keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones')) model.compile( loss=keras.losses.mean_squared_error, optimizer=gradient_descent_keras.SGD(0.05), metrics=[keras.metrics.BinaryAccuracy()]) return model def run_metric_correctness_test(self, distribution): with self.cached_session(): self.set_up_test_config() x_train, y_train, _ = self.get_data() model = self.get_model( distribution=distribution) batch_size = 64 batch_size = ( keras_correctness_test_base.get_batch_size(batch_size, distribution)) train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = ( keras_correctness_test_base.batch_wrapper(train_dataset, batch_size)) history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10) self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0]) @tf.__internal__.distribute.combinations.generate( all_strategy_combinations_with_eager_and_graph_modes()) def 
test_simple_dnn_metric_correctness(self, distribution): self.run_metric_correctness_test(distribution) class TestDistributionStrategyDnnMetricEvalCorrectness( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, distribution=None, input_shapes=None): with distribution.scope(): model = keras.Sequential() model.add( keras.layers.Dense( 3, activation='relu', input_dim=4, kernel_initializer='ones')) model.add( keras.layers.Dense( 1, activation='sigmoid', kernel_initializer='ones')) model.compile( loss='mae', metrics=['accuracy', keras.metrics.BinaryAccuracy()], optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001)) return model def run_eval_metrics_correctness_test(self, distribution): with self.cached_session(): self.set_up_test_config() model = self.get_model( distribution=distribution) # verify correctness of stateful and stateless metrics. x = np.ones((100, 4)).astype('float32') y = np.ones((100, 1)).astype('float32') dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat() dataset = keras_correctness_test_base.batch_wrapper(dataset, 4) outs = model.evaluate(dataset, steps=10) self.assertEqual(outs[1], 1.) self.assertEqual(outs[2], 1.) y = np.zeros((100, 1)).astype('float32') dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat() dataset = keras_correctness_test_base.batch_wrapper(dataset, 4) outs = model.evaluate(dataset, steps=10) self.assertEqual(outs[1], 0.) self.assertEqual(outs[2], 0.) 
@tf.__internal__.distribute.combinations.generate( all_strategy_combinations_with_eager_and_graph_modes()) def test_identity_model_metric_eval_correctness(self, distribution): self.run_eval_metrics_correctness_test(distribution) class SubclassedModel(keras.Model): def __init__(self, initial_weights, input_shapes): super(SubclassedModel, self).__init__() self.dense1 = keras.layers.Dense(10, activation='relu', input_shape=(1,)) self.dense2 = keras.layers.Dense( 10, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-4)) self.dense3 = keras.layers.Dense(10, activation='relu') self.dense4 = keras.layers.Dense(1) if input_shapes: self.build(input_shapes) else: # This covers cases when the input is DatasetV1Adapter. self.build((None, 1)) if initial_weights: self.set_weights(initial_weights) def call(self, inputs): x = self.dense1(inputs) x = self.dense2(x) x = self.dense3(x) return self.dense4(x) @testing_utils.run_all_without_tensor_float_32( 'Uses Dense layers, which call matmul') class TestDistributionStrategyDnnCorrectnessWithSubclassedModel( TestDistributionStrategyDnnCorrectness): def get_model(self, initial_weights=None, distribution=None, input_shapes=None): with keras_correctness_test_base.MaybeDistributionScope(distribution): model = SubclassedModel(initial_weights, input_shapes) model.compile( loss=keras.losses.mean_squared_error, optimizer=gradient_descent_keras.SGD(0.05), metrics=['mse']) return model @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations() + keras_correctness_test_base.multi_worker_mirrored_eager()) def test_dnn_correctness(self, distribution, use_numpy, use_validation_data): if (tf.executing_eagerly()) or is_default_strategy(distribution): self.run_correctness_test(distribution, use_numpy, use_validation_data) elif (backend.is_tpu_strategy(distribution) and not tf.executing_eagerly()): with self.assertRaisesRegex( ValueError, 'Expected `model` argument to be a 
functional `Model` instance, ' 'but got a subclass model instead.'): self.run_correctness_test(distribution, use_numpy, use_validation_data) else: with self.assertRaisesRegex( ValueError, 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without `input_shape`/' '`input_dim` set in its first layer or a subclassed model.'): self.run_correctness_test(distribution, use_numpy, use_validation_data) @tf.__internal__.distribute.combinations.generate(all_strategy_combinations_with_graph_mode()) def test_dnn_with_dynamic_learning_rate(self, distribution): if ((tf.executing_eagerly() and not backend.is_tpu_strategy(distribution)) or is_default_strategy(distribution)): self.run_dynamic_lr_test(distribution) elif backend.is_tpu_strategy(distribution): with self.assertRaisesRegex( ValueError, 'Expected `model` argument to be a functional `Model` instance, ' 'but got a subclass model instead.'): self.run_dynamic_lr_test(distribution) else: with self.assertRaisesRegex( ValueError, 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without `input_shape`/' '`input_dim` set in its first layer or a subclassed model.'): self.run_dynamic_lr_test(distribution) @tf.__internal__.distribute.combinations.generate( keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()) def test_dnn_correctness_with_partial_last_batch_eval(self, distribution, use_numpy, use_validation_data): with self.assertRaisesRegex( ValueError, 'Expected `model` argument to be a functional `Model` instance, ' 'but got a subclass model instead.'): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch='eval') if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main()
13,097
39.177914
96
py
keras
keras-master/keras/distribute/keras_optimizer_v2_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests that show that DistributionStrategy works with optimizer v2.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import numpy as np import keras from keras.optimizer_v2 import adam from keras.optimizer_v2 import gradient_descent def get_model(): x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) return model class MirroredStrategyOptimizerV2Test(tf.test.TestCase, parameterized.TestCase): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, ], mode=['graph', 'eager'])) def testKerasOptimizerWithUnequalInput(self, distribution): with distribution.scope(): var = tf.Variable( 2.0, name='var', aggregation=tf.VariableAggregation.SUM) optimizer = adam.Adam(learning_rate=0.01, beta_1=0.2, beta_2=0.2) all_vars = [] def model_fn(): def loss_fn(): replica_id = _replica_id() return tf.cast(replica_id + 1, dtype=tf.float32) * 0.5 * var train_op = optimizer.minimize(loss_fn, var_list=[var]) return train_op, optimizer def train_fn(): train_op, optimizer = distribution.extended.call_for_each_replica( model_fn) if not all_vars: all_vars.append(var) 
all_vars.append(optimizer.get_slot(var, 'm')) all_vars.append(optimizer.get_slot(var, 'v')) return distribution.group(train_op) if not tf.executing_eagerly(): with self.cached_session() as sess: train_fn = sess.make_callable(train_fn()) self.evaluate(tf.compat.v1.global_variables_initializer()) # first step. train_fn() # var(1) = var(0) - lr * m(1) * sqrt(1 - beta2) / sqrt(v(1)) / (1 - beta1) # = 2.0 - 0.01 * 1.2 * sqrt(0.8) / sqrt(1.8) / 0.8 self.assertAllClose(1.99, self.evaluate(all_vars[0])) # m(1) = beta1 * m(0) + (1-beta1) * grad = 0.2 * 0 + 0.8 * (1 + 2) / 2 self.assertAllClose(1.2, self.evaluate(all_vars[1])) # v(1) = beta2 * v(0) + (1-beta2) * grad^2 = 0.2 * 0 + 0.8 * 2.25 self.assertAllClose(1.8, self.evaluate(all_vars[2])) # second step. train_fn() # var(1) = var(0) - lr * 2 = 1.98 self.assertAllClose(1.98, self.evaluate(all_vars[0])) # m(2) = beta1 * m(1) + (1-beta1) * grad = 0.2 * 1.2 + 0.8 * 1.5 self.assertAllClose(1.44, self.evaluate(all_vars[1])) # v(2) = beta2 * v(1) + (1-beta2) * grad^2 = 0.2 * 1.8 + 0.8 * 2.25 self.assertAllClose(2.16, self.evaluate(all_vars[2])) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, ], mode=['graph', 'eager'])) def testOptimizerWithKerasModelAndNumpyArrays(self, distribution): with self.cached_session(): with distribution.scope(): model = get_model() optimizer = gradient_descent.SGD(0.001) loss = 'mse' metrics = ['mae'] model.compile( optimizer, loss, metrics=metrics) inputs = np.zeros((64, 3), dtype=np.float32) targets = np.zeros((64, 4), dtype=np.float32) model.fit( inputs, targets, epochs=1, batch_size=2, verbose=0, validation_data=(inputs, targets)) model.evaluate(inputs, targets) model.predict(inputs) def _replica_id(): replica_id = tf.distribute.get_replica_context().replica_id_in_sync_group if not isinstance(replica_id, tf.Tensor): replica_id = tf.constant(replica_id) 
return replica_id if __name__ == '__main__': tf.test.main()
4,647
33.947368
93
py