repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
keras | keras-master/keras/distribute/checkpointing_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras.optimizer_v2 import adam
class TrainingCheckpointTests(tf.test.TestCase, parameterized.TestCase):
  """Tests checkpoint save/restore interactions with `tf.distribute` strategies."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.tpu_strategy,
              tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
              tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
          ],
          mode=["eager"]))
  def testCheckpointRestoreOptimizerSlots(self, distribution):
    """Checks optimizer slot variables are restored under the strategy scope.

    Saves a checkpoint containing a distributed variable and an Adam optimizer
    (whose slot variables, e.g. "m", are created lazily on first apply), then
    verifies that restoring inside `distribution.scope()` produces slots with
    the same distribution as the variable, while restoring outside the scope
    raises a `ValueError`.
    """

    def state():
      # Builds fresh (variable, optimizer, train-step) state. The variable is
      # created under the strategy scope; the optimizer itself is not.
      with distribution.scope():
        v = tf.Variable(tf.random.normal([]))
      opt = adam.Adam(0.001)

      @tf.function
      def step():

        def f():
          with tf.GradientTape() as tape:
            loss = v + v
          gradients = tape.gradient(loss, [v])
          opt.apply_gradients(zip(gradients, [v]))

        distribution.run(f)

      return v, opt, step

    def checkpoint():
      v, opt, step = state()
      # Run one step so the optimizer slot variables exist before saving.
      step()

      # Save random weights into checkpoint.
      checkpoint = tf.train.Checkpoint(v=v, opt=opt)
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      with self.test_session():
        save_path = checkpoint.save(prefix)
      return save_path

    save_path = checkpoint()

    v, opt, step = state()
    checkpoint = tf.train.Checkpoint(v=v, opt=opt)
    # Restore from the checkpoint inside a distribution.scope().
    with self.test_session():
      with distribution.scope():
        checkpoint.restore(save_path)
        step()
        slot = opt.get_slot(v, "m")
        # The restored slot should be distributed the same way as `v`.
        self.assertEqual(v._distribute_strategy, slot._distribute_strategy)

    v, opt, step = state()
    checkpoint = tf.train.Checkpoint(v=v, opt=opt)
    # Restore from the checkpoint outside a distribution.scope().
    with self.test_session():
      with self.assertRaisesRegex(
          ValueError, "optimizer slot variable under the scope"):
        checkpoint.restore(save_path)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.cloud_tpu_strategy,
              tf.__internal__.distribute.combinations.tpu_strategy,
              tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
              tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
          ],
          mode=["eager"]))
  def testCheckpointSaveRestoreIoDevice(self, distribution):
    """Checks save/restore works with `experimental_io_device` set.

    Only verifies the round trip completes without error when checkpoint I/O is
    pinned to "/job:localhost" via `tf.train.CheckpointOptions`.
    """

    def state():
      with distribution.scope():
        v = tf.Variable(tf.random.normal([]))
      return v

    ckpt_options = tf.train.CheckpointOptions(
        experimental_io_device="/job:localhost")

    def checkpoint():
      v = state()
      # Save random weights into checkpoint.
      checkpoint = tf.train.Checkpoint(v=v)
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      with self.test_session():
        save_path = checkpoint.save(prefix, options=ckpt_options)
      return save_path

    save_path = checkpoint()

    v = state()
    checkpoint = tf.train.Checkpoint(v=v)
    # Restore from the checkpoint inside a distribution.scope().
    # Check that restore works without error.
    with self.test_session():
      with distribution.scope():
        checkpoint.restore(save_path, options=ckpt_options)
if __name__ == "__main__":
  # The test combinations above only run in eager mode, so force eager on
  # (relevant when running under TF1-style entry points).
  tf.compat.v1.enable_eager_execution()
  tf.test.main()
| 4,607 | 34.72093 | 93 | py |
keras | keras-master/keras/distribute/sidecar_evaluator.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module for evaluation loop."""
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export # pylint: disable=g-direct-tensorflow-import
# How often (in seconds) evaluation-step progress would be printed, per the
# module name; currently unreferenced in this file — TODO confirm usage.
_PRINT_EVAL_STEP_EVERY_SEC = 60.0
# Sentinel for `SidecarEvaluator._iterations` before any checkpoint restores it.
_ITERATIONS_UNINITIALIZED = -1
def list_checkpoint_attributes(ckpt_dir_or_file):
  """Lists all the attributes in a checkpoint.

  Checkpoint keys are paths in a checkpoint graph, and attribute is the first
  element in the path. e.g. with a checkpoint key
  "optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE", optimizer is the attribute. The
  attribute is also used to save/restore a variable in a checkpoint,
  e.g. tf.train.Checkpoint(optimizer=optimizer, model=model).

  Args:
    ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.

  Returns:
    Set of attributes in a checkpoint.
  """
  ckpt_reader = tf.train.load_checkpoint(ckpt_dir_or_file)
  attributes = set()
  # Each checkpoint key looks like "attr/sub/path/..."; keep the leading
  # path component only.
  for ckpt_key in ckpt_reader.get_variable_to_shape_map():
    attributes.add(ckpt_key.split('/')[0])
  return attributes
@keras_export('keras.experimental.SidecarEvaluator', v1=[])
class SidecarEvaluator:
  """A class designed for a dedicated evaluator task.

  `SidecarEvaluator` is expected to be run in a process on a separate machine
  from the training cluster. It is meant for the purpose of a dedicated
  evaluator, evaluating the metric results of a training cluster which has one
  or more workers performing the training, and saving checkpoints.

  The `SidecarEvaluator` API is compatible with both Custom Training Loop (CTL),
  and Keras `Model.fit` to be used in the training cluster. Using the model
  (with compiled metrics) provided at `__init__`, `SidecarEvaluator` repeatedly
  performs evaluation "epochs" when it finds a checkpoint that has not yet been
  used. Depending on the `steps` argument, an eval epoch is evaluation over all
  eval data, or up to certain number of steps (batches). See examples below for
  how the training program should save the checkpoints in order to be recognized
  by `SidecarEvaluator`.

  Since under the hood, `SidecarEvaluator` uses `model.evaluate` for evaluation,
  it also supports arbitrary Keras callbacks. That is, if one or more callbacks
  are provided, their `on_test_batch_begin` and `on_test_batch_end` methods are
  called at the start and end of a batch, and their `on_test_begin` and
  `on_test_end` are called at the start and end of an evaluation epoch. Note
  that `SidecarEvaluator` may skip some checkpoints because it always picks up
  the latest checkpoint available, and during an evaluation epoch, multiple
  checkpoints can be produced from the training side.

  Example:
  ```python
  model = tf.keras.models.Sequential(...)
  model.compile(metrics=tf.keras.metrics.SparseCategoricalAccuracy(
      name="eval_metrics"))
  data = tf.data.Dataset.from_tensor_slices(...)

  tf.keras.experimental.SidecarEvaluator(
      model=model,
      data=data,
      checkpoint_dir='/tmp/checkpoint_dir',  # dir for training-saved checkpoint
      steps=None,  # Eval until dataset is exhausted
      max_evaluations=None,  # The evaluation needs to be stopped manually
      callbacks=[tf.keras.callbacks.TensorBoard(log_dir='/tmp/log_dir')]
  ).start()
  ```

  `SidecarEvaluator.start` writes a series of summary
  files which can be visualized by tensorboard (which provides a webpage link):

  ```bash
  $ tensorboard --logdir=/tmp/log_dir/validation
  ...
  TensorBoard 2.4.0a0 at http://host:port (Press CTRL+C to quit)
  ```

  If the training cluster uses a CTL, the `checkpoint_dir` should contain
  checkpoints that track both `model` and `optimizer`, to fulfill
  `SidecarEvaluator`'s expectation. This can be done by a
  `tf.train.Checkpoint` and a `tf.train.CheckpointManager`:

  ```python
  checkpoint_dir = ...  # Same `checkpoint_dir` supplied to `SidecarEvaluator`.
  checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
  checkpoint_manager = tf.train.CheckpointManager(
      checkpoint, checkpoint_dir=..., max_to_keep=...)
  checkpoint_manager.save()
  ```

  If the training cluster uses Keras `Model.fit` API, a
  `tf.keras.callbacks.ModelCheckpoint` should be used, with
  `save_weights_only=True`, and the `filepath` should have 'ckpt-{epoch}'
  appended:

  ```python
  checkpoint_dir = ...  # Same `checkpoint_dir` supplied to `SidecarEvaluator`.
  model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
      filepath=os.path.join(checkpoint_dir, 'ckpt-{epoch}'),
      save_weights_only=True)
  model.fit(dataset, epochs, callbacks=[model_checkpoint])
  ```
  """

  def __init__(self,
               model,
               data,
               checkpoint_dir,
               steps=None,
               max_evaluations=None,
               callbacks=None):
    """Initializes an `SidecarEvaluator` object.

    Args:
      model: Model to use for evaluation. The model object used here should be a
        `tf.keras.Model`, and should be the same as the one that is used in
        training, where `tf.keras.Model`s are checkpointed. The model should
        have one or more metrics compiled before using `SidecarEvaluator`.
      data: The input data for evaluation. `SidecarEvaluator` supports all data
        types that Keras `model.evaluate` supports as the input data `x`, such
        as a `tf.data.Dataset`.
      checkpoint_dir: Directory where checkpoint files are saved.
      steps: Number of steps to perform evaluation for, when evaluating a single
        checkpoint file. If `None`, evaluation continues until the dataset is
        exhausted. For repeated evaluation dataset, user must specify `steps` to
        avoid infinite evaluation loop.
      max_evaluations: Maximum number of the checkpoint file to be evaluated,
        for `SidecarEvaluator` to know when to stop. The evaluator will stop
        after it evaluates a checkpoint filepath ending with
        '<ckpt_name>-<max_evaluations>'. If using
        `tf.train.CheckpointManager.save` for saving checkpoints, the kth saved
        checkpoint has the filepath suffix '<ckpt_name>-<k>' (k=1 for the first
        saved), and if checkpoints are saved every epoch after training, the
        filepath saved at the kth epoch would end with '<ckpt_name>-<k>. Thus,
        if training runs for n epochs, and the evaluator should end after the
        training finishes, use n for this parameter. Note that this is not
        necessarily equal to the number of total evaluations, since some
        checkpoints may be skipped if evaluation is slower than checkpoint
        creation. If `None`, `SidecarEvaluator` will evaluate indefinitely, and
        the user must terminate evaluator program themselves.
      callbacks: List of `keras.callbacks.Callback` instances to apply during
        evaluation. See [callbacks](/api_docs/python/tf/keras/callbacks).
    """
    self.model = model
    self.data = data
    self.checkpoint_dir = checkpoint_dir
    # Tracks the training iteration restored from the checkpoint; stays at the
    # sentinel until a checkpoint with an `iterations` variable is loaded.
    self._iterations = tf.Variable(
        name='iterations',
        initial_value=_ITERATIONS_UNINITIALIZED,
        dtype=tf.int64)
    self.max_evaluations = max_evaluations
    self.steps = steps
    self.callbacks = callbacks or []

  def start(self):
    """Starts the evaluation loop."""
    # Mirror the training checkpoint structure: `optimizer/iter` maps onto our
    # local `_iterations` variable.
    optimizer_checkpoint = tf.train.Checkpoint(iter=self._iterations)
    checkpoint = tf.train.Checkpoint(
        model=self.model, optimizer=optimizer_checkpoint)

    # Blocks until a new checkpoint appears in `checkpoint_dir`.
    for latest_checkpoint in tf.train.checkpoints_iterator(self.checkpoint_dir):
      try:
        # `expect_partial` because the checkpoint can have other `Trackable`s
        # such as `optimizer`.
        checkpoint.restore(latest_checkpoint).expect_partial()
        checkpoint_attributes = list_checkpoint_attributes(latest_checkpoint)
        # The checkpoint should contain model and optimizer for SidecarEvaluator
        # to work. But the model weights saved by ModelCheckpoint callback does
        # not contain model as an attribute. To make SidecarEvaluator compatibly
        # work in this case, use model.load_weights to load the model's weights,
        # while self._iterations is still restored by checkpoint variable.
        if 'model' not in checkpoint_attributes:
          self.model.load_weights(latest_checkpoint)
        # The model checkpoint might not include optimizer in cases, e.g.
        # using a custom training loop. Directly assign the iterations
        # property to be used in callbacks.
        if self.model.optimizer:
          self.model.optimizer.iterations.assign(self._iterations)
      except (tf.errors.OpError,) as e:
        # A couple errors can happen here with the coordinator racing to write
        # checkpoint:
        # 1) OpError: open failed for <file path>: No such file or directory
        # 2) NotFoundError (subclass of OpError): Unsuccessful
        # TensorSliceReader constructor.
        # TODO(rchao): Remove this except block once b/150954027 is resolved.
        logging.info(
            'SidecarEvaluator encountered an error when loading the checkpoint '
            f'at {latest_checkpoint}. Retrying. '
            f'Error: {e.__class__.__name__}: {e}')
        continue

      if self._iterations.numpy() == _ITERATIONS_UNINITIALIZED:
        raise RuntimeError(
            'Variable `iterations` cannot be loaded from the '
            f'checkpoint file at {self.checkpoint_dir}. '
            'Please ensure `iterations` is '
            'included in the checkpoint saved during training.')

      logging.info(
          'Evaluation starts: Model weights loaded from latest '
          f'checkpoint file {latest_checkpoint}')

      self.model.evaluate(
          self.data, steps=self.steps, callbacks=self.callbacks, verbose=2)

      # Collect metric results; a metric may return a dict of sub-metrics.
      return_metrics = {}
      for metric in self.model.metrics:
        result = metric.result()
        if isinstance(result, dict):
          return_metrics.update(result)
        else:
          return_metrics[metric.name] = result

      logging.info(
          'End of evaluation. Metrics: %s', ' '.join([
              '{}={}'.format(name, value.numpy())
              for name, value in return_metrics.items()
          ]))

      if (self.max_evaluations and
          (self.max_evaluations == int(latest_checkpoint.split('-')[-1]))):
        # Exit the loop because we have evaluated the final checkpoint file.
        logging.info('Last checkpoint evaluated. SidecarEvaluator stops.')
        return
| 11,184 | 44.46748 | 103 | py |
keras | keras-master/keras/distribute/saved_model_test_base.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing saving/loading with DS."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
from keras import backend
from keras.distribute import model_combinations
# Seed for numpy / TF RNGs so predictions are reproducible across save/load.
_RANDOM_SEED = 1337
# Default SavedModel serving signature key.
_DEFAULT_FUNCTION_KEY = 'serving_default'

# Tolerance used when comparing predictions before and after save/restore.
_TOLERANCE = 1e-30
# TPU uses bfloat16 for computation in hardware underlying, so it has less
# precision than CPU/GPU.
_TPU_TOLERANCE = 1e-7

# Number of batches to run through `model.predict` in these tests.
PREDICT_STEPS = 1
# Model factories exercised by the combinations below: functional, sequential
# and subclassed variants of the same simple model.
simple_models = [
    model_combinations.simple_functional_model,
    model_combinations.simple_sequential_model,
    model_combinations.simple_subclass_model,
]

# Distribution strategies the save/load tests are run under.
strategies = [
    tf.__internal__.distribute.combinations.default_strategy,
    tf.__internal__.distribute.combinations.one_device_strategy,
    tf.__internal__.distribute.combinations.one_device_strategy_gpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_one_gpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
    tf.__internal__.distribute.combinations.tpu_strategy,
    tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
    tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
]
def get_tolerance(save_distribution, restore_distribution):
  """Returns the comparison tolerance, looser when a TPU strategy is involved."""
  involves_tpu = any(
      backend.is_tpu_strategy(strategy)
      for strategy in (save_distribution, restore_distribution))
  return _TPU_TOLERANCE if involves_tpu else _TOLERANCE
def simple_models_with_strategies():
  """Combines each simple model with every strategy, in eager mode."""
  kwargs = dict(
      model_and_input=simple_models,
      distribution=strategies,
      mode=['eager'])
  return tf.__internal__.test.combinations.combine(**kwargs)
def simple_models_with_strategy_pairs():
  """Combines each simple model with every (save, restore) strategy pair."""
  kwargs = dict(
      model_and_input=simple_models,
      distribution_for_saving=strategies,
      distribution_for_restoring=strategies,
      mode=['eager'])
  return tf.__internal__.test.combinations.combine(**kwargs)
def tfmodule_models_with_strategies():
  """Combines the tf.Module-based model with every strategy, in eager mode."""
  kwargs = dict(
      model_and_input=[model_combinations.simple_tfmodule_model],
      distribution=strategies,
      mode=['eager'])
  return tf.__internal__.test.combinations.combine(**kwargs)
def tfmodule_models_with_strategy_pairs():
  """Combines the tf.Module-based model with (save, restore) strategy pairs."""
  kwargs = dict(
      model_and_input=[model_combinations.simple_tfmodule_model],
      distribution_for_saving=strategies,
      distribution_for_restoring=strategies,
      mode=['eager'])
  return tf.__internal__.test.combinations.combine(**kwargs)
def load_and_run_with_saved_model_api(distribution, saved_dir, predict_dataset,
                                      output_name):
  """Loads a saved_model using tf.saved_model API, and runs it.

  Args:
    distribution: the distribution strategy to run the loaded signature under,
      or None to run it without a strategy.
    saved_dir: path of the SavedModel to load.
    predict_dataset: dataset providing one batch of predict inputs.
    output_name: key of the signature output to return.

  Returns:
    The signature's output tensor for one batch; under a strategy, the
    per-replica outputs concatenated along axis 0.
  """
  func = tf.saved_model.load(saved_dir)
  if distribution:
    dist_predict_dataset = distribution.experimental_distribute_dataset(
        predict_dataset)
    per_replica_predict_data = next(iter(dist_predict_dataset))
    result = distribution.run(
        func.signatures[_DEFAULT_FUNCTION_KEY],
        args=(per_replica_predict_data,))
    result = result[output_name]

    # Convert the per_replica value to a list, then concatenate them
    reduced = distribution.experimental_local_results(result)
    concat = tf.concat(reduced, 0)
    return concat
  else:
    result = func.signatures[_DEFAULT_FUNCTION_KEY](next(iter(predict_dataset)))
    return result[output_name]
class TestSavedModelBase(tf.test.TestCase, parameterized.TestCase):
  """Base class for testing saving/loading with DS.

  Subclasses implement `_save_model` and `_load_and_run_model` for a specific
  save/load API, and reuse the `run_test_*` scenarios defined here.
  """

  def setUp(self):
    # Fixed seeds so predictions before and after save/restore are comparable.
    np.random.seed(_RANDOM_SEED)
    tf.compat.v1.set_random_seed(_RANDOM_SEED)
    self._root_dir = 'base'
    super(TestSavedModelBase, self).setUp()

  def _save_model(self, model, saved_dir):
    """Save the given model to the given saved_dir.

    This method needs to be implemented by the subclasses.

    Args:
      model: a keras model object to save.
      saved_dir: a string representing the path to save the keras model
    """
    raise NotImplementedError('must be implemented in descendants')

  def _load_and_run_model(self,
                          distribution,
                          saved_dir,
                          predict_dataset,
                          output_name='output_1'):
    """Load the model and run 1 step of predict with it.

    This method must be implemented by the subclasses.

    Args:
      distribution: the distribution strategy used to load the model. None if no
        distribution strategy is used
      saved_dir: the string representing the path where the model is saved.
      predict_dataset: the data used to do the predict on the model for
        cross_replica context.
      output_name: the string representing the name of the output layer of the
        model.
    """
    raise NotImplementedError('must be implemented in descendants')

  def _train_model(self, model, x_train, y_train, batch_size):
    """Trains `model` for one epoch on a repeated, batched dataset."""
    training_dataset = tf.data.Dataset.from_tensor_slices(
        (x_train, y_train))
    training_dataset = training_dataset.repeat()
    training_dataset = training_dataset.batch(batch_size)

    # Train the model for 1 epoch
    model.fit(x=training_dataset, epochs=1, steps_per_epoch=100)

  def _predict_with_model(self, distribution, model, predict_dataset):
    """Runs `model.predict` for `PREDICT_STEPS` batches."""
    return model.predict(predict_dataset, steps=PREDICT_STEPS)

  def _get_predict_dataset(self, x_predict, batch_size):
    """Builds a repeated, batched dataset from the raw predict inputs."""
    predict_dataset = tf.data.Dataset.from_tensor_slices(x_predict)
    predict_dataset = predict_dataset.repeat()
    predict_dataset = predict_dataset.batch(batch_size)
    return predict_dataset

  def run_test_save_no_strategy_restore_strategy(self, model_and_input,
                                                 distribution):
    """Save a model without DS, and restore it with DS."""
    saved_dir = os.path.join(self.get_temp_dir(), '0')

    model = model_and_input.get_model()
    x_train, y_train, x_predict = model_and_input.get_data()
    batch_size = model_and_input.get_batch_size()
    predict_dataset = self._get_predict_dataset(x_predict, batch_size)

    self._train_model(model, x_train, y_train, batch_size)
    result_before_save = self._predict_with_model(None, model, predict_dataset)

    self._save_model(model, saved_dir)

    with distribution.scope():
      result_after_save = self._load_and_run_model(
          distribution=distribution,
          saved_dir=saved_dir,
          predict_dataset=predict_dataset)

    # Predictions must match (within tolerance) across the save/load boundary.
    tolerance = get_tolerance(None, distribution)
    self.assertAllClose(result_before_save, result_after_save, atol=tolerance)

  def run_test_save_strategy_restore_no_strategy(self, model_and_input,
                                                 distribution, save_in_scope):
    """Save a model with DS, and restore it without DS."""
    saved_dir = os.path.join(self.get_temp_dir(), '1')

    with distribution.scope():
      model = model_and_input.get_model()
      x_train, y_train, x_predict = model_and_input.get_data()
      batch_size = model_and_input.get_batch_size()

      self._train_model(model, x_train, y_train, batch_size)
      predict_dataset = self._get_predict_dataset(x_predict, batch_size)
      result_before_save = self._predict_with_model(
          distribution, model, predict_dataset)

    # Saving may happen inside or outside the strategy scope.
    if save_in_scope:
      with distribution.scope():
        self._save_model(model, saved_dir)
    else:
      self._save_model(model, saved_dir)

    load_result = self._load_and_run_model(
        distribution=None,
        saved_dir=saved_dir,
        predict_dataset=predict_dataset)

    tolerance = get_tolerance(distribution, None)
    self.assertAllClose(result_before_save, load_result, atol=tolerance)

  def run_test_save_strategy_restore_strategy(self, model_and_input,
                                              distribution_for_saving,
                                              distribution_for_restoring,
                                              save_in_scope):
    """Save a model with DS, and restore it with potentially different DS."""
    saved_dir = os.path.join(self.get_temp_dir(), '2')

    with distribution_for_saving.scope():
      model = model_and_input.get_model()
      x_train, y_train, x_predict = model_and_input.get_data()
      batch_size = model_and_input.get_batch_size()

      self._train_model(model, x_train, y_train, batch_size)
      predict_dataset = self._get_predict_dataset(x_predict, batch_size)
      result_before_save = self._predict_with_model(
          distribution_for_saving, model, predict_dataset)

    if save_in_scope:
      with distribution_for_saving.scope():
        self._save_model(model, saved_dir)
    else:
      self._save_model(model, saved_dir)

    with distribution_for_restoring.scope():
      load_result = self._load_and_run_model(
          distribution=distribution_for_restoring,
          saved_dir=saved_dir,
          predict_dataset=predict_dataset)

    tolerance = get_tolerance(distribution_for_saving,
                              distribution_for_restoring)
    self.assertAllClose(result_before_save, load_result, atol=tolerance)

  def run_test_save_strategy(self, model_and_input,
                             distribution, save_in_scope):
    """Save a model with DS."""
    saved_dir = os.path.join(self.get_temp_dir(), '3')

    with distribution.scope():
      model = model_and_input.get_model()
      x_train, y_train, _ = model_and_input.get_data()
      batch_size = model_and_input.get_batch_size()
      self._train_model(model, x_train, y_train, batch_size)

    if save_in_scope:
      with distribution.scope():
        self._save_model(model, saved_dir)
    else:
      self._save_model(model, saved_dir)

    # Returns the directory so subclasses can inspect the saved artifacts.
    return saved_dir
| 10,367 | 36.028571 | 83 | py |
keras | keras-master/keras/distribute/minimize_loss_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for running legacy optimizer code with DistributionStrategy."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy
from keras.distribute import optimizer_combinations
from keras.distribute.test_example import batchnorm_example
from keras.distribute.test_example import minimize_loss_example
from keras.layers import core
from keras.optimizer_v2 import optimizer_v2
# Variable names each v1 optimizer is expected to create for the simple dense
# model (kernel/bias plus optimizer-specific accumulator slots), keyed by
# optimizer name; used by testOptimizerInsideModelFn.
VAR_MAP_V1 = {
    "GradientDescent": ("dense/kernel", "dense/bias"),
    "Adagrad": ("dense/kernel/Adagrad", "dense/kernel", "dense/bias/Adagrad",
                "dense/bias"),
    "Ftrl": ("dense/kernel/Ftrl", "dense/kernel", "dense/bias/Ftrl",
             "dense/bias", "dense/kernel/Ftrl_1", "dense/bias/Ftrl_1"),
    "RMSProp": ("dense/kernel", "dense/bias/RMSProp", "dense/bias/RMSProp_1",
                "dense/bias", "dense/kernel/RMSProp_1", "dense/kernel/RMSProp")
}

# Same as above but for v2 (OptimizerV2) optimizers, which also create
# hyperparameter and iteration-count variables.
VAR_MAP_V2 = {
    "SGD": ("dense/bias", "SGD/learning_rate", "SGD/decay", "SGD/iter",
            "dense/kernel", "SGD/momentum"),
    "Adagrad":
        ("Adagrad/iter", "dense/bias", "dense/kernel", "Adagrad/learning_rate",
         "Adagrad/decay", "Adagrad/dense/kernel/accumulator",
         "Adagrad/dense/bias/accumulator")
}
class MinimizeLossStepTest(tf.test.TestCase, parameterized.TestCase):
def _get_iterator(self, strategy, input_fn):
iterator = strategy.make_input_fn_iterator(lambda _: input_fn())
self.evaluate(iterator.initializer)
return iterator
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          optimizer_combinations.distributions_and_v1_optimizers(),
          tf.__internal__.test.combinations.combine(
              mode=["graph"], use_callable_loss=[True, False]) +
          tf.__internal__.test.combinations.combine(
              mode=["eager"], use_callable_loss=[True])) +
      tf.__internal__.test.combinations.times(
          optimizer_combinations.distributions_and_v2_optimizers(),
          tf.__internal__.test.combinations.combine(
              mode=["graph", "eager"], use_callable_loss=[True])) +
      tf.__internal__.test.combinations.combine(
          distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
          optimizer_fn=optimizer_combinations.optimizers_v2,
          mode=["graph"],
          use_callable_loss=[True]) + tf.__internal__.test.combinations.combine(
              distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
              optimizer_fn=optimizer_combinations.optimizers_v1,
              mode=["graph"],
              use_callable_loss=[True, False]))
  def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss):
    """Checks training converges when steps run via run_steps_on_iterator."""
    with distribution.scope():
      optimizer = optimizer_fn()
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer, use_bias=True, use_callable_loss=use_callable_loss)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        # Two training iterations per call.
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=2).run_op

      if not tf.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())

      weights, biases = [], []
      for _ in range(5):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      # The example's loss is minimized when kernel + bias == 1, so the error
      # |w + b - 1| should be non-increasing as training progresses.
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          optimizer_combinations.distributions_and_v1_optimizers(),
          tf.__internal__.test.combinations.combine(
              mode=["graph"], use_callable_loss=[True, False]) +
          tf.__internal__.test.combinations.combine(
              mode=["eager"], use_callable_loss=[True])) +
      tf.__internal__.test.combinations.times(
          optimizer_combinations.distributions_and_v2_optimizers(),
          tf.__internal__.test.combinations.combine(
              mode=["graph", "eager"], use_callable_loss=[True])))
  def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn,
                                           use_callable_loss):
    """Same convergence check, but stepping via call_for_each_replica directly."""
    with distribution.scope():
      optimizer = optimizer_fn()
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer, use_bias=True, use_callable_loss=use_callable_loss)

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(iterator.get_next(),)))

      if not tf.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())

      weights, biases = [], []
      for _ in range(10):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      # The example's loss is minimized when kernel + bias == 1; the error
      # should be non-increasing as training progresses.
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          optimizer_combinations.distributions_and_v1_and_v2_optimizers(),
          tf.__internal__.test.combinations.combine(mode=["graph", "eager"])) +
      tf.__internal__.test.combinations.combine(
          distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
          optimizer_fn=optimizer_combinations.optimizers_v1_and_v2,
          mode=["graph"]))
  def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
    """Checks exactly the expected variables are created inside the model fn."""
    if (not tf.executing_eagerly() and
        tf.compat.v1.control_flow_v2_enabled()):
      self.skipTest("b/138751864")
    created_variables = []
    trainable_variables = []

    def appending_creator(next_creator, **kwargs):
      # Record the name of every variable created during the test.
      v = next_creator(**kwargs)
      created_variables.append(v.name)
      if "trainable" in kwargs and kwargs["trainable"]:
        trainable_variables.append(v.name)
      return v

    # Creator scope needs to be set before it's used inside
    # `distribution.scope`.
    with tf.variable_creator_scope(
        appending_creator), distribution.scope():
      optimizer = optimizer_fn()
      model_fn, dataset_fn, _ = minimize_loss_example(
          optimizer, use_bias=True, use_callable_loss=True)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op

      if not tf.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())
      run_step()

      def get_expected_variables(num_parameter_devices):
        # Look up the expected per-optimizer variable names, then add the
        # per-replica copies created for additional parameter devices.
        name = optimizer._name

        if isinstance(optimizer, optimizer_v2.OptimizerV2):
          variables = VAR_MAP_V2[name]
        else:
          variables = VAR_MAP_V1[name]

        extended_variables = [
            v + "/replica_{}".format(replica)
            for v in variables
            for replica in range(1, num_parameter_devices)
        ]
        variables = list(variables) + extended_variables
        return set(v + ":0" for v in variables)

      self.assertEqual(
          get_expected_variables(len(distribution.extended.parameter_devices)),
          set(created_variables))
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tf.__internal__.test.combinations.combine(
              momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
          tf.__internal__.test.combinations.times(
              optimizer_combinations.distributions_and_v1_and_v2_optimizers(),
              tf.__internal__.test.combinations.combine(
                  mode=["graph", "eager"],
                  # TODO(isaprykin): Allow False here. Currently subsequent
                  # replicas will re-execute UPDATE_OPS of previous replicas.
                  update_ops_in_cross_replica_mode=[True])) +
          tf.__internal__.test.combinations.combine(
              distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
              optimizer_fn=optimizer_combinations.optimizers_v1_and_v2,
              mode=["graph"],
              update_ops_in_cross_replica_mode=[False])))
  def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
                                    renorm, update_ops_in_cross_replica_mode):
    """Verifies that moving mean updates are reduced across replicas."""
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      model_fn, dataset_fn, batchnorm = batchnorm_example(
          optimizer_fn,
          batch_per_epoch=num_replicas,
          momentum=momentum,
          renorm=renorm,
          update_ops_in_replica_mode=not update_ops_in_cross_replica_mode)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        fetches = distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))
        if update_ops_in_cross_replica_mode:
          # Also run the batch-norm moving-statistics update ops.
          fetches += tuple(
              tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS))
        return tf.group(fetches)

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op

      if not tf.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())

      expected_moving_means = [0.] * 8

      def averaged_batch_mean(i):
        # Each batch has shape [16, 8] where the ith element in jth list is
        # (8 * j + i + replica_id * 100). So the batch mean in each replica is
        # (60 + i + replica_id * 100). So here comes its batch mean over all
        # replicas:
        return 60. + i + (num_replicas - 1.) / 2. * 100.

      for _ in range(10):
        run_step()
        moving_means = self.evaluate(batchnorm.moving_mean)

        # We make sure that the moving_mean is updated as if the sample mean is
        # calculated over all replicas.
        for i, expected_moving_mean in enumerate(expected_moving_means):
          expected_moving_means[i] -= ((
              expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum))
          self.assertNear(expected_moving_means[i], moving_means[i], 0.0001)
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tf.__internal__.test.combinations.combine(loss_reduction=[
              tf.compat.v1.losses.Reduction.SUM, tf.compat.v1.losses.Reduction.MEAN,
              tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE,
              tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS
          ]),
          tf.__internal__.test.combinations.times(
              tf.__internal__.test.combinations.combine(distribution=[
                  tf.__internal__.distribute.combinations.one_device_strategy,
                  tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
                  tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
                  tf.__internal__.distribute.combinations
                  .mirrored_strategy_with_two_gpus_no_merge_call,
              ]),
              tf.__internal__.test.combinations.times(
                  tf.__internal__.test.combinations.combine(optimizer_fn=optimizer_combinations
                                     .gradient_descent_optimizer_v1_fn),
                  tf.__internal__.test.combinations.combine(
                      mode=["graph"], use_callable_loss=[True, False]) +
                  tf.__internal__.test.combinations.combine(
                      mode=["eager"], use_callable_loss=[True])) +
              tf.__internal__.test.combinations.times(
                  tf.__internal__.test.combinations.combine(optimizer_fn=optimizer_combinations
                                     .gradient_descent_optimizer_keras_v2_fn),
                  tf.__internal__.test.combinations.combine(
                      mode=["graph", "eager"], use_callable_loss=[True]))) +
          tf.__internal__.test.combinations.combine(
              distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
              optimizer_fn=optimizer_combinations
              .gradient_descent_optimizer_v1_fn,
              mode=["graph"],
              use_callable_loss=[True, False]) + tf.__internal__.test.combinations.combine(
              distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
              optimizer_fn=optimizer_combinations
              .gradient_descent_optimizer_keras_v2_fn,
              mode=["graph"],
              use_callable_loss=[True])))
  def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
                    use_callable_loss):
    """Checks the trained weight under each `tf.compat.v1.losses` reduction.

    With SUM reduction the per-replica gradients add up, so the weight
    update scales with the number of replicas; with any mean-style
    reduction the model divides the loss by the replica count and the
    update matches the unreplicated value.
    """
    with distribution.scope():
      all_vars = []
      def model_fn(inputs):
        x, y = inputs
        w = tf.compat.v1.get_variable("w", initializer=[[2.]])
        all_vars.append(w)
        def loss_fn():
          # Use fixed initialization to make the steps deterministic.
          predict = tf.matmul(x, w)
          loss = tf.compat.v1.losses.mean_squared_error(
              y, predict, reduction=loss_reduction)
          if loss_reduction == tf.compat.v1.losses.Reduction.SUM:
            return loss
          return loss / distribution.num_replicas_in_sync
        # NOTE(review): the expected update of 0.106 asserted below implies a
        # 0.001 learning rate (0.001 * 106), not 0.2 as an older comment said.
        optimizer = optimizer_fn()  # GradientDescent with 0.001 learning rate
        if isinstance(optimizer, optimizer_v2.OptimizerV2):
          return optimizer.minimize(loss_fn, [w])
        else:
          if use_callable_loss:
            return optimizer.minimize(loss_fn)
          else:
            return optimizer.minimize(loss_fn())
      def dataset_fn():
        features = tf.data.Dataset.from_tensors([[2.], [7.]])
        labels = tf.data.Dataset.from_tensors([[6.], [21.]])
        return tf.data.Dataset.zip((features, labels)).repeat()
      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))
      iterator = self._get_iterator(distribution, dataset_fn)
      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op
      if not tf.executing_eagerly():
        # In graph mode, build the step graph once and reuse it through a
        # session callable.
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())
      run_step()
      v = all_vars[0]
      # Every replica must have captured the same (mirrored) variable.
      self.assertTrue(all(v is vi for vi in all_vars[1:]))
      weight = numpy.squeeze(self.evaluate(v))
      # Our model is:
      #   predict = x * w
      #   loss = (predict - y)^2
      #   dloss/dpredict = 2*(predict - y)
      #   dloss/dw = 2 * x^T @ (predict - y)
      # For our batch size of 2, assuming sum loss reduction:
      #   x = [2, 7]
      #   y = [6, 21]
      #   w_initial = 2
      #   predict = [4, 14]
      #   predict - y = [-2, -7]
      #   dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106
      # So unreplicated the update to w with lr=0.001 is -0.001 * -106 = 0.106
      # with sum loss reduction, or 0.053 with mean.
      if loss_reduction == tf.compat.v1.losses.Reduction.SUM:
        # Note that the "distribution.num_replicas_in_sync" factor will go away
        # once we split the input across replicas, instead of pulling a complete
        # batch of input per replica.
        self.assertNear(weight, 2 + 0.106 * distribution.num_replicas_in_sync,
                        0.0001)
      else:
        # One of the mean loss reductions.
        self.assertNear(weight, 2 + 0.053, 0.0001)
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          optimizer_combinations.distributions_and_v1_and_v2_optimizers(),
          tf.__internal__.test.combinations.combine(mode=["graph", "eager"]),
          tf.__internal__.test.combinations.combine(is_tpu=[False])) + tf.__internal__.test.combinations.combine(
              distribution=[tf.__internal__.distribute.combinations.tpu_strategy],
              optimizer_fn=optimizer_combinations.optimizers_v1_and_v2,
              mode=["graph"],
              is_tpu=[True]))
  def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):
    """Exercises `set_last_step_output`/`set_non_tensor_output` plumbing.

    Note: `is_tpu` only parameterizes the combinations above; the test body
    itself does not branch on it.
    """
    with distribution.scope():
      def dataset_fn():
        dataset = tf.data.Dataset.from_tensors([[1.]]).repeat()
        # TODO(priyag): batch with drop_remainder=True causes shapes to be
        # fully defined for TPU. Remove this when XLA supports dynamic shapes.
        return dataset.batch(batch_size=1, drop_remainder=True)
      optimizer = optimizer_fn()
      layer = core.Dense(1, use_bias=True)
      key1 = "foo"
      value1 = "bar"
      def model_fn(output_context, x):
        """A very simple model written by the user."""
        def loss_fn():
          y = tf.reshape(layer(x), []) - tf.constant(1.)
          return y * y
        if isinstance(optimizer, optimizer_v2.OptimizerV2):
          train_op = optimizer.minimize(
              loss_fn, lambda: layer.trainable_variables)
        else:
          train_op = optimizer.minimize(loss_fn)
        loss = loss_fn()
        # Per-replica loss, reduced (MEAN) across replicas by the context.
        output_context.set_last_step_output(
            name="replica_loss_reduced",
            output=loss,
            reduce_op=tf.distribute.ReduceOp.MEAN)
        output_context.set_non_tensor_output(key1, value1)
        return (train_op, loss)
      def step_fn(output_context, inputs):
        (train_op, loss) = distribution.extended.call_for_each_replica(
            model_fn, args=(output_context, inputs))
        # Register the same loss from cross-replica context, both reduced and
        # unreduced, so both code paths get verified below.
        output_context.set_last_step_output(
            name="cross_replica_loss_reduced",
            output=loss,
            reduce_op=tf.distribute.ReduceOp.MEAN)
        output_context.set_last_step_output(
            name="cross_replica_loss_not_reduced",
            output=loss)
        return distribution.group(train_op)
      iterator = self._get_iterator(distribution, dataset_fn)
      def run_step():
        initial_loss = lambda: tf.constant(1e7)
        # Initial values corresponding to reduced losses are just single
        # tensors. But for non reduced losses, we need to have initial
        # values that are of the same structure as non reduced losses. In
        # MirroredStrategy, this will be a list of losses, in TPUStrategy
        # it will be single tensor. Using `call_for_each_replica` followed
        # by `experimental_local_results` gives us the desired initial
        # value structure.
        not_reduced = distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(initial_loss))
        initial_loop_values = {
            "replica_loss_reduced": initial_loss(),
            "cross_replica_loss_reduced": initial_loss(),
            "cross_replica_loss_not_reduced": not_reduced,
        }
        ctx = distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=2,
            initial_loop_values=initial_loop_values)
        self.assertEqual({key1: (value1,)}, ctx.non_tensor_outputs)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["replica_loss_reduced"],
            reduced=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_replica_loss_reduced"],
            reduced=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_replica_loss_not_reduced"],
            reduced=False, distribution=distribution)
        return (ctx.run_op, ctx.last_step_outputs["replica_loss_reduced"])
      if not tf.executing_eagerly():
        # In graph mode, build the step graph once and wrap it in a
        # session callable.
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())
      weights, biases, losses = [], [], []
      for _ in range(5):
        _, loss = run_step()
        losses.append(loss)
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))
      # Training y = w*x + b towards 1 with x == 1: both the loss and the
      # error |w + b - 1| should be monotonically non-increasing.
      loss_is_not_increasing = all(y <= x for x, y in zip(losses, losses[1:]))
      self.assertTrue(loss_is_not_increasing)
      error = abs(
          numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      error_is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(error_is_not_increasing)
def _verify_loss_output(self, initial_loss, loss_output, reduced,
distribution):
if not reduced:
self.assertLen(distribution.experimental_local_results(loss_output),
distribution.num_replicas_in_sync)
loss_tensor = distribution.reduce(tf.distribute.ReduceOp.MEAN, loss_output,
axis=None)
else:
unwrapped_output = distribution.experimental_local_results(loss_output)
self.assertLen(unwrapped_output, 1)
loss_tensor = unwrapped_output[0]
self.assertEqual(initial_loss.dtype, loss_tensor.dtype)
self.assertEqual(initial_loss.shape, loss_tensor.shape)
@tf.__internal__.distribute.combinations.generate(
optimizer_combinations.distributions_and_v2_optimizers())
def test_empty_var_list(self, distribution, optimizer_fn):
opt = optimizer_fn()
with distribution.scope():
def run_fn():
opt.minimize(lambda: tf.constant(1.), [])
opt.apply_gradients([])
distribution.run(run_fn)
if __name__ == "__main__":
  # Run all test cases in this file under the TensorFlow test runner.
  tf.test.main()
| 23,539 | 42.836127 | 122 | py |
keras | keras-master/keras/distribute/distributed_file_utils_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed_file_utils."""
import tensorflow.compat.v2 as tf
import os
from keras.distribute import distributed_file_utils
class DistributedFileUtilsTest(tf.test.TestCase):
  """Tests for chief/worker-aware temp-dir handling in distributed_file_utils.

  The mocked strategies below provide only the private attributes that
  distributed_file_utils reads: `_in_multi_worker_mode`, `should_checkpoint`
  and (for non-chief workers) `_task_id`.
  """
  class MockedExtended:
    # Bare attribute holder standing in for `strategy.extended`.
    pass
  class MockedChiefStrategy:
    # Multi-worker chief: should write directly to the real destination.
    def __init__(self):
      self.extended = DistributedFileUtilsTest.MockedExtended()
      self.extended._in_multi_worker_mode = lambda: True
      self.extended.should_checkpoint = True
  class MockedWorkerStrategy:
    # Multi-worker non-chief (task id 3): should write to a temp dir.
    def __init__(self):
      self.extended = DistributedFileUtilsTest.MockedExtended()
      self.extended._in_multi_worker_mode = lambda: True
      self.extended.should_checkpoint = False
      self.extended._task_id = 3
  class MockedSingleWorkerStrategy:
    # Single-worker mode: no temp-dir indirection at all.
    def __init__(self):
      self.extended = DistributedFileUtilsTest.MockedExtended()
      self.extended._in_multi_worker_mode = lambda: False
  def _write_dummy_file(self, file_to_write):
    # Creates `file_to_write` with arbitrary content so existence checks work.
    with open(file_to_write, 'w') as f:
      f.write('foo bar')
  def testChiefWriteDirAndFilePath(self):
    # The chief writes straight to the requested paths, unmodified.
    dirpath = self.get_temp_dir()
    filepath = os.path.join(dirpath, 'foo.bar')
    strategy = DistributedFileUtilsTest.MockedChiefStrategy()
    self.assertEqual(
        distributed_file_utils.write_filepath(filepath, strategy), filepath)
    self.assertEqual(
        distributed_file_utils.write_dirpath(dirpath, strategy), dirpath)
  def testWorkerWriteDirAndFilePath(self):
    # A non-chief worker is redirected into a 'workertemp_<task_id>' subdir.
    dirpath = self.get_temp_dir()
    filepath = os.path.join(dirpath, 'foo.bar')
    strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    self.assertEqual(
        distributed_file_utils.write_filepath(filepath, strategy),
        os.path.join(dirpath, 'workertemp_3', 'foo.bar'))
    self.assertEqual(
        distributed_file_utils.write_dirpath(dirpath, strategy),
        os.path.join(dirpath, 'workertemp_3'))
  def testChiefDoesNotRemoveDirAndFilePath(self):
    # Cleanup is a no-op for the chief: its output is the real artifact.
    temp_dir = self.get_temp_dir()
    strategy = DistributedFileUtilsTest.MockedChiefStrategy()
    dir_to_write = distributed_file_utils.write_dirpath(temp_dir, strategy)
    file_to_write = os.path.join(dir_to_write, 'tmp')
    self.assertFalse(os.path.exists(file_to_write))
    self._write_dummy_file(file_to_write)
    self.assertTrue(os.path.exists(file_to_write))
    distributed_file_utils.remove_temp_dir_with_filepath(
        file_to_write, strategy)
    self.assertTrue(os.path.exists(file_to_write))
  def testWorkerDoesRemoveFilePath(self):
    # A worker's temp file is deleted once writing is finished.
    temp_dir = self.get_temp_dir()
    strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    dir_to_write = distributed_file_utils.write_dirpath(temp_dir, strategy)
    file_to_write = os.path.join(dir_to_write, 'tmp')
    self.assertFalse(os.path.exists(file_to_write))
    self._write_dummy_file(file_to_write)
    self.assertTrue(os.path.exists(file_to_write))
    distributed_file_utils.remove_temp_dir_with_filepath(
        file_to_write, strategy)
    self.assertFalse(os.path.exists(file_to_write))
  def testWorkerDoesRemoveDirPath(self):
    # Removing the original dirpath removes the worker's whole temp subdir.
    temp_dir = self.get_temp_dir()
    strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    dir_to_write = distributed_file_utils.write_dirpath(temp_dir, strategy)
    file_to_write = os.path.join(dir_to_write, 'tmp')
    self.assertFalse(os.path.exists(file_to_write))
    self._write_dummy_file(file_to_write)
    self.assertTrue(os.path.exists(file_to_write))
    distributed_file_utils.remove_temp_dirpath(temp_dir, strategy)
    self.assertFalse(os.path.exists(file_to_write))
    self.assertFalse(os.path.exists(os.path.dirname(file_to_write)))
  def testMultipleRemoveOrigDirPathIsFine(self):
    # Removal must be idempotent when given the original directory.
    temp_dir = self.get_temp_dir()
    strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    dir_to_write = distributed_file_utils.write_dirpath(temp_dir, strategy)
    file_to_write = os.path.join(dir_to_write, 'tmp')
    self._write_dummy_file(file_to_write)
    distributed_file_utils.remove_temp_dirpath(temp_dir, strategy)
    distributed_file_utils.remove_temp_dirpath(temp_dir, strategy)
    distributed_file_utils.remove_temp_dirpath(temp_dir, strategy)
  def testMultipleRemoveDirToWritePathIsFine(self):
    # Removal must also be idempotent when given the temp dir itself.
    temp_dir = self.get_temp_dir()
    strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    dir_to_write = distributed_file_utils.write_dirpath(temp_dir, strategy)
    file_to_write = os.path.join(dir_to_write, 'tmp')
    self._write_dummy_file(file_to_write)
    distributed_file_utils.remove_temp_dirpath(dir_to_write, strategy)
    distributed_file_utils.remove_temp_dirpath(dir_to_write, strategy)
    distributed_file_utils.remove_temp_dirpath(dir_to_write, strategy)
if __name__ == '__main__':
  # Run all test cases in this file under the TensorFlow test runner.
  tf.test.main()
| 5,369 | 39.37594 | 80 | py |
keras | keras-master/keras/distribute/distributed_training_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
from absl import flags
from keras import backend
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
# TODO(b/118776054): Currently we support global batch size for TPUStrategy and
# core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
# no longer needed.
def global_batch_size_supported(distribution_strategy):
  """Returns whether the strategy's extended API supports global batch size."""
  extended = distribution_strategy.extended
  return extended._global_batch_size  # pylint: disable=protected-access
def call_replica_local_fn(fn, *args, **kwargs):
  """Calls `fn`, which may use replica-local variables, safely.

  When a strategy is in effect and we are in a cross-replica context (and the
  strategy is not a TPU strategy), `fn` is dispatched to every replica via
  `call_for_each_replica`; otherwise it is invoked directly.

  Args:
    fn: The function to call.
    *args: Positional arguments to the `fn`.
    **kwargs: Keyword argument to `fn`. A `strategy` keyword, if present, is
      consumed here and not forwarded to `fn`.

  Returns:
    The result of calling `fn`.
  """
  # TODO(b/132666209): Remove this function when we support assign_*
  # for replica-local variables.
  if 'strategy' in kwargs:
    strategy = kwargs.pop('strategy')
  elif tf.distribute.has_strategy():
    strategy = tf.distribute.get_strategy()
  else:
    strategy = None
  # TODO(b/120571621): TPUStrategy does not implement replica-local variables.
  on_tpu = backend.is_tpu_strategy(strategy)
  if strategy and not on_tpu and tf.distribute.in_cross_replica_context():
    with strategy.scope():
      return strategy.extended.call_for_each_replica(fn, args, kwargs)
  return fn(*args, **kwargs)
def is_distributed_variable(v):
  """Returns whether `v` is a distributed variable."""
  # A distributed variable is both a DistributedValues and a tf.Variable.
  required_types = (tf.distribute.DistributedValues, tf.Variable)
  return all(isinstance(v, t) for t in required_types)
def get_strategy():
  """Creates a `tf.distribute.Strategy` object from flags.

  The flag `keras_distribute_strategy_class` selects one of: 'tpu',
  'multi_worker_mirrored', 'mirrored', 'parameter_server', 'one_device'.

  Example usage:

  ```python
  strategy = utils.get_strategy()
  with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
  model.compile(...)
  train_ds, test_ds = ...
  model.fit(train_ds, validation_data=test_ds, epochs=10)
  ```

  Returns:
    `tf.distribute.Strategy` instance.

  Raises:
    ValueError: for an unknown strategy name, or when 'tpu' is requested
      without a TPU address flag.
  """
  cls = FLAGS.keras_distribute_strategy_class
  accepted_strats = {
      'tpu', 'multi_worker_mirrored', 'mirrored',
      'parameter_server', 'one_device'}
  if cls == 'tpu':
    tpu_addr = FLAGS.keras_distribute_strategy_tpu_addr
    if not tpu_addr:
      raise ValueError(
          'When using a TPU strategy, you must set the flag '
          '`keras_distribute_strategy_tpu_addr` (TPU address).')
    cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu=tpu_addr)
    # The TPU system must be connected and initialized before use.
    tf.config.experimental_connect_to_cluster(cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
    return tf.distribute.experimental.TPUStrategy(cluster_resolver)
  if cls == 'multi_worker_mirrored':
    return tf.distribute.experimental.MultiWorkerMirroredStrategy()
  if cls == 'mirrored':
    return tf.distribute.MirroredStrategy()
  if cls == 'parameter_server':
    cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
    return tf.distribute.experimental.ParameterServerStrategy(
        cluster_resolver)
  if cls == 'one_device':
    return tf.distribute.OneDeviceStrategy('/gpu:0')
  raise ValueError(
      'Unknown distribution strategy flag. Received: '
      f'keras_distribute_strategy_class={cls}. '
      f'It should be one of {accepted_strats}')
| 4,116 | 33.889831 | 94 | py |
keras | keras-master/keras/distribute/distributed_file_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities that help manage directory path in distributed settings.
In multi-worker training, the need to write a file to distributed file
location often requires only one copy done by one worker despite many workers
that are involved in training. The option to only perform saving by chief is
not feasible for a couple of reasons: 1) Chief and workers may each contain
a client that runs the same piece of code and it's preferred not to make
any distinction between the code run by chief and other workers, and 2)
saving of model or model's related information may require SyncOnRead
variables to be read, which needs the cooperation of all workers to perform
all-reduce.
This set of utility is used so that only one copy is written to the needed
directory, by supplying a temporary write directory path for workers that don't
need to save, and removing the temporary directory once file writing is done.
Example usage:
```
# Before using a directory to write file to.
self.log_write_dir = write_dirpath(self.log_dir, get_distribution_strategy())
# Now `self.log_write_dir` can be safely used to write file to.
...
# After the file is written to the directory.
remove_temp_dirpath(self.log_dir, get_distribution_strategy())
```
Experimental. API is subject to change.
"""
import tensorflow.compat.v2 as tf
import os
def _get_base_dirpath(strategy):
task_id = strategy.extended._task_id # pylint: disable=protected-access
return 'workertemp_' + str(task_id)
def _is_temp_dir(dirpath, strategy):
  # A path is this worker's temp dir iff it ends with the worker's temp name.
  suffix = _get_base_dirpath(strategy)
  return dirpath.endswith(suffix)
def _get_temp_dir(dirpath, strategy):
  # Reuse `dirpath` if it already is this worker's temp dir; otherwise nest a
  # worker-specific temp dir inside it. Ensure the directory exists either way.
  already_temp = _is_temp_dir(dirpath, strategy)
  temp_dir = dirpath if already_temp else os.path.join(
      dirpath, _get_base_dirpath(strategy))
  tf.io.gfile.makedirs(temp_dir)
  return temp_dir
def write_dirpath(dirpath, strategy):
  """Returns the directory that should actually be written to.

  `dirpath` (or the substitute temp dir) is created if it doesn't exist.

  Args:
    dirpath: Original dirpath that would be used without distribution.
    strategy: The tf.distribute strategy object currently used.

  Returns:
    The writing dir path that should be used to save with distribution.
  """
  if strategy is None:
    # Infer the ambient strategy when the caller did not pass one.
    strategy = tf.distribute.get_strategy()
  if strategy is None:
    # Still no strategy: not in distributed training, keep the original path.
    return dirpath
  extended = strategy.extended
  not_multi_worker = not extended._in_multi_worker_mode()  # pylint: disable=protected-access
  if not_multi_worker or extended.should_checkpoint:
    # Single-worker mode, or this worker is the one responsible for saving.
    return dirpath
  # Non-chief workers save into a temporary directory removed later.
  return _get_temp_dir(dirpath, strategy)
def remove_temp_dirpath(dirpath, strategy):
  """Removes the temp path after writing is finished.

  Args:
    dirpath: Original dirpath that would be used without distribution.
    strategy: The tf.distribute strategy object currently used.
  """
  if strategy is None:
    # Infer the ambient strategy when the caller did not pass one.
    strategy = tf.distribute.get_strategy()
  if strategy is None:
    # Not in distributed training; nothing to clean up.
    return
  # TODO(anjalisridhar): Consider removing the check for multi worker mode since
  # it is redundant when used with the should_checkpoint property.
  extended = strategy.extended
  wrote_to_temp = (extended._in_multi_worker_mode()  # pylint: disable=protected-access
                   and not extended.should_checkpoint)
  if wrote_to_temp:
    # This worker saved to a temporary directory; remove it now.
    tf.compat.v1.gfile.DeleteRecursively(_get_temp_dir(dirpath, strategy))
def write_filepath(filepath, strategy):
  """Returns the file path that should actually be written to.

  The directory containing `filepath` (or its temp substitute) is created if
  it doesn't exist.

  Args:
    filepath: Original filepath that would be used without distribution.
    strategy: The tf.distribute strategy object currently used.

  Returns:
    The writing filepath that should be used to save file with distribution.
  """
  target_dir = write_dirpath(os.path.dirname(filepath), strategy)
  return os.path.join(target_dir, os.path.basename(filepath))
def remove_temp_dir_with_filepath(filepath, strategy):
  """Removes the temp path containing `filepath` after writing is finished.

  Args:
    filepath: Original filepath that would be used without distribution.
    strategy: The tf.distribute strategy object currently used.
  """
  containing_dir = os.path.dirname(filepath)
  remove_temp_dirpath(containing_dir, strategy)
| 5,402 | 36.006849 | 87 | py |
keras | keras-master/keras/distribute/parameter_server_evaluation_test.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for evaluation using Keras model and ParameterServerStrategy."""
import tensorflow.compat.v2 as tf
import time
import keras
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.ops import resource_variable_ops
# TODO(yuefengz): move the following implementation to Keras core.
class KerasMetricTypeSpec(tf.TypeSpec):
  """`tf.TypeSpec` describing a Keras metric by class, config and weights.

  Representing a metric this way lets it cross `tf.function` boundaries: the
  spec's components are the metric's weight variables, and the metric object
  is rebuilt from its config (reusing those variables) on the way back.
  """
  def __init__(self, cls, config, weights):
    # `cls` is the metric class, `config` its `get_config()` result, and
    # `weights` the list of variables backing the metric's state.
    self._cls = cls
    self._config = config
    self._weights = weights
  def _serialize(self):
    # Identify the spec by class name and config; weights travel as
    # components, not as part of the serialized identity.
    return (self._cls.__name__, self._config)
  @property
  def value_type(self):
    return self._cls
  def most_specific_compatible_type(self, other):
    # Two specs are compatible only when both class and config match exactly.
    if (type(self) is not type(other) or self._cls != other._cls or
        self._config != other._config):
      raise ValueError("No TypeSpec is compatible with both %s and %s" %
                       (self, other))
    return KerasMetricTypeSpec(self._cls, self._config, self._weights)
  @property
  def _component_specs(self):
    # One non-trainable VariableSpec per metric weight, preserving shape and
    # dtype; the ':0' suffix is stripped to get the variable's base name.
    ret = []
    for w in self._weights:
      ret.append(
          resource_variable_ops.VariableSpec(
              w.shape, w.dtype, w.name.split(":")[0], trainable=False))
    return ret
  def _to_components(self, value):
    # The flattened representation is simply the metric's weight variables.
    return value.weights
  def _from_components(self, weights):
    # Rebuild the metric from config while intercepting variable creation so
    # the rebuilt metric reuses `weights` instead of allocating new variables.
    counter = [0]
    def fetch_variable(next_creator, **kwargs):
      del next_creator, kwargs
      # TODO(yuefengz): verify the var creation order matches the weights
      # property
      var = weights[counter[0]]
      counter[0] += 1
      return var
    with tf.variable_creator_scope(fetch_variable):
      ret = self._cls.from_config(self._config)
    assert len(weights) == len(ret.weights)
    return ret
class MeanMetricAsCompositeTensor(keras.metrics.Mean,
                                  tf.__internal__.CompositeTensor):
  """A `Mean` metric traceable as a composite tensor via KerasMetricTypeSpec."""
  def element_spec(self):
    # Not needed for these tests; the CompositeTensor interface requires it.
    raise NotImplementedError("element_spec not implemented")
  @property
  def _type_spec(self):
    # Describe this metric by its class, config, and current weight variables.
    return KerasMetricTypeSpec(self.__class__, self.get_config(), self.weights)
class EvaluationTest(tf.test.TestCase):
  """Evaluation with ParameterServerStrategy, including worker preemption."""
  @classmethod
  def setUpClass(cls):
    super(EvaluationTest, cls).setUpClass()
    # Bring up an in-process cluster with 3 workers and 2 parameter servers,
    # then build a strategy and a coordinator against it, shared by all tests.
    cls._cluster = multi_worker_test_base.create_multi_process_cluster(
        num_workers=3, num_ps=2, rpc_layer="grpc")
    cls._cluster_def = cls._cluster.cluster_resolver.cluster_spec().as_dict()
    cluster_resolver = SimpleClusterResolver(
        tf.train.ClusterSpec(cls._cluster_def), rpc_layer="grpc")
    cls.strategy = tf.distribute.experimental.ParameterServerStrategy(
        cluster_resolver)
    cls.cluster_coord = tf.distribute.experimental.coordinator.ClusterCoordinator(cls.strategy)
  @classmethod
  def tearDownClass(cls):
    # Shut down the multi-process cluster started in setUpClass.
    cls._cluster.stop()
    cls._cluster = None
    super(EvaluationTest, cls).tearDownClass()
  def testPassMetricToTfFunction(self):
    # A metric should be traceable as a composite-tensor argument: calling a
    # concrete function traced with metric1's spec on metric2 must update
    # metric2's state (and leave metric1 untouched).
    metric1 = MeanMetricAsCompositeTensor()
    metric2 = MeanMetricAsCompositeTensor()
    self.assertEqual(metric1.result(), 0.0)
    self.assertEqual(metric2.result(), 0.0)
    tf.nest.assert_same_structure(
        metric1, metric2._type_spec, expand_composites=True)
    tf.nest.assert_same_structure(
        metric1._type_spec, metric2, expand_composites=True)
    @tf.function
    def func(m):
      m.update_state([1.0, 2.0])
    func(metric1)
    self.assertEqual(metric1.result(), 1.5)
    self.assertEqual(metric2.result(), 0.0)
    concrete_f = func.get_concrete_function(metric1._type_spec)
    concrete_f(metric2)
    self.assertEqual(metric1.result(), 1.5)
    self.assertEqual(metric2.result(), 1.5)
  def testModelEvaluatePrototype(self):
    # Sharded evaluation driven by the coordinator; kills and restarts two
    # workers mid-run to verify that all scheduled shards still complete.
    def metric_fn():
      return MeanMetricAsCompositeTensor()
    # TODO(yuefengz): make _create_per_worker_resources public and get rid of
    # the type_spec hack.
    per_worker_metric = self.cluster_coord._create_per_worker_resources(
        metric_fn)
    metric_on_coordinator = metric_fn()
    for metric_remote_value in per_worker_metric._values:
      metric_remote_value._type_spec = metric_on_coordinator._type_spec
    def dataset_fn():
      return tf.data.Dataset.range(1024)
    # TODO(yuefengz): integrate it into model.evaluate.
    @tf.function
    def eval_fn(total_shard, shard_id, metric):
      # Each invocation evaluates one shard of the dataset from scratch.
      metric.reset_states()
      dataset_shard = dataset_fn().shard(total_shard, shard_id)
      for i in dataset_shard:
        metric.update_state(i)
      # TODO(yuefengz): we should return the internal state of the metric and
      # then use the combiner API.
      return metric.result()
    total_shards = 128
    result_remote_values = []
    for i in range(total_shards):
      result_remote_values.append(
          self.cluster_coord.schedule(
              eval_fn, args=(total_shards, i, per_worker_metric)))
    # Simulate preemption: kill two workers, wait briefly, then restart them.
    self._cluster.kill_task("worker", 0)
    self._cluster.kill_task("worker", 1)
    time.sleep(1)
    self._cluster.start_task("worker", 0)
    self._cluster.start_task("worker", 1)
    results = [r.fetch() for r in result_remote_values]
    result = sum(results) / len(results)
    # The mean of 0..1023 is 511.5 regardless of how the data was sharded.
    self.assertEqual(result, 511.5)
if __name__ == "__main__":
  # Multi-process tests need v2 behavior and the multi-process test runner.
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 5,989 | 30.861702 | 95 | py |
keras | keras-master/keras/distribute/multi_worker_testing_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing multi-worker distribution strategies with Keras."""
import tensorflow.compat.v2 as tf
import threading
import unittest
import keras
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from keras.optimizer_v2 import gradient_descent
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.server_lib import ClusterSpec
# Holds the import error (if any) so functions needing portpicker can
# re-raise it lazily at call time instead of failing at module import.
_portpicker_import_error = None
try:
  import portpicker  # pylint: disable=g-import-not-at-top
except (ImportError, ModuleNotFoundError) as _error:  # pylint: disable=invalid-name
  _portpicker_import_error = _error
  portpicker = None

# Ports already handed out by `pick_unused_port`; guarded by `lock` so
# concurrent callers never receive the same port twice.
ASSIGNED_PORTS = set()
lock = threading.Lock()
def mnist_synthetic_dataset(batch_size, steps_per_epoch):
  """Generate synthetic MNIST dataset for testing.

  Args:
    batch_size: per-batch size used for both the train and eval datasets.
    steps_per_epoch: number of training batches; the train set contains
      exactly `batch_size * steps_per_epoch` examples before repeating.

  Returns:
    A `(train_ds, eval_ds)` tuple of batched `tf.data.Dataset` objects.
  """
  # train dataset
  x_train = tf.ones([batch_size * steps_per_epoch, 28, 28, 1],
                    dtype=tf.float32)
  y_train = tf.ones([batch_size * steps_per_epoch, 1],
                    dtype=tf.int32)
  train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
  train_ds = train_ds.repeat()
  # Fix: batch by the `batch_size` argument rather than a hard-coded 64, so
  # the batching is consistent with the dataset size computed above.
  train_ds = train_ds.batch(batch_size, drop_remainder=True)

  # eval dataset
  x_test = tf.random.uniform([10000, 28, 28, 1], dtype=tf.float32)
  y_test = tf.random.uniform([10000, 1],
                             minval=0,
                             maxval=9,
                             dtype=tf.int32)
  eval_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
  eval_ds = eval_ds.batch(batch_size, drop_remainder=True)
  return train_ds, eval_ds
def get_mnist_model(input_shape):
  """Define a deterministically-initialized CNN model for MNIST testing.

  Args:
    input_shape: shape of a single input image, e.g. `(28, 28, 1)`.

  Returns:
    A compiled `keras.Model` whose kernels use a fixed-seed initializer so
    runs are reproducible across workers.
  """
  inputs = keras.Input(shape=input_shape)
  x = keras.layers.Conv2D(
      32,
      kernel_size=(3, 3),
      activation="relu",
      kernel_initializer=keras.initializers.TruncatedNormal(seed=99))(inputs)
  x = keras.layers.BatchNormalization()(x)
  # Bug fix: the features were flattened twice and summed with themselves
  # (`Flatten()(x) + Flatten()(x)`), which doubles every activation. A single
  # Flatten is the intended graph.
  x = keras.layers.Flatten()(x)
  x = keras.layers.Dense(
      10,
      activation="softmax",
      kernel_initializer=keras.initializers.TruncatedNormal(seed=99))(x)
  model = keras.Model(inputs=inputs, outputs=x)
  # TODO(yuefengz): optimizer with slot variables doesn't work because of
  # optimizer's bug.
  # TODO(yuefengz): we should not allow non-v2 optimizer.
  model.compile(
      loss=keras.losses.sparse_categorical_crossentropy,
      optimizer=gradient_descent.SGD(learning_rate=0.001),
      metrics=["accuracy"])
  return model
def make_parameter_server_cluster(num_workers, num_ps):
  """Starts an in-process PS cluster and returns a resolver pointing at it."""
  rpc_layer = "grpc"
  topology = create_in_process_cluster(
      num_workers=num_workers, num_ps=num_ps, rpc_layer=rpc_layer)
  return SimpleClusterResolver(ClusterSpec(topology), rpc_layer=rpc_layer)
def pick_unused_port():
  """Returns an unused and unassigned local port."""
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type

  global ASSIGNED_PORTS
  with lock:
    # Keep drawing candidates until we find one we have not handed out yet.
    while True:
      try:
        candidate = portpicker.pick_unused_port()
      except portpicker.NoFreePortFoundError:
        raise unittest.SkipTest("Flakes in portpicker library do not represent "
                                "TensorFlow errors.")
      is_fresh = candidate > 10000 and candidate not in ASSIGNED_PORTS
      if is_fresh:
        ASSIGNED_PORTS.add(candidate)
        logging.info("Using local port %r", candidate)
        return candidate
def _create_cluster(num_workers,
                    num_ps,
                    has_chief=False,
                    has_eval=False,
                    protocol="grpc",
                    worker_config=None,
                    ps_config=None,
                    eval_config=None,
                    worker_name="worker",
                    ps_name="ps",
                    chief_name="chief"):
  """Creates and starts local servers and returns the cluster_spec dict.

  Args:
    num_workers: number of worker servers to start.
    num_ps: number of parameter-server servers to start.
    has_chief: whether to also start a dedicated chief server.
    has_eval: whether to also start a dedicated evaluator server.
    protocol: RPC protocol used by all servers.
    worker_config: optional `ConfigProto` for worker servers (also used for
      the chief, which participates in training).
    ps_config: optional `ConfigProto` for ps servers.
    eval_config: optional `ConfigProto` for the evaluator server.
    worker_name: job name under which workers are registered.
    ps_name: job name under which ps tasks are registered.
    chief_name: job name under which the chief is registered.

  Returns:
    The cluster spec as a `{job_name: ["localhost:port", ...]}` dict.
  """
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type
  worker_ports = [pick_unused_port() for _ in range(num_workers)]
  ps_ports = [pick_unused_port() for _ in range(num_ps)]

  cluster_dict = {}
  if num_workers > 0:
    cluster_dict[worker_name] = ["localhost:%s" % port for port in worker_ports]
  if num_ps > 0:
    cluster_dict[ps_name] = ["localhost:%s" % port for port in ps_ports]
  if has_eval:
    cluster_dict["evaluator"] = ["localhost:%s" % pick_unused_port()]
  if has_chief:
    cluster_dict[chief_name] = ["localhost:%s" % pick_unused_port()]

  cs = tf.train.ClusterSpec(cluster_dict)

  # Start every server immediately (start=True); the Server objects are not
  # retained — once started, in-process servers live for the process lifetime.
  for i in range(num_workers):
    tf.distribute.Server(
        cs,
        job_name=worker_name,
        protocol=protocol,
        task_index=i,
        config=worker_config,
        start=True)

  for i in range(num_ps):
    tf.distribute.Server(
        cs,
        job_name=ps_name,
        protocol=protocol,
        task_index=i,
        config=ps_config,
        start=True)

  if has_chief:
    # The chief reuses the worker config since it also runs training.
    tf.distribute.Server(
        cs,
        job_name=chief_name,
        protocol=protocol,
        task_index=0,
        config=worker_config,
        start=True)

  if has_eval:
    tf.distribute.Server(
        cs,
        job_name="evaluator",
        protocol=protocol,
        task_index=0,
        config=eval_config,
        start=True)

  return cluster_dict
def create_in_process_cluster(num_workers,
                              num_ps,
                              has_chief=False,
                              has_eval=False,
                              rpc_layer="grpc"):
  """Create an in-process cluster that consists of only standard server.

  Args:
    num_workers: number of worker tasks to start.
    num_ps: number of parameter-server tasks to start.
    has_chief: whether the cluster includes a dedicated chief task.
    has_eval: whether the cluster includes a dedicated evaluator task.
    rpc_layer: RPC protocol for the in-process servers.

  Returns:
    The cluster spec dict produced by `_create_cluster`.

  Raises:
    unittest.SkipTest: if the gRPC servers could not be started.
  """
  # Leave some memory for cuda runtime.
  gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval))
  worker_config = tf.compat.v1.ConfigProto()
  worker_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac

  # The cluster may hang if workers don't have enough inter_op threads. See
  # b/172296720 for more details.
  if worker_config.inter_op_parallelism_threads < num_workers + 1:
    worker_config.inter_op_parallelism_threads = num_workers + 1

  # Enable collective ops which has no impact on non-collective ops.
  if has_chief:
    worker_config.experimental.collective_group_leader = (
        "/job:chief/replica:0/task:0")
  else:
    worker_config.experimental.collective_group_leader = (
        "/job:worker/replica:0/task:0")

  # The ps tasks are kept off GPU entirely.
  ps_config = tf.compat.v1.ConfigProto()
  ps_config.device_count["GPU"] = 0

  eval_config = tf.compat.v1.ConfigProto()
  eval_config.experimental.collective_group_leader = ""

  # Create in-process servers. Once an in-process tensorflow server is created,
  # there is no way to terminate it. So we create one cluster per test process.
  # We could've started the server in another process, we could then kill that
  # process to terminate the server. The reasons why we don"t want multiple
  # processes are
  # 1) it is more difficult to manage these processes;
  # 2) there is something global in CUDA such that if we initialize CUDA in the
  # parent process, the child process cannot initialize it again and thus cannot
  # use GPUs (https://stackoverflow.com/questions/22950047).
  cluster = None
  try:
    cluster = _create_cluster(
        num_workers,
        num_ps=num_ps,
        has_chief=has_chief,
        has_eval=has_eval,
        worker_config=worker_config,
        ps_config=ps_config,
        eval_config=eval_config,
        protocol=rpc_layer)
  except tf.errors.UnknownError as e:
    if "Could not start gRPC server" in e.message:
      raise unittest.SkipTest("Cannot start std servers.")
    else:
      raise
  return cluster
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test covering sidecar_evaluator.py."""
import tensorflow.compat.v2 as tf
import enum
import os
from absl import logging
from absl.testing import parameterized
import numpy as np
import keras
from keras.distribute import sidecar_evaluator as sidecar_evaluator_lib
from keras.optimizer_v2 import gradient_descent
_BATCH_SIZE = 32
class TestModel(keras.Model):
  """Minimal subclassed model: a single 10-unit dense projection."""

  def __init__(self):
    super().__init__(name='test_model')
    self.dense = keras.layers.Dense(10)

  def call(self, inputs):
    outputs = self.dense(inputs)
    return outputs
class DictMetric(keras.metrics.MeanSquaredError):
  """MSE metric whose `result()` reports the same value under two keys."""

  def result(self):
    mse = super().result()
    return {'mean_squared_error_1': mse, 'mean_squared_error_2': mse}
class ModelType(enum.Enum):
  """Model architectures exercised by the parameterized tests below."""
  SEQUENTIAL = 'sequential'
  SUBCLASS = 'subclass'
def _test_model_builder(model_type: ModelType, compile_model, build_model):
  """Builds a test model of the requested flavor.

  Args:
    model_type: a `ModelType` value selecting the architecture.
    compile_model: whether to compile with an SGD optimizer, MSE loss, and
      the metrics the tests assert on.
    build_model: whether to call `model.build()` with a fixed `(None, 32)`
      input shape.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: if `model_type` is not a recognized `ModelType`.
  """
  if model_type == ModelType.SEQUENTIAL:
    model = keras.Sequential([keras.layers.Dense(10)])
  elif model_type == ModelType.SUBCLASS:
    model = TestModel()
  else:
    # Fail fast instead of hitting a confusing UnboundLocalError below.
    raise ValueError(f'Unsupported model type: {model_type}')

  if compile_model:
    model.compile(
        gradient_descent.SGD(),
        loss='mse',
        metrics=[keras.metrics.CategoricalAccuracy(),
                 DictMetric()])
  if build_model:
    model.build((None, 32))
  return model
class SidecarEvaluatorTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `sidecar_evaluator.SidecarEvaluator` restore and evaluation."""

  def assertSummaryEventsWritten(self, log_dir):
    """Checks evaluation summaries with the expected tags exist in `log_dir`."""
    # Asserts summary files do get written when log_dir is provided.
    summary_files = tf.io.gfile.listdir(log_dir)
    self.assertNotEmpty(
        summary_files, 'Summary should have been written and '
        'log_dir should not be empty.')

    # Asserts the content of the summary file.
    event_pb_written = False
    event_tags = []
    for summary_file in summary_files:
      for event_pb in tf.compat.v1.train.summary_iterator(
          os.path.join(log_dir, summary_file)):
        if event_pb.step > 0:
          # One epoch over 1000 samples batched by 32 yields 32 iterations,
          # so every non-zeroth recorded step is expected at step 32.
          self.assertEqual(event_pb.step, 32)
          event_tags.append(event_pb.summary.value[0].tag)
          event_pb_written = True
    self.assertCountEqual(event_tags, [
        'evaluation_categorical_accuracy_vs_iterations',
        'evaluation_loss_vs_iterations',
        'evaluation_mean_squared_error_1_vs_iterations',
        'evaluation_mean_squared_error_2_vs_iterations',
    ])

    # Verifying at least one non-zeroth step is written to summary.
    self.assertTrue(event_pb_written)

  def assertModelsSameVariables(self, model_a, model_b):
    """Asserts the two models have identical variable counts and values."""
    # Check both have the same number of variables.
    self.assertEqual(len(model_a.variables), len(model_b.variables))

    # Check variable values to be equal.
    for var_a, var_b in zip(model_a.variables, model_b.variables):
      self.assertAllEqual(var_a.numpy(), var_b.numpy())

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'], model_type=[ModelType.SEQUENTIAL,
                                      ModelType.SUBCLASS]))
  def testIterationsNotSavedWillRaiseError(self, model_type):
    model = _test_model_builder(
        model_type=model_type, compile_model=False, build_model=True)

    checkpoint_dir = self.get_temp_dir()
    # The checkpoint deliberately omits the optimizer, so no `iterations`
    # variable is saved for the evaluator to restore.
    checkpoint = tf.train.Checkpoint(model=model)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint, checkpoint_dir, max_to_keep=2)
    checkpoint_manager.save()

    sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
        model, data=None, checkpoint_dir=checkpoint_dir)
    with self.assertRaisesRegex(
        RuntimeError, '`iterations` cannot be loaded '
        'from the checkpoint file.'):
      sidecar_evaluator.start()

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'], model_type=[ModelType.SEQUENTIAL,
                                      ModelType.SUBCLASS]))
  def testModelNotBuiltRaiseError(self, model_type):
    # An unbuilt, uncompiled model has no variables, so the evaluator finds
    # nothing to restore.
    model = _test_model_builder(
        model_type=model_type, compile_model=False, build_model=False)

    checkpoint_dir = self.get_temp_dir()
    checkpoint = tf.train.Checkpoint(model=model)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint, checkpoint_dir, max_to_keep=2)
    checkpoint_manager.save()

    sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
        model, data=None, checkpoint_dir=checkpoint_dir)
    with self.assertRaisesRegex(AssertionError, 'Nothing to load.'):
      sidecar_evaluator.start()

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'],
          model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
          build_model=[True, False]))
  def testSidecarEvaluatorOutputsSummary(self, model_type, build_model):
    # Create a model with synthetic data, and fit for one epoch.
    model = _test_model_builder(
        model_type=model_type, compile_model=True, build_model=False)
    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))
    dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    dataset = dataset.batch(32)
    model.fit(dataset, epochs=1)

    # Save a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'summary')
    logging.info('checkpoint_dir = %s, log_dir = %s', checkpoint_dir, log_dir)
    checkpoint = tf.train.Checkpoint(
        model=model, optimizer=model.optimizer)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint, checkpoint_dir, max_to_keep=2)
    logging.info('Checkpoint manager saved to: %s', checkpoint_manager.save())
    self.assertNotEmpty(
        tf.io.gfile.listdir(checkpoint_dir),
        'Checkpoint should have been written and '
        'checkpoint_dir should not be empty.')

    # Create a new model used for evaluation.
    eval_model = _test_model_builder(
        model_type=model_type, compile_model=True, build_model=build_model)
    # Have a sidecar_evaluator evaluate once.
    sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
        eval_model,
        data=dataset,
        checkpoint_dir=checkpoint_dir,
        max_evaluations=1,
        callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir)])
    sidecar_evaluator.start()
    # Eval model has been restored to the same state as the original model, so
    # their weights should match. If not, restoration of the model didn't
    # work.
    self.assertModelsSameVariables(model, eval_model)

    self.assertSummaryEventsWritten(os.path.join(log_dir, 'validation'))

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'],
          model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
          build_model=[True, False]))
  def testSidecarEvaluatorOutputsSummarySavedWithCallback(
      self, model_type, build_model):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'checkpoints')
    log_dir = os.path.join(self.get_temp_dir(), 'summary')
    # Create a model with synthetic data, and fit for one epoch.
    model = _test_model_builder(
        model_type=model_type, compile_model=True, build_model=False)
    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))
    dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    dataset = dataset.batch(_BATCH_SIZE)
    save_callback = keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(checkpoint_dir, 'ckpt-{epoch}'),
        save_weights_only=True)
    model.fit(dataset, epochs=1, callbacks=[save_callback])
    self.assertNotEmpty(
        tf.io.gfile.listdir(checkpoint_dir),
        'Checkpoint should have been written and '
        'checkpoint_dir should not be empty.')

    # Create a new model used for evaluation.
    eval_model = _test_model_builder(
        model_type=model_type, compile_model=True, build_model=build_model)
    # Have an sidecar_evaluator evaluate once.
    sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
        eval_model,
        data=dataset,
        checkpoint_dir=checkpoint_dir,
        max_evaluations=1,
        callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir)])
    with self.assertLogs() as cm:
      sidecar_evaluator.start()

    # Exactly one evaluation should have run and logged its metrics.
    metrics_logging = [
        line for line in cm.output if 'End of evaluation' in line
    ]
    self.assertLen(metrics_logging, 1)
    expected_logged_metrics = [
        'loss', 'categorical_accuracy', 'mean_squared_error_1',
        'mean_squared_error_2'
    ]
    for metric_name in expected_logged_metrics:
      self.assertRegex(metrics_logging[0], f'{metric_name}=')

    # Eval model has been restored to the same state as the original model, so
    # their weights should match. If not, restoration of the model didn't
    # work.
    self.assertModelsSameVariables(model, eval_model)

    # check the iterations is restored.
    self.assertEqual(sidecar_evaluator._iterations.numpy(), _BATCH_SIZE)

    self.assertSummaryEventsWritten(os.path.join(log_dir, 'validation'))
if __name__ == '__main__':
  # Standard single-process test entry point with TF2 behavior enabled.
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom Training Loop correctness test."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras.distribute import optimizer_combinations
from keras.distribute import strategy_combinations
# Deliberately tiny data/loop sizes so the strategy-vs-baseline comparison
# runs quickly.
_NUM_SAMPLES = 66
_BATCH_SIZE = 32
# Seed applied to both numpy and TF in setUp for reproducible runs.
_RANDOM_SEED = 1337
_NUM_EPOCHS = 2
_STEPS_PER_EPOCH = 2
class MaybeStrategyScope:
  """Context manager entering `strategy.scope()` only when one is given.

  When `strategy` is None (or falsy), entering and exiting this context is a
  no-op, which lets the same training code run with or without a
  distribution strategy.
  """

  def __init__(self, strategy):
    self._strategy = strategy
    self._scope = None

  def __enter__(self):
    if not self._strategy:
      return
    self._scope = self._strategy.scope()
    self._scope.__enter__()

  def __exit__(self, exc_type, value, traceback):
    if not self._strategy:
      return
    self._scope.__exit__(exc_type, value, traceback)
    self._scope = None
def get_model(sync_batchnorm=False):
  """Builds the small regression DNN used by the correctness tests.

  Args:
    sync_batchnorm: if True, use `SyncBatchNormalization` instead of
      `BatchNormalization`.

  Returns:
    An uncompiled `keras.Sequential` model mapping a scalar input to a
    scalar output.
  """
  batchnorm_cls = (
      keras.layers.SyncBatchNormalization
      if sync_batchnorm else keras.layers.BatchNormalization)
  return keras.Sequential([
      keras.layers.Dense(10, activation='relu', input_shape=(1,)),
      keras.layers.Dense(
          10, activation='relu',
          kernel_regularizer=keras.regularizers.l2(1e-4)),
      batchnorm_cls(),
      keras.layers.Dense(10, activation='relu'),
      keras.layers.Dense(1),
  ])
def get_data():
  """Returns the synthetic regression dataset where targets are 3 * inputs."""
  # Multiply in float64 first and cast afterwards, matching the original
  # numeric behavior bit-for-bit.
  features = np.random.rand(_NUM_SAMPLES, 1)
  labels = 3 * features
  features = features.astype('float32')
  labels = labels.astype('float32')
  dataset = tf.data.Dataset.from_tensor_slices((features, labels))
  return dataset.batch(_BATCH_SIZE)
def compute_loss(labels, logits, reg_losses):
  """Combines the scaled prediction loss with scaled regularization losses."""
  per_example_loss = keras.losses.mean_squared_error(labels, logits)
  averaged = tf.nn.compute_average_loss(
      per_example_loss, global_batch_size=_BATCH_SIZE)
  regularization = tf.nn.scale_regularization_loss(reg_losses)
  return averaged + regularization
def iteration_inside_func(initial_weights,
                          dataset,
                          optimizer_fn,
                          iteration_type,
                          strategy=None,
                          sync_batchnorm=None,
                          jit_compile=False):
  """Helper function to test iterating over data inside a tf.function.

  Args:
    initial_weights: weights set on a fresh model so all runs start from the
      same state.
    dataset: the input `tf.data.Dataset`.
    optimizer_fn: zero-arg callable returning a fresh optimizer.
    iteration_type: 'dataset' iterates the whole dataset each epoch; any
      other value draws `_STEPS_PER_EPOCH` batches from an iterator.
    strategy: optional distribution strategy; None runs without one.
    sync_batchnorm: whether to use SyncBatchNormalization (only effective
      when a strategy is given).
    jit_compile: whether to XLA-compile the inner step function.

  Returns:
    A `(final_weights, mean_loss_of_last_epoch, accuracy)` tuple.
  """
  with MaybeStrategyScope(strategy):
    if strategy and sync_batchnorm:
      model = get_model(sync_batchnorm)
    else:
      model = get_model()
    model.set_weights(initial_weights)
    optimizer = optimizer_fn()

    training_accuracy = keras.metrics.CategoricalAccuracy(
        'training_accuracy', dtype=tf.float32)

    @tf.function
    def train_epoch(dist_input):
      """Training StepFn."""

      @tf.function(jit_compile=jit_compile)
      def step_fn(inputs):
        samples, labels = inputs
        with tf.GradientTape() as tape:
          logits = model(samples)
          loss = compute_loss(labels, logits, model.losses)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        training_accuracy.update_state(labels, logits)
        return loss

      total_loss = 0.0
      num_batches = 0
      if iteration_type == 'dataset':
        for x in dist_input:
          if strategy:
            per_replica_losses = strategy.run(step_fn, args=(x,))
            total_loss += strategy.reduce(tf.distribute.ReduceOp.SUM,
                                          per_replica_losses,
                                          axis=None)
          else:
            total_loss += step_fn(x)
          num_batches += 1
      else:
        iterator = iter(dist_input)
        for _ in range(_STEPS_PER_EPOCH):
          if strategy:
            per_replica_losses = strategy.run(step_fn, args=(next(iterator),))
            total_loss += strategy.reduce(tf.distribute.ReduceOp.SUM,
                                          per_replica_losses,
                                          axis=None)
          else:
            total_loss += step_fn(next(iterator))
          num_batches += 1
      return total_loss / tf.cast(num_batches, dtype=tf.float32)

    if strategy:
      dataset = strategy.experimental_distribute_dataset(dataset)

    # Only the last epoch's mean loss is kept and returned.
    for _ in range(_NUM_EPOCHS):
      loss = train_epoch(dataset)

    return (model.get_weights(),
            loss,
            training_accuracy.result())
def iteration_outside_func(initial_weights,
                           dataset,
                           optimizer_fn,
                           iteration_type,
                           strategy=None,
                           sync_batchnorm=None,
                           jit_compile=False):
  """Helper function to test iterating over data outside a tf.function.

  Same contract as `iteration_inside_func`, except the epoch/batch loops run
  in eager Python and only the per-batch train step is a tf.function. The
  returned loss here is the mean over ALL epochs, not just the last.
  """
  with MaybeStrategyScope(strategy):
    model = get_model(sync_batchnorm=sync_batchnorm)
    model.set_weights(initial_weights)
    optimizer = optimizer_fn()

    training_accuracy = keras.metrics.CategoricalAccuracy(
        'training_accuracy', dtype=tf.float32)

    @tf.function
    def train_step(dist_inputs):
      """Training StepFn."""

      @tf.function(jit_compile=jit_compile)
      def step_fn(inputs):
        samples, labels = inputs
        with tf.GradientTape() as tape:
          logits = model(samples)
          loss = compute_loss(labels, logits, model.losses)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        training_accuracy.update_state(labels, logits)
        return loss

      if strategy:
        per_replica_losses = strategy.run(step_fn, args=(dist_inputs,))
        return strategy.reduce(tf.distribute.ReduceOp.SUM,
                               per_replica_losses,
                               axis=None)
      else:
        return step_fn(dist_inputs)

    if strategy:
      dataset = strategy.experimental_distribute_dataset(dataset)

    total_loss = 0.0
    num_batches = 0
    if iteration_type == 'dataset':
      for _ in range(_NUM_EPOCHS):
        for x in dataset:
          total_loss += train_step(x)
          num_batches += 1
    else:
      for _ in range(_NUM_EPOCHS):
        iterator = iter(dataset)
        for _ in range(_STEPS_PER_EPOCH):
          total_loss += train_step(next(iterator))
          num_batches += 1

    return (model.get_weights(),
            total_loss / tf.cast(num_batches, dtype=tf.float32),
            training_accuracy.result())
class TestDistributionStrategyDnnCorrectness(tf.test.TestCase,
                                             parameterized.TestCase):
  """Test custom training loop correctness with a simple DNN model."""

  def setUp(self):
    super(TestDistributionStrategyDnnCorrectness, self).setUp()
    tf.compat.v1.enable_v2_behavior()
    # Seed both numpy and TF so the distributed and baseline runs see the
    # same data and initialization.
    np.random.seed(_RANDOM_SEED)
    tf.compat.v1.set_random_seed(_RANDOM_SEED)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategy_combinations.all_strategies,
          optimizer_fn=optimizer_combinations.optimizers_v2,
          mode=['eager'],
          iteration_type=['iterator', 'dataset'],
          inside_func=[False, True],
          sync_batchnorm=[True, False],
          jit_compile=[False]) + tf.__internal__.test.combinations.combine(
              distribution=strategy_combinations.multiworker_strategies,
              optimizer_fn=[
                  optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
                  optimizer_combinations.adagrad_optimizer_keras_v2_fn
              ],
              mode=['eager'],
              iteration_type=['iterator', 'dataset'],
              inside_func=[False, True],
              sync_batchnorm=[True, False],
              jit_compile=[False]) +
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.one_device_strategy_gpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
          ],
          optimizer_fn=[
              optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
              optimizer_combinations.adagrad_optimizer_keras_v2_fn
          ],
          mode=['eager'],
          iteration_type=['iterator', 'dataset'],
          inside_func=[False, True],
          sync_batchnorm=[True, False],
          jit_compile=[True]))
  def test_dnn_correctness_minus_tpus(self, distribution, optimizer_fn,
                                      iteration_type, inside_func,
                                      sync_batchnorm, jit_compile):
    # TODO(anjs): Identify why this particular V1 optimizer needs a higher tol.
    if 'FtrlV1' in optimizer_fn._name and 'TPU' in type(distribution).__name__:
      self.skipTest('Reduced tolerance of the order of 1e-1 required.')
    self.dnn_correctness(distribution, optimizer_fn, iteration_type,
                         inside_func, sync_batchnorm, jit_compile)

  def dnn_correctness(self,
                      distribution,
                      optimizer_fn,
                      iteration_type,
                      inside_func,
                      sync_batchnorm=None,
                      jit_compile=False):
    """Runs training with and without `distribution` and compares results."""
    model = get_model(sync_batchnorm)
    initial_weights = model.get_weights()
    dataset = get_data()
    if inside_func:
      iteration_func = iteration_inside_func
    else:
      iteration_func = iteration_outside_func

    wts_with_ds, loss_with_ds, acc_with_ds = iteration_func(
        initial_weights,
        dataset,
        optimizer_fn,
        iteration_type,
        strategy=distribution,
        sync_batchnorm=sync_batchnorm,
        jit_compile=jit_compile)
    wts, loss, acc = iteration_func(
        initial_weights,
        dataset,
        optimizer_fn,
        iteration_type,
        sync_batchnorm=sync_batchnorm,
        jit_compile=False)

    # The distributed run must closely match the non-distributed baseline.
    self.assertAllClose(wts, wts_with_ds, atol=1e-3, rtol=1e-3)
    self.assertAllClose(loss, loss_with_ds, atol=1e-3, rtol=1e-3)
    self.assertAllClose(acc, acc_with_ds, atol=1e-3, rtol=1e-3)
if __name__ == '__main__':
  # Multi-process test entry point (required for the multiworker strategies).
  tf.__internal__.distribute.multi_process_runner.test_main()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distribute coordinator.
The module is used only for utils to support legacy TF1 code path involving
distribute coordinator, and is not expected to change in any way. This is
subject to cleanup once TF1 is no longer supported.
TODO(rchao): Remove this module once TF1 is not supported.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import copy
import json
import os
import threading
import time
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.platform import tf_logging as logging
# Thread-local holder for the active `_WorkerContext`; read via
# `get_current_worker_context` and set/cleared by `_WorkerContext.__enter__`
# and `__exit__`.
_worker_context = threading.local()
_thread_local = threading.local()
def get_current_worker_context():
  """Returns the current task context, or None if none has been entered."""
  return getattr(_worker_context, "current", None)
class _TaskType:
  """String constants naming the roles a task can take in a cluster."""
  PS = "ps"
  WORKER = "worker"
  CHIEF = "chief"
  EVALUATOR = "evaluator"
  CLIENT = "client"
def _get_num_workers(cluster_spec):
  """Gets number of workers including chief."""
  if not cluster_spec:
    return 0
  jobs = cluster_spec.as_dict()
  workers = jobs.get(_TaskType.WORKER, [])
  chiefs = jobs.get(_TaskType.CHIEF, [])
  return len(workers) + len(chiefs)
class _WorkerContext:
"""The worker context class.
This context object provides configuration information for each task. One
context manager with a worker context object will be created per
invocation to the `worker_fn` where `get_current_worker_context` can be called
to access the worker context object.
"""
def __init__(self,
strategy,
cluster_spec,
task_type,
task_id,
session_config=None,
rpc_layer="grpc",
worker_barrier=None):
"""Initialize the worker context object.
Args:
strategy: a `DistributionStrategy` object.
cluster_spec: a ClusterSpec object. It can be empty or None in the local
training case.
task_type: a string indicating the role of the corresponding task, such as
"worker" or "ps". It can be None if it is local training or in-graph
replicated training.
task_id: an integer indicating id of the corresponding task. It can be
None if it is local training or in-graph replicated training.
session_config: an optional `tf.compat.v1.ConfigProto` object.
rpc_layer: optional string specifying the RPC protocol for communication
with worker masters. If None or empty, hosts in the `cluster_spec` will
be used directly.
worker_barrier: optional, the barrier object for worker synchronization.
"""
self._strategy = strategy
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._session_config = session_config
self._worker_barrier = worker_barrier
self._rpc_layer = rpc_layer
self._master_target = self._get_master_target()
self._num_workers = _get_num_workers(cluster_spec)
self._is_chief_node = self._is_chief()
def _debug_message(self):
if self._cluster_spec:
return "[cluster_spec: %r, task_type: %r, task_id: %r]" % (
self._cluster_spec, self.task_type, self.task_id)
else:
return "[local]"
def __enter__(self):
old_context = get_current_worker_context()
if old_context:
raise ValueError(
"You cannot run distribute coordinator in a `worker_fn`.\t" +
self._debug_message())
# pylint: disable=protected-access
_worker_context.current = self
def __exit__(self, unused_exception_type, unused_exception_value,
unused_traceback):
# pylint: disable=protected-access
_worker_context.current = None
def _get_master_target(self):
"""Return the master target for a task."""
# If cluster_spec is None or empty, we use local master.
if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR:
return ""
# If task_type is None, then it is in-graph replicated training. In this
# case we use the chief or first worker's master target.
if not self._task_type:
if _TaskType.CHIEF in self._cluster_spec.jobs:
task_type = _TaskType.CHIEF
task_id = 0
else:
assert _TaskType.WORKER in self._cluster_spec.jobs
task_type = _TaskType.WORKER
task_id = 0
else:
task_type = self._task_type
task_id = self._task_id
prefix = ""
if self._rpc_layer:
prefix = self._rpc_layer + "://"
return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0]
def _is_chief(self):
"""Return whether the task is the chief worker."""
if (not self._cluster_spec or
self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]):
return True
# If not local and chief not in the cluster_spec, use the first worker as
# chief.
if (_TaskType.CHIEF not in self._cluster_spec.jobs and
self._task_type == _TaskType.WORKER and self._task_id == 0):
return True
return False
def wait_for_other_workers(self):
"""Waits for other workers to reach the same call to this method.
Raises:
ValueError: if `worker_barrier` is not passed to the __init__ method.
"""
if not self._worker_barrier:
# TODO(yuefengz): we should throw an error in independent worker mode.
return
self._worker_barrier.wait()
def session_creator(self,
scaffold=None,
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
max_wait_secs=7200):
"""Returns a session creator.
The returned session creator will be configured with the correct master
target and session configs. It will also run either init ops or ready ops
by querying the `strategy` object when `create_session` is called on it.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be
specified.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
a descendant of SessionCreator.
"""
if config:
session_config = copy.deepcopy(config)
session_config.MergeFrom(self._session_config)
else:
session_config = self._session_config
if not self._strategy or self._strategy.extended.experimental_should_init:
logging.info("Creating chief session creator with config: %r", config)
return tf.compat.v1.train.ChiefSessionCreator(
scaffold,
master=self.master_target,
config=session_config,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
else:
logging.info("Creating worker session creator with config: %r", config)
return tf.compat.v1.train.WorkerSessionCreator(
scaffold,
master=self.master_target,
config=session_config,
max_wait_secs=max_wait_secs)
@property
def session_config(self):
return copy.deepcopy(self._session_config)
@property
def has_barrier(self):
"""Whether the barrier is set or not."""
return self._worker_barrier is not None
@property
def distributed_mode(self):
"""Whether it is distributed training or not."""
return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR
@property
def cluster_spec(self):
"""Returns a copy of the cluster_spec object."""
return copy.deepcopy(self._cluster_spec)
@property
def task_type(self):
"""Returns the role of the corresponding task."""
return self._task_type
@property
def task_id(self):
"""Returns the id or index of the corresponding task."""
return self._task_id
@property
def master_target(self):
"""Returns the session master for the corresponding task to connect to."""
return self._master_target
@property
def is_chief(self):
"""Returns whether the task is a chief node."""
return self._is_chief_node
@property
def num_workers(self):
"""Returns number of workers in the cluster, including chief."""
return self._num_workers
  @property
  def experimental_should_init(self):
    """Whether to run init ops; delegated to the strategy's extended API."""
    return self._strategy.extended.experimental_should_init
  @property
  def should_checkpoint(self):
    """Whether this task should save checkpoints (per the strategy)."""
    return self._strategy.extended.should_checkpoint
  @property
  def should_save_summary(self):
    """Whether this task should save summaries (per the strategy)."""
    return self._strategy.extended.should_save_summary
def _run_single_worker(worker_fn,
                       strategy,
                       cluster_spec,
                       task_type,
                       task_id,
                       session_config,
                       rpc_layer="",
                       worker_barrier=None,
                       coord=None):
  """Runs `worker_fn` for one task inside a fresh `_WorkerContext`.

  `session_config` and `strategy` are deep-copied first so that per-worker
  configuration never leaks between threads sharing the same objects.
  """
  session_config = copy.deepcopy(session_config)
  strategy = copy.deepcopy(strategy)
  if task_type == _TaskType.EVALUATOR:
    # Single-machine eval runs on the EVALUATOR task; a strategy object is
    # optional there.
    if strategy:
      strategy.configure(session_config)
  else:
    assert strategy
    strategy.configure(session_config, cluster_spec, task_type, task_id)
  worker_context = _WorkerContext(
      strategy,
      cluster_spec,
      task_type,
      task_id,
      session_config=session_config,
      rpc_layer=rpc_layer,
      worker_barrier=worker_barrier)
  with worker_context:
    if not coord:
      return worker_fn(strategy)
    # Let the coordinator capture any exception raised by the worker.
    with coord.stop_on_exception():
      return worker_fn(strategy)
def _split_cluster_for_evaluator(cluster_spec, task_type):
  """Split the cluster for evaluator since it needn't talk to other tasks."""
  # Splitting prevents the evaluator from talking to the rest of the cluster:
  # the evaluator may run without a distribution strategy, so its ops can have
  # unspecified devices and could otherwise land on other tasks. When a
  # strategy is used, its `update_config_proto` sets equivalent device
  # filters; bypassing distribute coordinator, you can set filters yourself.
  cluster_dict = normalize_cluster_spec(cluster_spec).as_dict()
  if task_type == _TaskType.EVALUATOR:
    assert _TaskType.EVALUATOR in cluster_dict
    trimmed = {_TaskType.EVALUATOR: cluster_dict[_TaskType.EVALUATOR]}
  else:
    cluster_dict.pop(_TaskType.EVALUATOR, None)
    trimmed = cluster_dict
  return normalize_cluster_spec(trimmed)
def _run_std_server(cluster_spec=None,
                    task_type=None,
                    task_id=None,
                    session_config=None,
                    rpc_layer=None,
                    environment=None):
  """Runs a standard server.

  Args:
    cluster_spec: `ClusterSpec` naming every task's address; required.
    task_type: job name of this task within `cluster_spec`.
    task_id: index of this task within its job.
    session_config: optional `ConfigProto` used as the server's default config.
    rpc_layer: optional RPC protocol prefix, e.g. "grpc".
    environment: if "google", a fake in-process server is used instead of a
      real `tf.distribute.Server`.

  Returns:
    The started server object; cached per-thread across calls.
  """
  # Check if the Server is already running. If so, assert that no configuration
  # options have changed, and return the existing Server. This allows us to
  # call `run_distribute_coordinator` multiple times.
  if getattr(_thread_local, "server", None) is not None:
    assert _thread_local.cluster_spec == cluster_spec
    assert _thread_local.task_type == task_type
    assert _thread_local.task_id == task_id
    assert _thread_local.session_config_str == repr(session_config)
    assert _thread_local.rpc_layer == rpc_layer
    assert _thread_local.environment == environment
    return _thread_local.server
  else:
    # This method is not thread-safe.
    _thread_local.server_started = True
    _thread_local.cluster_spec = cluster_spec
    _thread_local.task_type = task_type
    _thread_local.task_id = task_id
    _thread_local.session_config_str = repr(session_config)
    _thread_local.rpc_layer = rpc_layer
    _thread_local.environment = environment
  assert cluster_spec
  target = cluster_spec.task_address(task_type, task_id)
  if rpc_layer:
    target = rpc_layer + "://" + target
  class _FakeServer:
    """A fake server that runs a master session."""
    def start(self):
      # A tensorflow server starts when a remote session is created.
      logging.info(
          "Creating a remote session to start a TensorFlow server, "
          "target = %r, session_config=%r", target, session_config)
      tf.compat.v1.Session(target=target, config=session_config)
    def join(self):
      # Block forever; the fake server never terminates on its own.
      while True:
        time.sleep(5)
  if environment == "google":
    server = _FakeServer()
  else:
    if session_config:
      logging.info(
          "Starting standard TensorFlow server, target = %r, session_config= "
          "%r", target, session_config)
    else:
      logging.info("Starting standard TensorFlow server, target = %r", target)
    # Trim the cluster so the evaluator (or everyone else) is isolated.
    cluster_spec = _split_cluster_for_evaluator(cluster_spec, task_type)
    server = tf.distribute.Server(
        cluster_spec,
        job_name=task_type,
        task_index=task_id,
        config=session_config,
        protocol=rpc_layer)
    server.start()
  _thread_local.server = server
  return server
def _configure_session_config_for_std_servers(strategy, eval_strategy,
                                              session_config, cluster_spec,
                                              task_type, task_id):
  # pylint: disable=g-doc-args
  """Call strategy's `configure` to mutate the session_config.

  The session_config is currently needed as default config for a TensorFlow
  server. In the future, we should be able to remove this method and only pass
  the session config to a client session.
  """
  if task_type == _TaskType.EVALUATOR:
    if eval_strategy:
      eval_strategy.configure(session_config=session_config)
  else:
    # The strategy may be shared in standalone client mode, so mutate a copy.
    strategy_copy = copy.deepcopy(strategy)
    strategy_copy.configure(
        session_config=session_config,
        cluster_spec=cluster_spec,
        task_type=task_type,
        task_id=task_id)
  # Remove the device filters specific to the strategy, so that the
  # TensorFlow server brought up with one strategy can be used by other
  # strategies. The device filters can be set in the client side as well.
  del session_config.device_filters[:]
# TODO(yuefengz): propagate cluster_spec in the STANDALONE_CLIENT mode.
# TODO(yuefengz): we may need a smart way to figure out whether the current task
# is the special task when we support cluster_spec propagation.
def run_distribute_coordinator(worker_fn,
                               strategy,
                               eval_fn=None,
                               eval_strategy=None,
                               cluster_spec=None,
                               task_type=None,
                               task_id=None,
                               session_config=None,
                               rpc_layer="grpc"):
  """Runs the coordinator for distributed TensorFlow.

  This function runs a split coordinator for distributed TensorFlow in its
  default mode, i.e the STANDALONE_CLIENT mode. Given a `cluster_spec`
  specifying server addresses and their roles in a cluster, this coordinator
  will figure out how to set them up, give the underlying function the right
  targets for master sessions via a scope object and coordinate their training.
  The cluster consisting of standard servers needs to be brought up either with
  the standard server binary or with a binary running distribute coordinator
  with `task_type` set to non-client type which will then turn into standard
  servers.

  In addition to be the distribute coordinator, this is also the source of
  configurations for each job in the distributed training. As there are multiple
  ways to configure a distributed TensorFlow cluster, its context object
  provides these configurations so that users or higher-level APIs don't have to
  figure out the configuration for each job by themselves.

  In the between-graph replicated training, this coordinator will create
  multiple threads and each calls the `worker_fn` which is supposed to create
  its own graph and connect to one worker master given by its context object. In
  the in-graph replicated training, it has only one thread calling this
  `worker_fn`.

  Another mode is the INDEPENDENT_WORKER mode where each server runs a
  distribute coordinator which will start a standard server and optionally runs
  `worker_fn` depending whether it is between-graph training or in-graph
  replicated training.

  The `strategy` object is expected to be a DistributionStrategy object which
  has implemented methods needed by distributed coordinator such as
  `configure(session_config, cluster_spec, task_type, task_id)` which configures
  the strategy object for a specific task and `experimental_should_init`
  property which instructs the distribute coordinator whether to run init ops
  for a task. The distribute coordinator will make a copy of the `strategy`
  object, call its `configure` method and pass it to `worker_fn` as an argument.

  The `worker_fn` defines the training logic and is called under its own
  worker context which can be accessed to via `get_current_worker_context`. A
  worker context provides access to configurations for each task, e.g. the
  task_type, task_id, master target and so on. Since `worker_fn` will be called
  in a thread and possibly multiple times, caller should be careful when it
  accesses global data. For example, it is unsafe to define flags in a
  `worker_fn` or to define different environment variables for different
  `worker_fn`s.

  The `worker_fn` for the between-graph replication is defined as if there is
  only one worker corresponding to the `worker_fn` and possibly ps jobs. For
  example, when training with parameter servers, it assigns variables to
  parameter servers and all other operations to that worker. In the in-graph
  replication case, the `worker_fn` has to define operations for all worker
  jobs. Using a distribution strategy can simplify the `worker_fn` by not having
  to worry about the replication and device assignment of variables and
  operations.

  This method is intended to be invoked by high-level APIs so that users don't
  have to explicitly call it to run this coordinator. For those who don't use
  high-level APIs, to change a program to use this coordinator, wrap everything
  in a the program after global data definitions such as commandline flag
  definition into the `worker_fn` and get task-specific configurations from
  the worker context.

  The `cluster_spec` can be either passed by the argument or parsed from the
  "TF_CONFIG" environment variable. Example of a TF_CONFIG:
  ```
    cluster = {'chief': ['host0:2222'],
               'ps': ['host1:2222', 'host2:2222'],
               'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
    os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster})
  ```

  If `cluster_spec` is not given in any format, it becomes local training and
  this coordinator will connect to a local session.

  For evaluation, if "evaluator" exists in the cluster_spec, a separate thread
  will be created to call `eval_fn` with its `task_type` set to "evaluator". If
  `eval_fn` is not defined, fall back to `worker_fn`. This implies that
  evaluation will be done on a single machine if there is an "evaluator" task.
  If "evaluator" doesn't exist in the cluster_spec, it entirely depends on the
  `worker_fn` for how to do evaluation.

  Args:
    worker_fn: the function to be called. The function should accept a
      `strategy` object and will be given access to a context object via a
      context manager scope.
    strategy: a DistributionStrategy object specifying whether it should run
      between-graph replicated training or not, whether to run init ops, etc.
      This object will also be configured given `session_config`,
      `cluster_spec`, `task_type` and `task_id`.
    eval_fn: optional function for "evaluator" task. If `eval_fn` is not passed
      in but a "evaluator" task is found in the `cluster_spec`, the `worker_fn`
      will be used for this task.
    eval_strategy: optional DistributionStrategy object for "evaluator" task.
    cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles
      in a cluster. If not set or empty, fall back to local training.
    task_type: the current task type, optional if this is a client.
    task_id: the current task id, optional if this is a client.
    session_config: an optional `tf.compat.v1.ConfigProto` object which will be
      passed to `strategy`'s `configure` method and used to create a session.
    rpc_layer: optional string, the protocol for RPC, e.g. "grpc".

  Raises:
    ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or
      a ClusterSpec.

  Returns:
    In the client job, return the value returned by `worker_fn` if
    it is in-graph replication or INDEPENDENT_WORKER mode; return None
    otherwise.
  """
  # TF_CONFIG (if present) overrides the arguments for rpc layer, environment,
  # cluster and task identity.
  tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
  rpc_layer = tf_config.get("rpc_layer", rpc_layer)
  environment = tf_config.get("environment", None)
  if not cluster_spec:
    cluster_spec = tf_config.get("cluster", {})
    task_env = tf_config.get("task", {})
    if task_env:
      task_type = task_env.get("type", task_type)
      task_id = int(task_env.get("index", task_id))
  if cluster_spec:
    # TODO(yuefengz): validate cluster_spec.
    cluster_spec = normalize_cluster_spec(cluster_spec)
  elif hasattr(strategy.extended, "_cluster_resolver"):
    # No explicit cluster: fall back to the strategy's own cluster resolver.
    cluster_resolver = strategy.extended._cluster_resolver # pylint: disable=protected-access
    task_type = cluster_resolver.task_type
    task_id = cluster_resolver.task_id
    rpc_layer = cluster_resolver.rpc_layer or rpc_layer
    environment = cluster_resolver.environment
    cluster_spec = cluster_resolver.cluster_spec()
  # Setting the session config is necessary for some strategies such as
  # CollectiveAllReduceStrategy.
  session_config = session_config or tf.compat.v1.ConfigProto(
      allow_soft_placement=True)
  if cluster_spec:
    logging.info(
        "Running Distribute Coordinator with cluster_spec = %r, "
        "task_type = %r, task_id = %r, environment = %r, rpc_layer = %r",
        cluster_spec.as_dict(), task_type, task_id, environment, rpc_layer)
  if not cluster_spec:
    # `mode` is ignored in the local case.
    logging.info("Running local Distribute Coordinator.")
    _run_single_worker(worker_fn, strategy, None, None, None, session_config,
                       rpc_layer)
    if eval_fn:
      _run_single_worker(eval_fn, eval_strategy, None, None, None,
                         session_config, rpc_layer)
    else:
      logging.warning("Skipped evaluation since `eval_fn` is not passed in.")
  else:
    if not eval_fn:
      logging.warning("`eval_fn` is not passed in. The `worker_fn` will be "
                      "used if an \"evaluator\" task exists in the cluster.")
    eval_fn = eval_fn or worker_fn
    if not eval_strategy:
      logging.warning("`eval_strategy` is not passed in. No distribution "
                      "strategy will be used for evaluation.")
    # Every one starts a standard server, get session config from `configure`
    # method.
    _configure_session_config_for_std_servers(strategy, eval_strategy,
                                              session_config, cluster_spec,
                                              task_type, task_id)
    if (task_type != _TaskType.EVALUATOR and
        not getattr(strategy.extended, "_std_server_started", False)):
      # Right now, with eager mode, context is configured with a std server at
      # the very beginning while with graph mode the std server is started when
      # distribute coordinator is called. We should consolidate these two paths.
      server = _run_std_server(
          cluster_spec=cluster_spec,
          task_type=task_type,
          task_id=task_id,
          session_config=session_config,
          rpc_layer=rpc_layer,
          environment=environment)
    if task_type in [_TaskType.CHIEF, _TaskType.WORKER]:
      if strategy.extended.experimental_between_graph:
        # All jobs run `worker_fn` if between-graph.
        return _run_single_worker(worker_fn, strategy, cluster_spec, task_type,
                                  task_id, session_config, rpc_layer)
      else:
        # Only one node runs `worker_fn` if in-graph.
        context = _WorkerContext(strategy, cluster_spec, task_type, task_id)
        if context.is_chief:
          return _run_single_worker(worker_fn, strategy, cluster_spec, None,
                                    None, session_config, rpc_layer)
        else:
          server.join()
    elif task_type == _TaskType.EVALUATOR:
      return _run_single_worker(eval_fn, eval_strategy, cluster_spec, task_type,
                                task_id, session_config, rpc_layer)
    else:
      if task_type != _TaskType.PS:
        raise ValueError("Unexpected task_type: %r" % task_type)
      server.join()
def normalize_cluster_spec(cluster_spec):
  """Makes `cluster_spec` into a `ClusterSpec` object.

  Args:
    cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
      cluster configurations.

  Returns:
    a `ClusterSpec` object.

  Raises:
    ValueError: if `cluster_spec` is not a dict or a `ClusterSpec` or a
      `ClusterDef`.
  """
  if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)):
    return tf.train.ClusterSpec(cluster_spec)
  elif not isinstance(cluster_spec, tf.train.ClusterSpec):
    # Fixed message: quoting was previously mismatched ("`cluster_spec'").
    raise ValueError(
        "`cluster_spec` should be a dict or a `tf.train.ClusterSpec` or a "
        "`tf.train.ClusterDef` object")
  return cluster_spec
| 27,050 | 38.89823 | 94 | py |
keras | keras-master/keras/distribute/dataset_creator_model_fit_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import tensorflow.compat.v2 as tf
import numpy as np
from tensorflow.python.framework import test_util
from keras.distribute import dataset_creator_model_fit_test_base as test_base
from keras.distribute import strategy_combinations
from keras.utils import dataset_creator
# TODO(rchao): Investigate why there cannot be single worker and multi worker
# PS strategies running in the same shard.
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.all_strategies +
        strategy_combinations.multi_worker_mirrored_strategies +
        strategy_combinations.parameter_server_strategies_multi_worker,
        mode="eager"))
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
  """Tests `Model.fit`/`evaluate`/`predict` with `DatasetCreator` and other
  input types across single-/multi-worker and parameter-server strategies."""
  def setUp(self):
    super().setUp()
    if test_util.is_xla_enabled():
      self.skipTest("model.optimizer.iterations values is not as expected "
                    "with XLA: b/184384487")
  def testModelFit(self, strategy):
    model = self._model_fit(strategy)
    self.assertEqual(model.optimizer.iterations, 100)
  def testModelFitwithStepsPerEpochNegativeOne(self, strategy):
    # steps_per_epoch=-1 means "run until the dataset is exhausted"; with a
    # coordinator (PS strategies) an infinite-cardinality assumption makes
    # this raise instead.
    def dataset_fn(input_context):
      del input_context
      x = tf.random.uniform((10, 10))
      y = tf.random.uniform((10,))
      return tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(10).batch(2)
    if strategy._should_use_with_coordinator:
      with self.assertRaises((tf.errors.OutOfRangeError,
                              tf.errors.CancelledError)):
        self._model_fit(
            strategy,
            steps_per_epoch=-1,
            x=dataset_creator.DatasetCreator(dataset_fn),
            validation_data=dataset_creator.DatasetCreator(dataset_fn),
        )
    else:
      self._model_fit(
          strategy,
          steps_per_epoch=-1,
          x=dataset_creator.DatasetCreator(dataset_fn),
          validation_data=dataset_creator.DatasetCreator(dataset_fn),
      )
  def testModelFitWithNumpyData(self, strategy):
    x = np.random.rand(100, 10)
    y = np.random.rand(100, 1)
    model = self._model_fit(
        strategy,
        x=x,
        y=y,
        batch_size=1,
        validation_data=(x, y),
    )
    self.assertEqual(model.optimizer.iterations, 100)
  def testModelFitWithTensorData(self, strategy):
    x = tf.random.uniform((100, 10))
    y = tf.random.uniform((100,))
    model = self._model_fit(
        strategy,
        x=x,
        y=y,
        batch_size=1,
        validation_data=(x, y),
    )
    self.assertEqual(model.optimizer.iterations, 100)
  def testModelFitWithLookupLayer(self, strategy):
    model = self._model_fit(strategy, use_lookup_layer=True)
    self.assertEqual(model.optimizer.iterations, 100)
  def testModelFitWithNormalizationLayer(self, strategy):
    model = self._model_fit(strategy, with_normalization_layer=True)
    self.assertEqual(model.optimizer.iterations, 100)
  def testModelFitWithStepsPerExecution(self, strategy):
    model = self._model_fit(strategy, steps_per_execution=10)
    self.assertEqual(model.optimizer.iterations, 100)
  def testModelFitWithNoStepsPerEpoch(self, strategy):
    with self.assertRaisesRegex(
        ValueError,
        "When using a `tf.keras.utils.experimental.DatasetCreator`, "
        "`steps_per_epoch`, `validation_steps` or `steps` argument must be "
        "provided in `Model.fit`, `Model.evaluate`, or `Model.predict`."):
      self._model_fit(strategy, steps_per_epoch=None)
  def testModelEvaluate(self, strategy):
    self._model_evaluate(strategy)
    self.assertGreaterEqual(self._accuracy_metric.result(), 0.0)
  def testModelEvaluateWithNumpyData(self, strategy):
    x = np.random.rand(100, 10)
    y = np.random.rand(100, 1)
    self._model_evaluate(
        strategy,
        x=x,
        y=y,
        batch_size=1,
    )
    self.assertGreaterEqual(self._accuracy_metric.result(), 0.0)
  def testModelEvaluateWithTensorData(self, strategy):
    x = tf.random.uniform((100, 10))
    y = tf.random.uniform((100,))
    self._model_evaluate(
        strategy,
        x=x,
        y=y,
        batch_size=1,
    )
    self.assertGreaterEqual(self._accuracy_metric.result(), 0.0)
  def testModelEvaluateWithNormalizationLayer(self, strategy):
    self._model_evaluate(strategy, with_normalization_layer=True)
    self.assertGreaterEqual(self._accuracy_metric.result(), 0.0)
  def testModelEvaluateWithStepsPerExecution(self, strategy):
    self._model_evaluate(strategy, steps_per_execution=10)
    self.assertGreaterEqual(self._accuracy_metric.result(), 0.0)
  def testModelEvaluateWithNoStepsPerEpoch(self, strategy):
    with self.assertRaisesRegex(
        ValueError,
        "When using a `tf.keras.utils.experimental.DatasetCreator`, "
        "`steps_per_epoch`, `validation_steps` or `steps` argument must be "
        "provided in `Model.fit`, `Model.evaluate`, or `Model.predict`."):
      self._model_evaluate(strategy, steps=None)
  def testModelPredict(self, strategy):
    _, predictions = self._model_predict(strategy, steps=3)
    # Check the first (0th index), fourth (3rd index) and the last predictions
    # because the first, fourth and the last input are the same in
    # `model.predict` so their predictions should match.
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelPredictWithNumpyData(self, strategy):
    x = np.array([[1.], [2.], [3.], [1.], [5.], [1.]])
    _, predictions = self._model_predict(strategy, test_data=x)
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelPredictWithTensorData(self, strategy):
    x = tf.constant([[1.], [2.], [3.], [1.], [5.], [1.]])
    _, predictions = self._model_predict(strategy, test_data=x)
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelPredictWithNormalizationLayer(self, strategy):
    _, predictions = self._model_predict(
        strategy, with_normalization_layer=True, steps=3)
    # Check the first (0th index), fourth (3rd index) and the last predictions
    # because the first, fourth and the last input are the same in
    # `model.predict` so their predictions should match.
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelPredictWithStepsPerExecution(self, strategy):
    _, predictions = self._model_predict(
        strategy, steps_per_execution=3, steps=3)
    # Check the first (0th index), fourth (3rd index) and the last predictions
    # because the first, fourth and the last input are the same in
    # `model.predict` so their predictions should match.
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelFitAndPredict(self, strategy):
    def fit_dataset_fn(input_context):
      del input_context
      x = tf.random.uniform((10, 1))
      y = tf.random.uniform((10,))
      return tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(10).repeat().batch(2)
    x = dataset_creator.DatasetCreator(fit_dataset_fn)
    validation_data = dataset_creator.DatasetCreator(fit_dataset_fn)
    model = self._model_fit(strategy, x=x, validation_data=validation_data)
    _, predictions = self._model_predict(strategy, model, steps=3)
    # Check the first (0th index), fourth (3rd index) and the last predictions
    # because the first, fourth and the last input are the same in
    # `model.predict` so their predictions should match.
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelPredictWithDatasetCreator(self, strategy):
    if isinstance(strategy,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest("b/189223991")
    def _dataset_fn(input_context):
      del input_context
      x = tf.constant([[1.], [2.], [3.], [1.], [5.], [1.]])
      return tf.data.Dataset.from_tensor_slices(x).repeat().batch(2)
    _, predictions = self._model_predict(
        strategy,
        steps=3,
        test_data=dataset_creator.DatasetCreator(_dataset_fn),
    )
    # Check the first (0th index), fourth (3rd index) and the last predictions
    # because the first, fourth and the last input are the same in
    # `model.predict` so their predictions should match.
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
  def testModelTrainTFFunction(self, strategy):
    model = self._model_fit(strategy)
    self.assertIsInstance(model.train_tf_function, tf.__internal__.function.Function)
if __name__ == "__main__":
  tf.compat.v1.enable_v2_behavior()
  # Use the multi-process runner so multi-worker/parameter-server strategy
  # combinations get real worker processes.
  tf.__internal__.distribute.multi_process_runner.test_main()
| 10,116 | 37.911538 | 85 | py |
keras | keras-master/keras/distribute/strategy_combinations.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy combinations for combinations.combine()."""
import tensorflow.compat.v2 as tf
# Strategies that place replicas on more than one device (multi-GPU or TPU).
multidevice_strategies = [
    tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
    tf.__internal__.distribute.combinations.tpu_strategy,
]
# MultiWorkerMirroredStrategy variants (2 workers; CPU / 1 GPU / 2 GPUs each).
multiworker_strategies = [
    tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
    tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
    tf.__internal__.distribute.combinations.multi_worker_mirrored_2x2_gpu
]
# Single-machine strategies excluding both the default strategy and TPU.
strategies_minus_default_minus_tpu = [
    tf.__internal__.distribute.combinations.one_device_strategy,
    tf.__internal__.distribute.combinations.one_device_strategy_gpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
    tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu
]
# Single-machine strategies excluding TPU (default strategy included).
strategies_minus_tpu = [
    tf.__internal__.distribute.combinations.default_strategy,
    tf.__internal__.distribute.combinations.one_device_strategy,
    tf.__internal__.distribute.combinations.one_device_strategy_gpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
    tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu
]
# Same MultiWorkerMirrored variants under the name other modules import.
multi_worker_mirrored_strategies = [
    tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
    tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
    tf.__internal__.distribute.combinations.multi_worker_mirrored_2x2_gpu
]
# TPU-backed strategies.
tpu_strategies = [
    tf.__internal__.distribute.combinations.tpu_strategy,
]
# ParameterServerStrategy with one worker (2 PS; CPU or 1 GPU per worker).
parameter_server_strategies_single_worker = [
    tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_cpu,
    tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_1gpu,
]
# ParameterServerStrategy with three workers (2 PS; CPU or 1 GPU per worker).
parameter_server_strategies_multi_worker = [
    tf.__internal__.distribute.combinations.parameter_server_strategy_3worker_2ps_cpu,
    tf.__internal__.distribute.combinations.parameter_server_strategy_3worker_2ps_1gpu,
]
# Everything except multi-worker and parameter-server strategies.
all_strategies = strategies_minus_tpu + tpu_strategies
| 3,013 | 42.057143 | 87 | py |
keras | keras-master/keras/distribute/keras_correctness_test_base.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras using DistributionStrategy."""
import tensorflow.compat.v2 as tf
import functools
from absl.testing import parameterized
import numpy as np
import keras
from keras.distribute import distributed_training_utils
from keras.distribute.strategy_combinations import all_strategies
from keras.distribute.strategy_combinations import multi_worker_mirrored_strategies
from keras.distribute.strategy_combinations import strategies_minus_tpu
from keras.mixed_precision import policy
from keras.preprocessing import sequence
# Fixed seed so distributed and non-distributed runs see identical data.
_RANDOM_SEED = 1337
# Number of batches consumed by `evaluate` in correctness tests.
_EVAL_STEPS = 20
# Global (cross-replica) batch size; see `get_batch_size` for per-core split.
_GLOBAL_BATCH_SIZE = 64
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
def eager_mode_test_configuration():
  """Eager-mode combinations over numpy/dataset and validation-data flags."""
  return tf.__internal__.test.combinations.combine(
      mode='eager', use_numpy=[True, False], use_validation_data=[True, False])
def graph_mode_test_configuration():
  """Graph-mode combinations over numpy/dataset and validation-data flags."""
  return tf.__internal__.test.combinations.combine(
      mode='graph', use_numpy=[True, False], use_validation_data=[True, False])
def all_strategy_and_input_config_combinations():
  """All strategies crossed with both eager and graph input configurations."""
  return (tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(distribution=all_strategies),
      eager_mode_test_configuration() + graph_mode_test_configuration()))
def all_strategy_and_input_config_combinations_eager():
  """All strategies crossed with eager-only input configurations."""
  return (tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(distribution=all_strategies),
      eager_mode_test_configuration()))
def strategy_minus_tpu_and_input_config_combinations_eager():
  """Non-TPU strategies crossed with eager-only input configurations."""
  return (tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(distribution=strategies_minus_tpu),
      eager_mode_test_configuration()))
def strategies_for_embedding_models():
  """Returns distribution strategies to test for embedding models.

  Since embedding models take longer to train, we disregard DefaultStrategy
  in order to prevent testing timeouts.
  """
  def _keep(strategy):
    # Keep TPU/GPU-backed strategies plus the CPU one-device strategy.
    return (strategy.required_tpu or strategy.required_gpus or
            strategy is tf.__internal__.distribute.combinations.one_device_strategy)
  return [strategy for strategy in all_strategies if _keep(strategy)]
def test_combinations_for_embedding_model():
  """Embedding-model strategies: graph mode for all, eager for non-TPU only."""
  # TODO(sourabhbajaj): Enable tests for eager mode
  eager_mode_strategies = [
      s for s in strategies_for_embedding_models() if not s.required_tpu
  ]
  return (tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_for_embedding_models()),
      (graph_mode_test_configuration())) + tf.__internal__.test.combinations.times(
          tf.__internal__.test.combinations.combine(
              distribution=eager_mode_strategies),
          (eager_mode_test_configuration())))
def test_combinations_with_tpu_strategies_graph():
  """TPU strategies crossed with graph-mode input configurations."""
  tpu_strategies = [
      tf.__internal__.distribute.combinations.tpu_strategy,
  ]
  return (tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(distribution=tpu_strategies),
      graph_mode_test_configuration()))
def multi_worker_mirrored_eager():
  """MultiWorkerMirrored strategies crossed with eager-only configurations."""
  return tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(distribution=multi_worker_mirrored_strategies),
      eager_mode_test_configuration())
def multi_worker_mirrored_eager_and_graph():
  """MultiWorkerMirrored strategies crossed with eager and graph configs."""
  return tf.__internal__.test.combinations.times(
      tf.__internal__.test.combinations.combine(distribution=multi_worker_mirrored_strategies),
      eager_mode_test_configuration() + graph_mode_test_configuration())
class MaybeDistributionScope:
  """Context manager entering `distribution.scope()` only when one is given.

  When constructed with a falsy `distribution` (e.g. `None`), entering and
  exiting this context is a no-op.
  """

  def __init__(self, distribution):
    self._distribution = distribution
    self._scope = None

  def __enter__(self):
    if not self._distribution:
      return
    self._scope = self._distribution.scope()
    self._scope.__enter__()

  def __exit__(self, exc_type, value, traceback):
    if not self._distribution:
      return
    self._scope.__exit__(exc_type, value, traceback)
    self._scope = None
def batch_wrapper(dataset, batch_size, repeat=None):
  """Batches `dataset`, optionally repeating it `repeat` times first."""
  if repeat:
    return dataset.repeat(repeat).batch(batch_size)
  return dataset.batch(batch_size)
def get_batch_size(global_batch_size, distribution):
  """Converts a global batch size to a per-replica size when required."""
  # TODO(b/118776054): Use global batch size for Keras/DS support.
  if distribution and not distributed_training_utils.global_batch_size_supported(
      distribution):
    return global_batch_size // distribution.num_replicas_in_sync
  return global_batch_size
def get_data_size(data):
  """Gets the size of data in list, tuple, dict, or a numpy array."""
  assert isinstance(data, (np.ndarray, list, dict, tuple))
  if isinstance(data, (list, tuple)):
    # Lists/tuples hold parallel arrays; the first one determines the size.
    return len(data[0])
  if isinstance(data, dict):
    return len(data.values())
  return len(data)
def get_shapes(data):
  """Returns the nested structure of `.shape`s, or None if any leaf lacks one."""
  flat_leaves = tf.nest.flatten(data)
  if not all(hasattr(leaf, 'shape') for leaf in flat_leaves):
    return None
  return tf.nest.map_structure(lambda leaf: leaf.shape, data)
def get_correctness_test_inputs(use_numpy, use_validation_data,
                                with_distribution, x_train, y_train, x_eval,
                                y_eval, x_predict, training_epochs):
  """Generates the inputs for correctness check when enable Keras with DS.

  Args:
    use_numpy: if True, build kwargs that pass numpy arrays (with an explicit
      `batch_size`) to fit/evaluate/predict; otherwise wrap data in tf.data
      datasets and pass `batch_size=None`.
    use_validation_data: if True, evaluation is folded into `fit()` via
      `validation_data` and `eval_inputs` is returned as None.
    with_distribution: distribution strategy (or None) used to compute the
      per-replica batch size.
    x_train: training inputs.
    y_train: training labels.
    x_eval: evaluation inputs.
    y_eval: evaluation labels.
    x_predict: prediction inputs.
    training_epochs: number of epochs used for fit().

  Returns:
    Tuple of (training_inputs, eval_inputs, predict_inputs) keyword-argument
    dicts for model.fit/evaluate/predict; `eval_inputs` is None when
    `use_validation_data` is True.
  """
  global_batch_size = _GLOBAL_BATCH_SIZE
  batch_size = get_batch_size(global_batch_size, with_distribution)
  if use_numpy:
    training_inputs = {
        'batch_size': batch_size,
        'x': x_train,
        'y': y_train,
        'epochs': training_epochs,
        'shuffle': False,
    }
    if use_validation_data:
      eval_inputs = None
      training_inputs['validation_data'] = (x_eval, y_eval)
    else:
      eval_inputs = {
          'batch_size': batch_size,
          'x': x_eval,
          'y': y_eval,
      }
    predict_inputs = {'x': x_predict}
  else:
    training_data_size = get_data_size(x_train)
    # For dataset inputs, we do not pass batch_size to
    # keras.fit/evaluate/predict. The batch size is part of the dataset.
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs)
    steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size))
    training_inputs = {
        'batch_size': None,
        'x': x,
        'y': None,
        'epochs': training_epochs,
        'shuffle': False,
        'steps_per_epoch': steps_per_epoch
    }
    if use_validation_data:
      eval_inputs = None  # Remove the eval_inputs
      eval_dataset = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
      x = batch_wrapper(eval_dataset, batch_size)
      training_inputs['validation_data'] = x
      training_inputs['validation_steps'] = 5
    else:
      eval_dataset = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
      x = batch_wrapper(eval_dataset, batch_size)
      eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size))
      eval_inputs = {
          'batch_size': None,
          'x': x,
          'y': None,
          'steps': eval_steps,
      }
    # Predict uses a single step, so the whole predict set becomes one batch.
    predict_batch_size = get_batch_size(
        get_data_size(x_predict), with_distribution)
    predict_dataset = tf.data.Dataset.from_tensor_slices(x_predict)
    predict_dataset = batch_wrapper(predict_dataset, predict_batch_size)
    predict_inputs = {
        'steps': 1,
        'x': predict_dataset,
    }
  return training_inputs, eval_inputs, predict_inputs
def fit_eval_and_predict(initial_weights,
                         input_fn,
                         model_fn,
                         distribution=None,
                         is_stateful_model=False):
  """Generates results for fit/predict/evaluate for given model.

  The model is trained/evaluated/predicted twice (fit -> eval -> predict ->
  fit -> eval) to mimic a typical user flow; all outputs are collected in a
  dict keyed by step name (e.g. 'training_history_1', 'weights_2').

  Args:
    initial_weights: weights used to initialize the model, so distributed and
      non-distributed runs start identically.
    input_fn: zero-arg callable returning (training_inputs, eval_inputs,
      predict_inputs) kwargs dicts; eval_inputs/predict_inputs may be None.
    model_fn: callable building the model; called with initial_weights,
      distribution, and input_shapes keyword arguments.
    distribution: optional distribution strategy passed through to model_fn.
    is_stateful_model: if True, predict() is invoked three times since
      stateful models may return different results per batch.

  Returns:
    Dict of training histories, eval results, predictions and weights.
  """
  training_inputs, eval_inputs, predict_inputs = input_fn()
  model = model_fn(
      initial_weights=initial_weights,
      distribution=distribution,
      input_shapes=get_shapes(training_inputs['x']))
  result = {}
  result['training_history_1'] = model.fit(**training_inputs).history
  if eval_inputs is not None:
    result['eval_result_1'] = model.evaluate(**eval_inputs)
  result['weights_1'] = model.get_weights()
  if predict_inputs is not None:
    # Check correctness of the result of predict() invoked
    # multiple times -- as for stateful models, result of
    # predict may differ for each batch.
    predict_length = 1
    if is_stateful_model:
      predict_length = 3
    for i in range(predict_length):
      result_key = 'predict_result_{}'.format(i)
      result[result_key] = model.predict(**predict_inputs)
  # Train and eval again to mimic user's flow.
  result['training_history_2'] = model.fit(**training_inputs).history
  if eval_inputs is not None:
    result['eval_result_2'] = model.evaluate(**eval_inputs)
  result['weights_2'] = model.get_weights()
  return result
def compare_results(results_with_ds,
                    results_without_ds,
                    distribution,
                    testcase,
                    partial_last_batch=None):
  """Compares results of model compiled with/without distribution strategy.

  Args:
    results_with_ds: result dict from `fit_eval_and_predict` run under
      `distribution`. NOTE: mutated in place when `partial_last_batch` is set
      (loss entries are stripped/zeroed before comparison).
    results_without_ds: result dict from the run without a strategy; mutated
      the same way.
    distribution: the `tf.distribute.Strategy` used for the first run.
    testcase: test case instance providing `assertAllClose`.
    partial_last_batch: None, 'eval', or 'train_and_eval'; relaxes tolerances
      and skips loss comparison when the last batch is partial.
  """
  # Tolerances depend on the dtype policy and on whether partial batches are
  # involved; reduced-precision and uneven-batch runs get looser bounds.
  if policy.global_policy().compute_dtype in ('float16', 'bfloat16'):
    default_tolerance = 1e-2
    relaxed_tolerance = 1e-2
  elif partial_last_batch == 'train_and_eval':
    # We relax the tolerance a lot in the partial last batch case as
    # 1. the examples in uneven batches may have different weights when
    # applying the gradients in the distributed case.
    # 2. TF Keras and TF Keras DS have different ways to handle the case when
    # training with epochs > 1 with numpy inputs. In TF Keras, every epoch
    # may have a partial batch. While in TF Keras DS, as we convert
    # numpy inputs into dataset, it will do a repeat() first and calculate
    # steps_per_epoch, so it will at most have one partial batch. This
    # makes the 1-CPU result even different.
    default_tolerance = 1e-3
    relaxed_tolerance = 1e-3
  else:
    default_tolerance = 4e-5
    relaxed_tolerance = 1e-4
  def _get_compare_result_tolerance(key):
    """Returns tolerance to compare results."""
    # See b/119257215 for more details. DS test run on GPU could have larger
    # variance then test on CPU.
    if (tf.test.is_gpu_available() and
        key.startswith(('weights_1', 'weights_2', 'predict_result'))):
      return relaxed_tolerance
    return default_tolerance
  for key in sorted(results_with_ds.keys()):
    if (key.startswith('training_history') and
        isinstance(distribution,
                   (tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy)) and
        distribution.extended.steps_per_run > 1):
      # TODO(b/119894254): Enable this test for all cases once the
      # underlying bug is fixed.
      continue
    tolerance = _get_compare_result_tolerance(key)
    # We don't compare the loss as loss is currently not computed as metric
    # in Keras, the loss value is inaccurate for last partial batch due to
    # more weights for the last batch samples.
    if partial_last_batch is not None:
      if key.startswith('eval_result'):
        results_with_ds[key] = results_with_ds[key][1:]
        results_without_ds[key] = results_without_ds[key][1:]
      if key.startswith('training_history'):
        results_with_ds[key]['val_loss'] = 0
        results_without_ds[key]['val_loss'] = 0
    testcase.assertAllClose(
        results_with_ds[key],
        results_without_ds[key],
        atol=tolerance,
        rtol=tolerance,
        msg='Fail to assert {}.'.format(key))
def should_skip_tpu_with_eager(distribution):
  """Returns True when executing eagerly with a TPU strategy."""
  tpu_strategy_types = (tf.distribute.experimental.TPUStrategy,
                        tf.compat.v1.distribute.experimental.TPUStrategy)
  return tf.executing_eagerly() and isinstance(distribution,
                                               tpu_strategy_types)
class LearningRateBatchScheduler(keras.callbacks.Callback):
  """Scheduler that dynamically sets the learning rate of model.

  The learning rate is derived from the batch index so that distributed and
  non-distributed runs can be compared step for step.
  """

  def __init__(self, update_freq=None):
    # When set, the learning rate is only updated every `update_freq` batches
    # (used to mimic TPU steps_per_run behavior on CPU).
    self._update_freq = update_freq

  def on_batch_begin(self, batch, logs=None):
    if self._update_freq and batch % self._update_freq:
      return
    # To avoid divergence, limit the value range.
    new_lr = 0.001 * (batch % 10)
    keras.backend.set_value(self.model.optimizer.lr, new_lr)
class TestDistributionStrategyCorrectnessBase(tf.test.TestCase,
                                              parameterized.TestCase):
  """Model agnostic testing infra to test correctness of Keras models.

  Subclasses supply the model (`get_model`) and, optionally, data with
  partial final batches; this base class then trains the same model with and
  without a distribution strategy and asserts the results match.
  """

  def set_up_test_config(self,
                         use_numpy=False,
                         use_validation_data=False,
                         with_batch_norm=None):
    """Stores the test configuration and seeds RNGs for reproducibility."""
    self.use_numpy = use_numpy
    self.use_validation_data = use_validation_data
    self.with_batch_norm = with_batch_norm
    keras.backend.set_image_data_format('channels_last')
    np.random.seed(_RANDOM_SEED)
    tf.compat.v1.set_random_seed(_RANDOM_SEED)

  def get_data(self):
    """Returns (x_train, y_train, x_predict); labels equal the binary inputs."""
    num_samples = 10000
    x_train = np.random.randint(0, 2, num_samples)
    x_train = np.reshape(x_train, (num_samples, 1))
    y_train = x_train
    return (x_train.astype('float32'), y_train.astype('float32'), None)

  def get_data_with_partial_last_batch(self):
    # Subclasses testing partial train+eval batches must override this.
    raise NotImplementedError

  def get_data_with_partial_last_batch_eval(self):
    # Subclasses testing a partial eval batch must override this.
    raise NotImplementedError

  def get_input_for_correctness_test(self, **kwargs):
    """Generates inputs that are dictionaries.

    We only provide a default implementation of this method here. If you need
    more customized way of providing input to your model, overwrite this method.

    Args:
      **kwargs: key word arguments about how to create the input dictionaries

    Returns:
      Three dictionaries representing the input for fit(), evaluate() and
      predict()
    """
    return get_correctness_test_inputs(**kwargs)

  def get_model(self,
                distribution=None,
                input_shapes=None):
    # Subclasses must build and return the (compiled) model under test.
    raise NotImplementedError

  def run_correctness_test(self,
                           distribution,
                           use_numpy,
                           use_validation_data,
                           with_batch_norm=None,
                           is_stateful_model=False,
                           partial_last_batch=None,
                           training_epochs=2):
    """Trains the same model with and without `distribution` and compares.

    Args:
      distribution: the strategy under test.
      use_numpy: pass numpy arrays (True) or tf.data datasets (False).
      use_validation_data: fold evaluation into fit() via validation_data.
      with_batch_norm: None or 'regular'; 'regular' with >1 replica is
        expected to produce *different* weights (asserted via AssertionError).
      is_stateful_model: forwarded to `fit_eval_and_predict`.
      partial_last_batch: None, 'eval', or 'train_and_eval'.
      training_epochs: epochs to train for in each run.
    """
    with self.cached_session():
      self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)
      if partial_last_batch == 'eval':
        x_train, y_train, x_eval, y_eval, x_predict = (
            self.get_data_with_partial_last_batch_eval())
      elif partial_last_batch == 'train_and_eval':
        x_train, y_train, x_eval, y_eval, x_predict = (
            self.get_data_with_partial_last_batch())
      else:
        x_train, y_train, x_predict = self.get_data()
        x_eval = x_train
        y_eval = y_train
      # The model is built once and the initial weights are saved.
      # This is used to initialize the model for both the distribution and
      # non-distribution run.
      model = self.get_model(
          input_shapes=get_shapes(x_train))
      initial_weights = model.get_weights()
      ds_input_fn = functools.partial(
          self.get_input_for_correctness_test,
          use_numpy=use_numpy,
          use_validation_data=use_validation_data,
          with_distribution=distribution,
          x_train=x_train,
          y_train=y_train,
          x_eval=x_eval,
          y_eval=y_eval,
          x_predict=x_predict,
          training_epochs=training_epochs)
      nods_input_fn = functools.partial(
          self.get_input_for_correctness_test,
          use_numpy=use_numpy,
          use_validation_data=use_validation_data,
          with_distribution=None,
          x_train=x_train,
          y_train=y_train,
          x_eval=x_eval,
          y_eval=y_eval,
          x_predict=x_predict,
          training_epochs=training_epochs)
      results_with_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=ds_input_fn,
          model_fn=self.get_model,
          distribution=distribution,
          is_stateful_model=is_stateful_model)
      results_without_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=nods_input_fn,
          model_fn=self.get_model,
          distribution=None,
          is_stateful_model=is_stateful_model)
      # First, special case, for multi-replica distributed training, batch
      # norm is not aggregated globally. So it is expected to have different
      # weights.
      if (self.with_batch_norm == 'regular' and
          distribution.num_replicas_in_sync > 1):
        with self.assertRaises(AssertionError):
          compare_results(
              results_with_ds,
              results_without_ds,
              distribution,
              testcase=self,
              partial_last_batch=partial_last_batch)
      else:
        compare_results(
            results_with_ds,
            results_without_ds,
            distribution,
            testcase=self,
            partial_last_batch=partial_last_batch)

  def get_input_for_dynamic_lr_test(self, **kwargs):
    """Generates inputs that are dictionaries.

    We only provide a default implementation of this method here. If you need
    more customized way of providing input to your model, overwrite this method.

    Args:
      **kwargs: key word arguments about how to create the input dictionaries

    Returns:
      Three dictionaries representing the input for fit(), evaluate() and
      predict()
    """
    training_input = kwargs
    return training_input, None, None

  def run_dynamic_lr_test(self,
                          distribution):
    """Checks per-batch LR scheduling matches with and without the strategy."""
    with self.cached_session():
      self.set_up_test_config()
      x_train, y_train, _ = self.get_data()
      model = self.get_model(
          input_shapes=get_shapes(x_train))
      initial_weights = model.get_weights()
      update_freq = None
      if (isinstance(distribution, tf.compat.v1.distribute.experimental.TPUStrategy) and
          distribution.extended.steps_per_run > 1):
        # For TPUStrategy with steps_per_run > 1, the callback is not invoked
        # every step. So, to compare the CPU/TPU, we let the CPU to behave the
        # same as TPU.
        update_freq = distribution.extended.steps_per_run
      training_epochs = 2
      global_batch_size = 64
      ds_batch_size = get_batch_size(global_batch_size, distribution)
      nods_batch_size = get_batch_size(global_batch_size, None)
      ds_input_fn = functools.partial(
          self.get_input_for_dynamic_lr_test,
          x=x_train,
          y=y_train,
          batch_size=ds_batch_size,
          shuffle=False,
          epochs=training_epochs,
          callbacks=[LearningRateBatchScheduler(update_freq)],
          validation_data=(x_train, y_train))
      nods_input_fn = functools.partial(
          self.get_input_for_dynamic_lr_test,
          x=x_train,
          y=y_train,
          batch_size=nods_batch_size,
          shuffle=False,
          epochs=training_epochs,
          callbacks=[LearningRateBatchScheduler(update_freq)],
          validation_data=(x_train, y_train))
      results_with_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=ds_input_fn,
          model_fn=self.get_model,
          distribution=distribution)
      results_without_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=nods_input_fn,
          model_fn=self.get_model,
          distribution=None)
      compare_results(
          results_with_ds, results_without_ds, distribution, testcase=self)
class TestDistributionStrategyEmbeddingModelCorrectnessBase(
    TestDistributionStrategyCorrectnessBase):
  """Base class to test correctness of Keras models with embedding layers."""

  def get_data(self,
               count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),
               min_words=5,
               max_words=10,
               max_word_id=19,
               num_classes=2):
    """Generates a synthetic word-id classification dataset.

    Each class gets its own random probability distribution over word ids;
    each example is a variable-length id sequence drawn from its label's
    distribution, then padded to `max_words`.

    Args:
      count: number of examples to generate.
      min_words: minimum sequence length (inclusive).
      max_words: maximum sequence length (exclusive, per np.random.randint)
        and the padded sequence length.
      max_word_id: vocabulary size.
      num_classes: number of class labels.

    Returns:
      Tuple of (x_train, y_train, x_predict); x_predict is the first global
      batch of x_train.
    """
    distribution = []
    for _ in range(num_classes):
      dist = np.abs(np.random.randn(max_word_id))
      dist /= np.sum(dist)
      distribution.append(dist)
    features = []
    labels = []
    for _ in range(count):
      label = np.random.randint(0, num_classes, size=1)[0]
      num_words = np.random.randint(min_words, max_words, size=1)[0]
      word_ids = np.random.choice(
          max_word_id, size=num_words, replace=True, p=distribution[label])
      # Removed a redundant `word_ids = word_ids` self-assignment here.
      labels.append(label)
      features.append(word_ids)
    features = sequence.pad_sequences(
        features, maxlen=max_words)
    x_train = np.asarray(features, dtype=np.float32)
    y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))
    x_predict = x_train[:_GLOBAL_BATCH_SIZE]
    return x_train, y_train, x_predict
if __name__ == '__main__':
tf.test.main()
| 21,736 | 34.059677 | 114 | py |
keras | keras-master/keras/distribute/mirrored_variable_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test MirroredVariable in MirroredStrategy and MultiWorkerMirroredStrategy."""
import tensorflow.compat.v2 as tf
from keras.distribute import distributed_training_utils
from keras.layers import core
def _mimic_two_cpus():
  """Splits CPU:0 into two logical devices; returns False when unavailable."""
  try:
    cpus = tf.config.list_physical_devices("CPU")
  except tf.errors.NotFoundError:
    # Testing device not available. Skip the test.
    return False
  logical_devices = [
      tf.config.LogicalDeviceConfiguration(),
      tf.config.LogicalDeviceConfiguration(),
  ]
  tf.config.set_logical_device_configuration(cpus[0], logical_devices)
  return True
def get_strategy_with_mimicing_cpus():
  """Returns a two-logical-CPU MultiWorkerMirroredStrategy, or None."""
  if not _mimic_two_cpus():
    return None
  local_devices = ("/device:CPU:0", "/device:CPU:1")
  return tf.distribute.MultiWorkerMirroredStrategy._from_local_devices(
      local_devices)
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        distribution=list(
            filter(None.__ne__, [
                tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
                get_strategy_with_mimicing_cpus()
            ])),
        mode=["graph", "eager"]))
class MirroredVariableCreationTest(tf.test.TestCase):
  """Base class that tests mirrored variable creator.

  Currently it assumes all strategy objects have two replicas.
  """

  @classmethod
  def setUpClass(cls):
    _mimic_two_cpus()

  def assertAllDifferent(self, objs):
    """Asserts every pair of distinct-index elements are different objects."""
    for i in range(len(objs)):
      for j in range(len(objs)):
        if i == j:
          continue
        self.assertIsNot(objs[i], objs[j])

  def _is_mirrored(self, val):
    """Returns True if `val` looks like a mirrored distributed value."""
    if distributed_training_utils.is_distributed_variable(val):
      if val._policy:  # pylint: disable=protected-access
        return val._policy._is_mirrored()  # pylint: disable=protected-access
    # Since `Mirrored` is a private symbol in tf.distribute, we're checking
    # with `DistributedValues` as an approximation.
    return isinstance(val, tf.distribute.DistributedValues)

  def testWithLayers(self, distribution):
    """Checks layers built per-replica produce mirrored kernels and biases."""
    def model_fn(features):
      layer1 = core.Dense(1)
      layer1(features)
      layer2 = core.Dense(1)
      layer2(features)
      # We rely on names and orders to make sure replica references the same
      # MirroredVariable. Uniquifying names may involve global states,
      # merge_call switches threads so we need to test things work after
      # merge_call.
      tf.distribute.get_replica_context().merge_call(lambda _: _)
      layer3 = core.Dense(1)
      layer3(features)
      return [(layer1.kernel, layer1.bias), (layer2.kernel, layer2.bias),
              (layer3.kernel, layer3.bias)]
    iterator = distribution.make_input_fn_iterator(
        lambda _: tf.data.Dataset.from_tensors([[1.]]).repeat(10))
    self.evaluate(iterator.initializer)
    features = iterator.get_next()
    with distribution.scope():
      result = distribution.extended.call_for_each_replica(
          model_fn, args=(features,))
    for kernel, bias in result:
      self.assertTrue(self._is_mirrored(kernel))
      self.assertAllDifferent(distribution.experimental_local_results(kernel))
      self.assertTrue(self._is_mirrored(bias))
      # Bug fix: this previously re-checked `kernel` instead of verifying the
      # per-replica components of `bias`.
      self.assertAllDifferent(distribution.experimental_local_results(bias))
if __name__ == "__main__":
tf.test.main()
| 3,966 | 34.738739 | 91 | py |
keras | keras-master/keras/distribute/model_collection_base.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A base class to provide a model and corresponding input data for testing."""
class ModelAndInput:
  """Interface pairing a test model with its matching input data.

  Subclasses implement all three methods; this base class only raises.
  """

  def get_model(self):
    """Builds a compiled keras model and reports its output layer name.

    Returns:
      model: a keras model object
      output_name: a string for the name of the output layer
    """
    raise NotImplementedError("must be implemented in descendants")

  def get_data(self):
    """Provides the data used to train and predict with the model.

    Returns:
      x_train: data used for training
      y_train: label used for training
      x_predict: data used for predicting
    """
    raise NotImplementedError("must be implemented in descendants")

  def get_batch_size(self):
    """Returns the batch_size used by the model."""
    raise NotImplementedError("must be implemented in descendants")
| 1,570 | 35.534884 | 80 | py |
keras | keras-master/keras/distribute/optimizer_combinations.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy and optimizer combinations for combinations.combine()."""
import tensorflow.compat.v2 as tf
from keras.optimizer_v2 import adadelta as adadelta_keras_v2
from keras.optimizer_v2 import adagrad as adagrad_keras_v2
from keras.optimizer_v2 import adam as adam_keras_v2
from keras.optimizer_v2 import adamax as adamax_keras_v2
from keras.optimizer_v2 import ftrl as ftrl_keras_v2
from keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2
from keras.optimizer_v2 import nadam as nadam_keras_v2
from keras.optimizer_v2 import rmsprop as rmsprop_keras_v2
# V1 (tf.compat.v1.train) optimizer factories. Each is wrapped in a
# NamedObject so test combinations get a readable name and a fresh optimizer
# instance per test.
gradient_descent_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
    "GradientDescentV1",
    lambda: tf.compat.v1.train.GradientDescentOptimizer(0.001))
adagrad_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
    "AdagradV1", lambda: tf.compat.v1.train.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
    "AdamV1", lambda: tf.compat.v1.train.AdamOptimizer(0.001, epsilon=1))
ftrl_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
    "FtrlV1", lambda: tf.compat.v1.train.FtrlOptimizer(0.001))
rmsprop_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
    "RmsPropV1", lambda: tf.compat.v1.train.RMSPropOptimizer(0.001))
# TODO(shiningsun): consider adding the other v1 optimizers
# NOTE(review): adam_optimizer_v1_fn is defined above but deliberately(?)
# excluded from this list -- confirm before adding it.
optimizers_v1 = [
    gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn,
    ftrl_optimizer_v1_fn, rmsprop_optimizer_v1_fn
]
# Keras V2 optimizer factories, same NamedObject wrapping as above.
adadelta_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdadeltaKerasV2", lambda: adadelta_keras_v2.Adadelta(0.001))
adagrad_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
adamax_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdamaxKerasV2", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0))
nadam_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "NadamKerasV2", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0))
ftrl_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "FtrlKerasV2", lambda: ftrl_keras_v2.Ftrl(0.001))
gradient_descent_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.001))
rmsprop_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))
# TODO(shiningsun): consider adding the other v2 optimizers
optimizers_v2 = [
    gradient_descent_optimizer_keras_v2_fn, adagrad_optimizer_keras_v2_fn
]
optimizers_v1_and_v2 = optimizers_v1 + optimizers_v2
def distributions_and_v1_optimizers():
  """A common set of combination with DistributionStrategies and Optimizers."""
  strategies = [
      tf.__internal__.distribute.combinations.one_device_strategy,
      tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
      tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
      tf.__internal__.distribute.combinations
      .mirrored_strategy_with_two_gpus_no_merge_call,
  ]
  return tf.__internal__.test.combinations.combine(
      distribution=strategies, optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
  """A common set of combination with DistributionStrategies and Optimizers."""
  strategies = [
      tf.__internal__.distribute.combinations.one_device_strategy,
      tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
      tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
      tf.__internal__.distribute.combinations
      .mirrored_strategy_with_two_gpus_no_merge_call,
  ]
  return tf.__internal__.test.combinations.combine(
      distribution=strategies, optimizer_fn=optimizers_v2)
def distributions_and_v1_and_v2_optimizers():
  """A common set of combination with DistributionStrategies and Optimizers."""
  strategies = [
      tf.__internal__.distribute.combinations.one_device_strategy,
      tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
      tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
      tf.__internal__.distribute.combinations
      .mirrored_strategy_with_two_gpus_no_merge_call,
  ]
  return tf.__internal__.test.combinations.combine(
      distribution=strategies, optimizer_fn=optimizers_v1_and_v2)
| 5,340 | 48.453704 | 87 | py |
keras | keras-master/keras/distribute/__init__.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras' Distribution Strategy library."""
# pylint: disable=unused-import
from keras.distribute import sidecar_evaluator
| 813 | 41.842105 | 80 | py |
keras | keras-master/keras/distribute/custom_training_loop_models_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom training loops."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras.distribute import strategy_combinations
from keras.layers import core
from keras.optimizer_v2 import gradient_descent
class CustomModel(tf.Module):
  """Minimal tf.Module holding a single Dense layer, applied in sequence."""

  def __init__(self, name=None):
    super().__init__(name=name)
    with self.name_scope:
      self._layers = [
          keras.layers.Dense(4, name="dense"),
      ]

  @tf.Module.with_name_scope
  def __call__(self, x):
    out = x
    for layer in self._layers:
      out = layer(out)
    return out
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=(strategy_combinations.all_strategies +
strategy_combinations.multiworker_strategies),
mode=["eager"]
)
)
class KerasModelsTest(tf.test.TestCase, parameterized.TestCase):
  def test_single_keras_layer_run(self, distribution):
    """Runs forward/backward of a lone Dense layer under distribution.run."""
    dataset = _get_dataset()
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      model = keras.layers.Dense(4, name="dense")
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        return grads
      outputs = distribution.run(
          step_fn, args=(next(iterator),))
      # Unwrap per-replica values so the caller sees plain tensors.
      return tf.nest.map_structure(distribution.experimental_local_results,
                                   outputs)
    train_step(input_iterator)
  def test_keras_model_optimizer_run(self, distribution):
    """Trains a functional model, feeding each distributed batch explicitly."""
    dataset = _get_dataset()
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      model = _get_model()
      optimizer = keras.optimizer_v2.rmsprop.RMSprop()
    @tf.function
    def train_step(replicated_inputs):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
        return loss
      outputs = distribution.run(step_fn, args=(replicated_inputs,))
      return tf.nest.map_structure(distribution.experimental_local_results,
                                   outputs)
    # Iterating the distributed iterator runs one train step per batch.
    for x in input_iterator:
      train_step(x)
  def test_keras_subclass_model_optimizer_run(self, distribution):
    """Trains a subclassed (imperative) Keras model under the strategy."""
    def get_subclass_model():
      class KerasSubclassModel(keras.Model):
        def __init__(self):
          super(KerasSubclassModel, self).__init__()
          self.l = keras.layers.Dense(4, name="dense")
        def call(self, x):
          return self.l(x)
      return KerasSubclassModel()
    dataset = _get_dataset()
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      model = get_subclass_model()
      optimizer = keras.optimizer_v2.rmsprop.RMSprop()
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
        return loss
      outputs = distribution.run(step_fn, args=(next(iterator),))
      return tf.nest.map_structure(distribution.experimental_local_results,
                                   outputs)
    train_step(input_iterator)
  def test_keras_model_optimizer_run_loop(self, distribution):
    """Runs several train steps inside a single tf.function trace."""
    dataset = _get_dataset()
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      model = _get_model()
      optimizer = keras.optimizer_v2.rmsprop.RMSprop()
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
        return loss
      # tf.range keeps the loop inside the traced graph; each iteration
      # consumes one batch from the iterator.
      for _ in tf.range(4):
        distribution.run(step_fn, args=(next(iterator),))
    train_step(input_iterator)
  def test_batch_norm_with_dynamic_batch(self, distribution):
    """Trains a fused BatchNorm model when the batch size is not static."""
    inputs = np.zeros((10, 3, 3, 3), dtype=np.float32)
    targets = np.zeros((10, 4), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat()
    dataset = dataset.batch(10)
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      x = keras.layers.Input(shape=(3, 3, 3), name="input")
      y = keras.layers.BatchNormalization(fused=True, name="bn")(x)
      y = keras.layers.Flatten()(y)
      y = keras.layers.Dense(4, name="dense")(y)
      model = keras.Model(x, y)
      optimizer = keras.optimizer_v2.rmsprop.RMSprop()
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          # training=True so BatchNormalization uses batch statistics.
          outputs = model(images, training=True)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
        return loss
      distribution.run(step_fn, args=(next(iterator),))
    train_step(input_iterator)
  def test_lstm(self, distribution):
    """Trains a one-unit LSTM; LSTM-only variables surface no-gradient bugs."""
    batch_size = 32
    def create_lstm_model():
      model = keras.models.Sequential()
      # We only have LSTM variables so we can detect no gradient issues more
      # easily.
      model.add(
          keras.layers.LSTM(1, return_sequences=False, input_shape=(10, 1)))
      return model
    def create_lstm_data():
      seq_length = 10
      x_train = np.random.rand(batch_size, seq_length, 1).astype("float32")
      y_train = np.random.rand(batch_size, 1).astype("float32")
      return x_train, y_train
    x, y = create_lstm_data()
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(batch_size)
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      model = create_lstm_model()
      optimizer = keras.optimizer_v2.gradient_descent.SGD()
    @tf.function
    def train_step(input_iterator):
      def step_fn(inputs):
        inps, targ = inputs
        with tf.GradientTape() as tape:
          output = model(inps)
          loss = tf.reduce_mean(
              keras.losses.binary_crossentropy(
                  y_true=targ, y_pred=output, from_logits=False))
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
        return loss
      outputs = distribution.run(
          step_fn, args=(next(input_iterator),))
      return distribution.experimental_local_results(outputs)
    train_step(input_iterator)
  def test_nested_tf_functions(self, distribution):
    # The test builds two computations with keras layers, one with nested
    # tf.function, and the other without nested tf.function. We run these
    # computations independently on the model with same weights, and make sure
    # the variables are still the same after one training step.
    inputs = np.random.random((10, 3)).astype(np.float32)
    targets = np.ones((10, 4), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()
    dataset = dataset.batch(10)
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    def get_model():
      x = keras.layers.Input(shape=(3,), name="input")
      y = keras.layers.Dense(4, name="dense")(x)
      model = keras.Model(x, y)
      return model
    with distribution.scope():
      model = get_model()
      optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)
      # NOTE(review): basename is just ".h5", which creates a hidden file in
      # the temp dir; presumably "model.h5" was intended. Harmless here since
      # only the suffix drives HDF5 format detection.
      weights_file = os.path.join(self.get_temp_dir(), ".h5")
      model.save_weights(weights_file)
      model2 = get_model()
      model2.load_weights(weights_file)
    # Make sure model and model2 variables are in sync when initialized.
    for model_v, model2_v in zip(model.variables, model2.variables):
      self.assertAllClose(model_v.numpy(), model2_v.numpy())
    def compute_loss(images, targets):
      # Plain Python function: inlined directly into the outer trace.
      outputs = model(images)
      return keras.losses.mean_squared_error(targets, outputs)
    @tf.function
    def train_step_without_nested_tf_function(inputs):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          loss = compute_loss(images, targets)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
      distribution.run(step_fn, args=(inputs,))
    @tf.function
    def compute_loss2(images, targets):
      # tf.function variant: traced as a nested function call.
      outputs = model2(images)
      return keras.losses.mean_squared_error(targets, outputs)
    @tf.function
    def train_step_with_nested_tf_function(inputs):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          loss = compute_loss2(images, targets)
        grads = tape.gradient(loss, model2.variables)
        optimizer.apply_gradients(zip(grads, model2.variables))
      distribution.run(step_fn, args=(inputs,))
    inputs = next(input_iterator)
    train_step_without_nested_tf_function(inputs)
    train_step_with_nested_tf_function(inputs)
    # Make sure model and model2 variables are still in sync.
    for model_v, model2_v in zip(model.variables, model2.variables):
      self.assertAllClose(model_v.numpy(), model2_v.numpy())
  def test_nested_tf_functions_with_control_flow(self, distribution):
    """Calls a per-step tf.function from a tf.range loop in an outer one."""
    inputs = np.random.random((10, 3)).astype(np.float32)
    targets = np.ones((10, 4), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()
    dataset = dataset.batch(10)
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    def get_model():
      x = keras.layers.Input(shape=(3,), name="input")
      y = keras.layers.Dense(4, name="dense")(x)
      model = keras.Model(x, y)
      return model
    with distribution.scope():
      model = get_model()
      optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(grads, model.variables))
      distribution.run(step_fn, args=(next(iterator),))
    @tf.function
    def train_steps(iterator):
      # tf.range makes AutoGraph compile this into a graph while-loop that
      # invokes the nested `train_step` each iteration.
      for _ in tf.range(10):
        train_step(iterator)
    train_steps(input_iterator)
  def test_nested_tf_functions_with_tf_function_passing_to_strategy_run(
      self, distribution):
    """Passes a tf.function itself (not a plain callable) to strategy.run."""
    self.skipTest("b/190608193")  # Currently disabled; tracked by this bug.
    inputs = np.random.random((10, 3)).astype(np.float32)
    targets = np.ones((10, 4), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()
    dataset = dataset.batch(10)
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    def get_model():
      x = keras.layers.Input(shape=(3,), name="input")
      y = keras.layers.Dense(4, name="dense")(x)
      model = keras.Model(x, y)
      return model
    with distribution.scope():
      model = get_model()
      optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)
    @tf.function
    def compute_loss(images, targets):
      outputs = model(images)
      return keras.losses.mean_squared_error(targets, outputs)
    @tf.function
    def step_fn(inputs):
      images, targets = inputs
      with tf.GradientTape() as tape:
        loss = compute_loss(images, targets)
      grads = tape.gradient(loss, model.variables)
      optimizer.apply_gradients(zip(grads, model.variables))
    inputs = next(input_iterator)
    # `step_fn` is itself a tf.function here — the behavior under test.
    distribution.run(step_fn, args=(inputs,))
  def test_customized_tf_module_run(self, distribution):
    """Computes per-replica gradients for a custom tf.Module-style model."""
    # `CustomModel` and `_get_dataset` are defined elsewhere in this file.
    dataset = _get_dataset()
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      model = CustomModel()
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        return grads
      outputs = distribution.run(
          step_fn, args=(next(iterator),))
      # Gradients form a nested structure, so map the unwrapping over it.
      return tf.nest.map_structure(distribution.experimental_local_results,
                                   outputs)
    train_step(input_iterator)
  def test_reduce_loss(self, distribution):
    """Smoke-tests reducing a per-replica, per-example loss to a scalar."""
    inputs = np.zeros((10, 4), dtype=np.float32)
    targets = np.zeros((10, 1), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.batch(10)
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
    with distribution.scope():
      x = keras.layers.Input(shape=(4), name="input")
      y = keras.layers.Dense(3, name="dense")(x)
      model = keras.Model(x, y)
    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        images, targets = inputs
        outputs = model(images)
        # Per-example loss vector (no reduction inside the step).
        loss = keras.losses.sparse_categorical_crossentropy(targets, outputs)
        return loss
      return distribution.run(step_fn, args=(next(iterator),))
    loss = train_step(input_iterator)
    # axis=0 additionally averages over the per-replica batch dimension. The
    # result is deliberately unused — the test only checks the reduction runs.
    loss = distribution.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=0)
  def test_variable_run_argument(self, distribution):
    # Test that variables passed to run() remain variables. Previous behavior
    # in TPUStrategy was to cast to Tensor.
    with distribution.scope():
      # `gradient_descent` and `core` are module-level imports of this file.
      optimizer = gradient_descent.SGD(0.1)
      net = core.Dense(1, trainable=True)
    dataset = tf.data.Dataset.from_tensors([[1.]])
    dataset = dataset.repeat()
    dataset = dataset.batch(2, drop_remainder=True)
    def replica_step(trainable_variables, features):
      with tf.GradientTape() as tape:
        net_out = net(features[0], training=True)
        loss = (net_out - 1.0) * (net_out - 1.0)
      gradients = tape.gradient(loss, trainable_variables)
      # Would fail if `trainable_variables` had been cast to Tensors:
      # apply_gradients requires real variables.
      optimizer.apply_gradients(zip(gradients, trainable_variables))
      return loss
    @tf.function
    def step(features):
      per_replica_losses = distribution.run(
          replica_step,
          (net.trainable_variables, features),
      )
      loss = distribution.reduce(
          tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
      return loss
    step(next(iter(dataset)))
class KerasModelsXLATest(tf.test.TestCase, parameterized.TestCase):
  """Tests Keras custom training loops with XLA-compiled layer code on TPU."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategy_combinations.tpu_strategies, mode=["eager"]))
  def test_tf_function_jit_compile(self, distribution):
    """A custom layer with a jit-compiled `call` runs inside `strategy.run`."""
    dataset = _get_dataset()
    input_iterator = iter(distribution.experimental_distribute_dataset(dataset))

    class CustomDense(keras.layers.Layer):
      """Minimal dense layer whose forward pass is compiled with XLA."""

      def __init__(self, num_outputs):
        super(CustomDense, self).__init__()
        self.num_outputs = num_outputs

      def build(self, input_shape):
        # `add_weight` replaces the deprecated `add_variable` alias; the
        # created variable is identical.
        self.kernel = self.add_weight(
            "kernel", shape=[int(input_shape[-1]), self.num_outputs])

      @tf.function(jit_compile=True)
      def call(self, inputs):
        return tf.matmul(inputs, self.kernel)

    with distribution.scope():
      x = keras.layers.Input(shape=(3,))
      y = CustomDense(4)(x)
      model = keras.Model(x, y)

    @tf.function
    def train_step(iterator):
      def step_fn(inputs):
        # Returns the gradients of the MSE loss w.r.t. all model variables.
        images, targets = inputs
        with tf.GradientTape() as tape:
          outputs = model(images)
          loss = keras.losses.mean_squared_error(targets, outputs)
        grads = tape.gradient(loss, model.variables)
        return grads

      outputs = distribution.run(
          step_fn, args=(next(iterator),))
      # Gradients form a nested structure, so map the unwrapping over it.
      return tf.nest.map_structure(distribution.experimental_local_results,
                                   outputs)

    train_step(input_iterator)
def _get_dataset():
  """Returns a batched dataset of 31 zero-valued feature/target pairs.

  With 31 examples and batch size 10, the final batch is a partial batch of
  one example.
  """
  features = np.zeros((31, 3), dtype=np.float32)
  labels = np.zeros((31, 4), dtype=np.float32)
  return tf.data.Dataset.from_tensor_slices((features, labels)).batch(10)
def _get_model():
  """Builds a one-layer functional model mapping (3,) inputs to 4 outputs."""
  inputs = keras.layers.Input(shape=(3,), name="input")
  outputs = keras.layers.Dense(4, name="dense")(inputs)
  return keras.Model(inputs, outputs)
if __name__ == "__main__":
  # Spawns subprocesses as needed for the multi-worker test combinations.
  tf.__internal__.distribute.multi_process_runner.test_main()
| 18,070 | 32.651769 | 80 | py |
keras | keras-master/keras/distribute/sharded_variable_test.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ClusterCoordinator and Keras models."""
import tensorflow.compat.v2 as tf
import keras
from keras.distribute import multi_worker_testing_utils
from keras.engine import base_layer
class ShardedVariableTest(tf.test.TestCase):
  """Tests Keras layers/metrics/models with sharded (partitioned) variables."""
  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # Every variable created under this strategy's scope is partitioned into
    # two fixed shards across the 3-worker / 2-ps in-process cluster.
    cls.strategy = tf.distribute.experimental.ParameterServerStrategy(
        multi_worker_testing_utils.make_parameter_server_cluster(3, 2),
        variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner(2))
  def assert_list_all_equal(self, list1, list2):
    """Used in lieu of `assertAllEqual`.
    This is used to replace standard `assertAllEqual` for the cases where
    `list1` and `list2` contain `AggregatingVariable`. Lists with
    `AggregatingVariable` are not convertible to numpy array via `np.array`
    calls as numpy would raise `ValueError: setting an array element with a
    sequence.`
    Args:
      list1: The first list to compare equality.
      list2: The second list to compare equality.
    """
    for lhs, rhs in zip(list1, list2):
      self.assertEqual(lhs, rhs)
  def test_keras_layer_setattr(self):
    """Variables assigned as layer attributes are tracked shard-by-shard."""
    class Layer(base_layer.Layer):
      def __init__(self):
        super().__init__()
        self.w = tf.Variable([0, 1])
        self.b = tf.Variable([2, 3], trainable=False)
    with self.strategy.scope():
      layer = Layer()
    # Each 2-element variable was split into two single-element shards.
    self.assertLen(layer.trainable_weights, 2)
    self.assertEqual(layer.trainable_weights[0], [0])
    self.assertEqual(layer.trainable_weights[1], [1])
    self.assertLen(layer.non_trainable_weights, 2)
    self.assertEqual(layer.non_trainable_weights[0], [2])
    self.assertEqual(layer.non_trainable_weights[1], [3])
    self.assert_list_all_equal(
        layer.weights, layer.trainable_weights + layer.non_trainable_weights)
    self.assert_list_all_equal(layer.trainable_weights,
                               layer.trainable_variables)
    self.assert_list_all_equal(layer.weights, layer.variables)
    # Checkpointing should depend on the sharded-variable containers (w, b),
    # not on their individual shards.
    checkpoint_deps = set(dep.ref for dep in layer._checkpoint_dependencies)
    self.assertEqual(checkpoint_deps, set([layer.w, layer.b]))
  def test_keras_layer_add_weight(self):
    """Variables created via `add_weight` are tracked shard-by-shard."""
    class Layer(base_layer.Layer):
      def __init__(self):
        super().__init__()
        self.w = self.add_weight(
            shape=(2,),
            initializer=lambda shape, dtype: tf.constant([0., 1.],),
            trainable=True)
        self.b = self.add_weight(
            shape=(2,),
            initializer=lambda shape, dtype: tf.constant([2., 3.]),
            trainable=False)
    with self.strategy.scope():
      layer = Layer()
    self.assertLen(layer.trainable_weights, 2)
    self.assertEqual(layer.trainable_weights[0], [0.])
    self.assertEqual(layer.trainable_weights[1], [1.])
    self.assertLen(layer.non_trainable_weights, 2)
    self.assertEqual(layer.non_trainable_weights[0], [2.])
    self.assertEqual(layer.non_trainable_weights[1], [3.])
    self.assert_list_all_equal(
        layer.weights, layer.trainable_weights + layer.non_trainable_weights)
    self.assert_list_all_equal(layer.trainable_weights,
                               layer.trainable_variables)
    self.assert_list_all_equal(layer.weights, layer.variables)
    checkpoint_deps = set(dep.ref for dep in layer._checkpoint_dependencies)
    self.assertEqual(checkpoint_deps, set([layer.w, layer.b]))
  def test_keras_metrics(self):
    """Metric state update/reset/result works with sharded accumulators."""
    with self.strategy.scope():
      fp = keras.metrics.FalsePositives(thresholds=[0.2, 0.5, 0.7, 0.8])
      auc = keras.metrics.AUC(num_thresholds=10)
    @tf.function
    def update():
      fp.update_state([0., 1., 0., 0.], [0., 0., 0.3, 0.9])
      auc.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
    @tf.function
    def reset():
      fp.reset_state()
      auc.reset_state()
    update()
    self.assertEqual(auc.result(), 0.75)
    self.assertAllEqual(fp.result(), [2., 1., 1., 1.])
    reset()
    self.assertEqual(auc.result(), 0.0)
    self.assertAllEqual(fp.result(), [0., 0., 0., 0.])
    # The accumulators should be sharded containers, which expose a
    # `variables` attribute listing their shards.
    self.assertTrue(hasattr(auc.true_positives, 'variables'))
    self.assertTrue(hasattr(fp.accumulator, 'variables'))
  def test_saved_model(self):
    """SavedModel export works; reload outside the strategy matches outputs."""
    def create_model():
      inputs = keras.layers.Input(shape=(4,))
      outputs = keras.layers.Dense(2)(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(optimizer='adam', loss='mean_squared_error')
      return model
    with self.strategy.scope():
      model = create_model()
      inputs = tf.random.normal(shape=(8, 4))
      expect = model(inputs)
      saved_dir = self.get_temp_dir()
      model.save(saved_dir)
    loaded_model = keras.models.load_model(saved_dir)
    got = loaded_model(inputs)
    self.assertAllClose(got, expect)
    # The reloaded model holds merged (unsharded) variables, so it has fewer
    # variable objects than the sharded original.
    self.assertGreater(len(model.variables), len(loaded_model.variables))
    # Loading back under the sharding strategy's scope must raise.
    with self.assertRaises(ValueError):
      with self.strategy.scope():
        keras.models.load_model(saved_dir)
if __name__ == '__main__':
  # v2 behavior must be enabled before the in-process cluster is created.
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| 5,742 | 34.018293 | 95 | py |
keras | keras-master/keras/distribute/keras_embedding_model_correctness_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness test for tf.keras Embedding models using DistributionStrategy."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras.distribute import keras_correctness_test_base
from keras.optimizer_v2 import gradient_descent as gradient_descent_keras
class DistributionStrategyEmbeddingModelCorrectnessTest(
    keras_correctness_test_base
    .TestDistributionStrategyEmbeddingModelCorrectnessBase):
  """Correctness tests for an embedding + pooling word classifier."""
  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    """Builds the (optionally distributed) embedding classification model."""
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      word_ids = keras.layers.Input(
          shape=(max_words,), dtype=np.int32, name='words')
      word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
      # Optionally pass each timestep's embedding through a shared Dense
      # layer before pooling.
      if self.use_distributed_dense:
        word_embed = keras.layers.TimeDistributed(keras.layers.Dense(4))(
            word_embed)
      avg = keras.layers.GlobalAveragePooling1D()(word_embed)
      preds = keras.layers.Dense(2, activation='softmax')(avg)
      model = keras.Model(inputs=[word_ids], outputs=[preds])
      if initial_weights:
        model.set_weights(initial_weights)
      model.compile(
          optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])
    return model
  @tf.__internal__.distribute.combinations.generate(
      keras_correctness_test_base.test_combinations_for_embedding_model() +
      keras_correctness_test_base.multi_worker_mirrored_eager())
  def test_embedding_model_correctness(self, distribution, use_numpy,
                                       use_validation_data):
    # Plain embedding model (no TimeDistributed Dense).
    self.use_distributed_dense = False
    self.run_correctness_test(distribution, use_numpy, use_validation_data)
  @tf.__internal__.distribute.combinations.generate(
      keras_correctness_test_base.test_combinations_for_embedding_model() +
      keras_correctness_test_base.multi_worker_mirrored_eager())
  def test_embedding_time_distributed_model_correctness(
      self, distribution, use_numpy, use_validation_data):
    # Same model with the TimeDistributed Dense enabled.
    self.use_distributed_dense = True
    self.run_correctness_test(distribution, use_numpy, use_validation_data)
class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
    keras_correctness_test_base
    .TestDistributionStrategyEmbeddingModelCorrectnessBase):
  """Correctness tests for a siamese model with a shared embedding layer."""
  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    """Builds a two-tower model scoring cosine similarity of word bags."""
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      word_ids_a = keras.layers.Input(
          shape=(max_words,), dtype=np.int32, name='words_a')
      word_ids_b = keras.layers.Input(
          shape=(max_words,), dtype=np.int32, name='words_b')
      def submodel(embedding, word_ids):
        # One tower: embed the ids then average-pool over the time axis.
        word_embed = embedding(word_ids)
        rep = keras.layers.GlobalAveragePooling1D()(word_embed)
        return keras.Model(inputs=[word_ids], outputs=[rep])
      # A single Embedding layer shared by both towers.
      word_embed = keras.layers.Embedding(
          input_dim=20,
          output_dim=10,
          input_length=max_words,
          embeddings_initializer=keras.initializers.RandomUniform(0, 1))
      a_rep = submodel(word_embed, word_ids_a).outputs[0]
      b_rep = submodel(word_embed, word_ids_b).outputs[0]
      # normalize=True turns the dot product into a cosine similarity.
      sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep])
      model = keras.Model(inputs=[word_ids_a, word_ids_b], outputs=[sim])
      if initial_weights:
        model.set_weights(initial_weights)
      # TODO(b/130808953): Switch back to the V1 optimizer after global_step
      # is made mirrored.
      model.compile(
          optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
          loss='mse',
          metrics=['mse'])
    return model
  def get_data(self,
               count=(keras_correctness_test_base._GLOBAL_BATCH_SIZE *
                      keras_correctness_test_base._EVAL_STEPS),
               min_words=5,
               max_words=10,
               max_word_id=19,
               num_classes=2):
    """Draws two labeled samples per row and pairs them with +/-1 targets."""
    features_a, labels_a, _ = (
        super(DistributionStrategySiameseEmbeddingModelCorrectnessTest,
              self).get_data(count, min_words, max_words, max_word_id,
                             num_classes))
    features_b, labels_b, _ = (
        super(DistributionStrategySiameseEmbeddingModelCorrectnessTest,
              self).get_data(count, min_words, max_words, max_word_id,
                             num_classes))
    # Target is +1 when the two samples share a class label, else -1.
    y_train = np.zeros((count, 1), dtype=np.float32)
    y_train[labels_a == labels_b] = 1.0
    y_train[labels_a != labels_b] = -1.0
    # TODO(b/123360757): Add tests for using list as inputs for multi-input
    # models.
    x_train = {
        'words_a': features_a,
        'words_b': features_b,
    }
    x_predict = x_train
    return x_train, y_train, x_predict
  @tf.__internal__.distribute.combinations.generate(
      keras_correctness_test_base.test_combinations_for_embedding_model() +
      keras_correctness_test_base.multi_worker_mirrored_eager())
  def test_siamese_embedding_model_correctness(self, distribution, use_numpy,
                                               use_validation_data):
    self.run_correctness_test(distribution, use_numpy, use_validation_data)
if __name__ == '__main__':
  # Spawns subprocesses as needed for the multi-worker test combinations.
  tf.__internal__.distribute.multi_process_runner.test_main()
| 6,235 | 38.468354 | 80 | py |
keras | keras-master/keras/distribute/multi_worker_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test multi-worker Keras."""
import tensorflow.compat.v2 as tf
import collections
import copy
import functools
import json
import os
import sys
import threading
from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import
import keras
from keras import backend
from keras import callbacks
from keras import metrics as metrics_module
from keras import models
from keras import optimizer_v1
from keras.distribute import multi_worker_testing_utils
from keras.optimizer_v2 import rmsprop
from keras.utils import kpl_test_utils
# pylint: disable=g-direct-tensorflow-import
def _clone_and_build_model(model, strategy):
  """Clones `model` under `strategy`'s scope and compiles the clone.

  Args:
    model: A compiled Keras model to clone.
    strategy: The distribution strategy whose scope the clone's variables
      should be created under.

  Returns:
    The cloned and compiled model.
  """
  # The new "original" model in worker 0.
  with strategy.scope():
    cloned_model = models.clone_model(model)
  # Compile and build model.
  if isinstance(model.optimizer, optimizer_v1.TFOptimizer):
    optimizer = model.optimizer
    # TODO(yuefengz): figure out why the optimizer here is still a
    # TFOptimizer.
    while isinstance(optimizer, optimizer_v1.TFOptimizer):
      optimizer = optimizer.optimizer
    optimizer = copy.deepcopy(optimizer)
  else:
    # Rebuild the optimizer from its config so the clone gets fresh state.
    optimizer_config = model.optimizer.get_config()
    optimizer = type(model.optimizer).from_config(optimizer_config)
  cloned_model.compile(
      optimizer,
      model.loss,
      metrics=metrics_module.clone_metrics(model._compile_metrics),
      loss_weights=model.loss_weights,
      sample_weight_mode=model.sample_weight_mode,
      weighted_metrics=metrics_module.clone_metrics(
          model._compile_weighted_metrics))
  return cloned_model
# TODO(b/123918215): Possibly merge this Callback with keras_test.Counter.
class MultiWorkerVerificationCallback(callbacks.Callback):
  """MultiWorkerVerificationCallback verifies the callbacks in multi-worker scheme.
  This Callback is intended to be used for verifying the callback is indeed
  called the correct number of times in various task types.
  Attributes:
    _task_dict: A nested dictionary storing the number of times a callback has
      been called in specific task type, task index, and method name.
      Look up structure is
      task_name -> task_id -> tracking_method_name -> invoke_count
      For example, a _task_dict of
      {
          'ps': {
              0: {
                  'on_epoch_begin': 2
              },
              1: {
                  'on_epoch_begin': 2
              }
          },
          'worker': {
              0: {
                  'on_epoch_begin': 2
              },
              1: {
                  'on_epoch_begin': 2
              }
          }
      }
      indicates the ps task has 'on_epoch_begin' called twice on each
      of the two indices, and likewise for worker task.
  """
  # TODO(rchao): Add other method calls to verify.
  METHODS_TO_VERIFY = ['on_epoch_begin']
  def __init__(self, num_epoch, num_worker):
    """Initialize a MultiWorkerVerificationCallback.
    Args:
      num_epoch: Number of epochs this Callback is expected to be called for.
      num_worker: Number of workers this Callback is expected to be called from.
    """
    super(MultiWorkerVerificationCallback, self).__init__()
    self._num_epoch = num_epoch
    self._num_worker = num_worker
    # task type -> task index -> method name -> call count.
    self._task_dict = {
        key: collections.defaultdict(lambda: collections.defaultdict(int))
        for key in ['ps', 'worker', 'chief']
    }
    self._lock = threading.Lock()
    self._is_between_graph = None
    self.wrap_methods(self.METHODS_TO_VERIFY)
  @property
  def is_between_graph(self):
    # Whether the strategy uses between-graph replication; must be set by the
    # test before `verify` is called.
    return self._is_between_graph
  @is_between_graph.setter
  def is_between_graph(self, is_between_graph):
    self._is_between_graph = is_between_graph
  def wrap_methods(self, method_names):
    """Wrap methods so that the counts of calls are tracked.
    Args:
      method_names: A list of names of methods to track calls.
    """
    for method_name in method_names:
      method = getattr(self, method_name)
      def wrapped_method(method_to_wrap, name, *arg, **kwargs):
        # Use lock to ensure += operation is thread-safe.
        with self._lock:
          task_config = json.loads(os.environ['TF_CONFIG'])['task']
          self._task_dict[task_config['type']][task_config['index']][name] += 1
        method_to_wrap(*arg, **kwargs)
      # functools.partial binds the current method/name eagerly, so the
      # closure does not capture the loop variables late.
      setattr(self, method_name,
              functools.partial(wrapped_method, method, method_name))
  def verify(self, test_case):
    """Asserts each tracked method ran `num_epoch` times on the right tasks."""
    method_count_dict = {
        method_name: self._num_epoch for method_name in self.METHODS_TO_VERIFY
    }
    assert self._is_between_graph is not None
    if self._is_between_graph:
      # TODO(b/124171024): In between-graph replication, by default only the
      # chief calls callback. Fix this test to cover that, as well as the rare
      # cases where all workers call.
      worker_call_count = {
          i: method_count_dict for i in range(0, self._num_worker)
      }
    else:
      # If in-graph, only the first worker calls callback methods.
      worker_call_count = {0: method_count_dict}
    chief_call_count = {0: method_count_dict}
    task_config = json.loads(os.environ['TF_CONFIG'])['task']['type']
    test_case.assertDictEqual(
        self._task_dict,
        {
            # PS' callback is not supposed to be called.
            'ps': {},
            # Worker or chief should only be called on worker/chief.
            'worker': worker_call_count if task_config == 'worker' else {},
            'chief': chief_call_count if task_config == 'chief' else {}
        })
class KerasMultiWorkerTestIndependentWorker(tf.test.TestCase,
                                            parameterized.TestCase):
  """Tests model.fit with MultiWorkerMirroredStrategy independent workers."""
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'],
          strategy=[
              tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
              tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
          ]))
  def testSimpleModelIndependentWorkerSync(self, strategy):
    # Expect each worker listed in TF_CONFIG to see 2 epochs of callbacks.
    verification_callback = MultiWorkerVerificationCallback(
        num_epoch=2,
        num_worker=len(
            json.loads(os.environ['TF_CONFIG'])['cluster']['worker']))
    verification_callback.is_between_graph = \
        strategy.extended.experimental_between_graph
    batch_size = 64
    steps = 2
    train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
        batch_size, steps)
    with strategy.scope():
      model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
    orig_loss, _ = model.evaluate(train_ds, steps=steps)
    history = model.fit(
        x=train_ds,
        epochs=2,
        steps_per_epoch=steps,
        callbacks=[verification_callback])
    self.assertIsInstance(history, keras.callbacks.History)
    trained_loss, _ = model.evaluate(train_ds, steps=steps)
    # Training should reduce the loss relative to the initial evaluation.
    self.assertLess(trained_loss, orig_loss)
    verification_callback.verify(self)
class KPLMultiWorkerTest(tf.test.TestCase,
                         parameterized.TestCase):
  """Custom training with Keras preprocessing layers under multi-worker MS."""
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'],
          use_adapt=[False],  # TODO(b/180742437): Add tests for using adapt.
          strategy=[
              tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
              # TODO(b/183956672): Re-enable
              # strategy_combinations.multi_worker_mirrored_2x2_gpu,
          ]))
  def testTrainAndServeWithKPL(self, use_adapt, strategy):
    test_utils_obj = kpl_test_utils.DistributeKplTestUtils()
    with strategy.scope():
      feature_mapper, label_mapper = test_utils_obj.define_kpls_for_training(
          use_adapt)
      model = test_utils_obj.define_model()
      optimizer = rmsprop.RMSprop(learning_rate=0.1)
      accuracy = keras.metrics.Accuracy()
      def dataset_fn(_):
        return test_utils_obj.dataset_fn(feature_mapper, label_mapper)
      @tf.function
      def train_step(iterator):
        """The step function for one training step."""
        def step_fn(inputs):
          """The computation to run on each worker."""
          features, labels = inputs
          with tf.GradientTape() as tape:
            pred = model(features, training=True)
            loss = keras.losses.binary_crossentropy(labels, pred)
            # Average over the global batch so gradients aggregate correctly
            # across replicas.
            loss = tf.nn.compute_average_loss(loss)
          grads = tape.gradient(loss, model.trainable_variables)
          optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
          actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
          accuracy.update_state(labels, actual_pred)
        strategy.run(step_fn, args=(next(iterator),))
      distributed_dataset = strategy.distribute_datasets_from_function(
          dataset_fn)
      distributed_iterator = iter(distributed_dataset)
      num_epochs = 4
      num_steps = 7
      for _ in range(num_epochs):
        accuracy.reset_state()
        for _ in range(num_steps):
          train_step(distributed_iterator)
        self.assertGreater(accuracy.result().numpy(), 0.5)
      # The optimizer steps once per train_step call.
      self.assertEqual(optimizer.iterations.numpy(), num_epochs * num_steps)
    # Test save/load/serving the trained model.
    test_utils_obj.test_save_load_serving_model(
        model, feature_mapper, test_utils_obj.define_reverse_lookup_layer())
if __name__ == '__main__':
  # Enable manual variable initialization to make sure variables are initialized
  # by `init_restore_or_wait_for_variables`.
  backend.manual_variable_initialization(True)
  # Replace sys.exit with os._exit so worker subprocesses terminate promptly.
  with tf.compat.v1.test.mock.patch.object(sys, 'exit', os._exit):
    tf.__internal__.distribute.multi_process_runner.test_main()
| 10,643 | 36.347368 | 84 | py |
keras | keras-master/keras/distribute/collective_all_reduce_strategy_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CollectiveAllReduceStrategy."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras import layers
from keras.engine import training
from keras.optimizer_v2 import gradient_descent as gradient_descent_keras
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=[
            tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
            tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
        ],
        mode=['eager']))
class MultiWorkerMirroredStrategyTest(tf.test.TestCase, parameterized.TestCase):
  """fit() tests specific to MultiWorkerMirroredStrategy."""
  def testFitWithoutStepsPerEpochPartialBatch(self, strategy):
    """fit() without steps_per_epoch handles a final partial global batch."""
    def _model_fn():
      x = layers.Input(shape=(1,), name='input')
      y = layers.Dense(1, name='dense')(x)
      model = training.Model(x, y)
      return model
    def _get_dataset():
      inputs = tf.expand_dims(
          tf.constant(range(10)), axis=1)
      targets = tf.expand_dims(
          tf.constant(range(10)), axis=1)
      # Make global batch size 12 for 2 replicas and a non-repeated dataset
      # with 10 elements so that we have partial batch
      dataset = tf.data.Dataset.from_tensor_slices(
          (inputs, targets)).batch(
              12, drop_remainder=False)
      return dataset
    with strategy.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      model = _model_fn()
      loss = 'mse'
      metrics = ['mae']
      model.compile(optimizer, loss, metrics=metrics)
    dataset = _get_dataset()
    kernel_before = model.get_weights()[0][0]
    model.fit(dataset, epochs=10)
    kernel_after = model.get_weights()[0][0]
    # Training should have moved the kernel, and moved it toward the
    # regression solution (kernel == 1).
    self.assertNotEqual(kernel_before, kernel_after)
    self.assertGreater(abs(kernel_before - 1), abs(kernel_after - 1))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.__internal__.distribute.multi_process_runner.test_main()
| 2,669 | 36.083333 | 82 | py |
keras | keras-master/keras/distribute/distributed_training_utils_v1.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
import tensorflow.compat.v2 as tf
# pylint:disable=protected-access
import functools
import numpy as np
from keras import backend
from keras import callbacks
from keras import metrics as metrics_module
from keras import optimizers
from keras.distribute import distribute_coordinator_utils as dc
from keras.distribute import distributed_training_utils as dist_utils
from keras.engine import training_utils_v1
from keras.optimizer_v2 import optimizer_v2
from keras.utils import tf_contextlib
from keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
def set_weights(distribution_strategy, dist_model, weights):
  """Sets the weights of the replicated models.
  The weights of the replicated models are set to the weights of the original
  model. The weights of the replicated model are Mirrored variables and hence
  we need to use the `update` call within a DistributionStrategy scope.
  Args:
    distribution_strategy: DistributionStrategy used to distribute training
        and validation.
    dist_model: The replicated models on the different devices.
    weights: The weights of the original model.
  """
  assign_ops = []
  # `weights` is a flat list; consume it layer by layer in model order.
  for layer in dist_model.layers:
    num_param = len(layer.weights)
    layer_weights = weights[:num_param]
    for sw, w in zip(layer.weights, layer_weights):
      if tf.compat.v1.executing_eagerly_outside_functions():
        # Eager-ish mode: the assignment takes effect immediately.
        sw.assign(w)
      else:
        # Graph mode: collect the per-replica assign ops to run in a session.
        assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
    weights = weights[num_param:]
  if not tf.compat.v1.executing_eagerly_outside_functions():
    backend.get_session(assign_ops).run(assign_ops)
def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs,
                  grouped_updates=None, grouped_session_args=None,
                  with_loss_tensor=False):
  """Unwrap the list of values contained in the PerReplica parameters.
  This function calls `flatten_per_replica_values` to parse each of the input
  parameters into a list of values on the different devices. If we set
  `with_loss_tensor` to be True, we also call `reduce` on the list of losses on
  the different devices to give us one loss tensor.
  Args:
    distribution_strategy: DistributionStrategy used to distribute training and
        validation.
    grouped_inputs: PerReplica inputs returned from the train or test function
        that we ran on each device.
    grouped_outputs: PerReplica outputs returned from the train or test function
        that we ran on each device.
    grouped_updates: PerReplica updates returned from the train or test function
        that we ran on each device.
    grouped_session_args: PerReplica session args returned from the train or
        test function that we ran on each device.
    with_loss_tensor: Boolean that indicates if we need to add the reduced loss
        tensor as one of the outputs.
  Returns:
    Values of each of the PerReplica parameters.
  """
  # Unwrap per device values returned from each model's train function.
  # This will be used to construct the main train function.
  all_inputs = flatten_per_replica_values(distribution_strategy,
                                          grouped_inputs)
  all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs,
                               with_loss_tensor)
  if grouped_updates:
    all_updates = flatten_per_replica_values(distribution_strategy,
                                             grouped_updates)
  else:
    all_updates = None
  all_session_args = {}
  # Session args are optional and only the 'feed_dict' and 'fetches' keys are
  # unwrapped; everything else in the session args is dropped here.
  if grouped_session_args:
    grouped_feed_dict = grouped_session_args.get('feed_dict')
    if grouped_feed_dict:
      all_session_args['feed_dict'] = flatten_per_replica_values(
          distribution_strategy, grouped_feed_dict)
    grouped_fetches = grouped_session_args.get('fetches')
    if grouped_fetches:
      all_session_args['fetches'] = flatten_per_replica_values(
          distribution_strategy, grouped_fetches)
  # TODO(priyag): Return only non empty/None values
  return all_inputs, all_outputs, all_updates, all_session_args
def unwrap_output_dict(strategy, grouped_outputs, mode):
  """Unwrap the list of outputs contained in the PerReplica parameters.

  Args:
    strategy: DistributionStrategy that produced `grouped_outputs`.
    grouped_outputs: In fit/eval modes, a dict with keys 'total_loss',
      'output_losses', 'metrics' and 'batch_size' holding PerReplica values;
      in predict mode, a structure matching the model output.
    mode: One of the `ModeKeys` values.

  Returns:
    For predict, the flattened per-replica outputs. Otherwise a dict with the
    total loss reduced by SUM across replicas, the flattened output losses and
    metrics, and the batch size summed across replicas.
  """
  if mode == ModeKeys.PREDICT:
    return flatten_per_replica_values(strategy, grouped_outputs)
  # In the case of fit/eval, the grouped_outputs is a dict, whereas in predict,
  # the output is as same structure as model output. They need to be treated
  # differently
  total_loss = strategy.reduce(tf.distribute.ReduceOp.SUM,
                               grouped_outputs['total_loss'][0], axis=None)
  output_losses = flatten_per_replica_values(strategy,
                                             grouped_outputs['output_losses'])
  metrics = flatten_per_replica_values(strategy,
                                       grouped_outputs['metrics'])
  batch_size = strategy.reduce(tf.distribute.ReduceOp.SUM,
                               grouped_outputs['batch_size'], axis=None)
  if (backend.is_tpu_strategy(strategy) and
      tf.compat.v1.executing_eagerly_outside_functions()):
    # Choose 1 value per replica in the TPU case since all replicas produce the
    # same output.
    # We only do this in eager mode for now since this function is used in
    # both graph and eager mode and in the graph case we currently don't use
    # experimental_run so would need to be removed when we converge the graph
    # code path as well.
    output_losses = output_losses[::strategy.num_replicas_in_sync]
    metrics = metrics[::strategy.num_replicas_in_sync]
  return {'total_loss': [total_loss],
          'output_losses': output_losses,
          'metrics': metrics,
          'batch_size': batch_size}
def unwrap_outputs(distribution_strategy, grouped_outputs,
                   with_loss_tensor=False):
  """Unwrap the list of outputs contained in the PerReplica parameters.
  This function calls `flatten_per_replica_values` to parse each of the input
  parameters into a list of outputs on the different devices. If we set
  `with_loss_tensor` to be True, we also call `reduce` on the list of losses on
  the different devices to give us one loss tensor.
  Args:
    distribution_strategy: DistributionStrategy used to distribute training and
        validation.
    grouped_outputs: PerReplica outputs returned from the train or test function
        that we ran on each device.
    with_loss_tensor: Boolean that indicates if we need to add the reduced loss
        tensor as one of the outputs.
  Returns:
    Values of each of the PerReplica outputs.
  """
  if not with_loss_tensor:
    return flatten_per_replica_values(distribution_strategy,
                                      grouped_outputs)
  if not isinstance(grouped_outputs, list):
    grouped_outputs = [grouped_outputs]
  # reduce loss tensor before adding it to the list of fetches
  # (element 0 is the loss by convention; the rest are metrics/outputs).
  loss = distribution_strategy.reduce(tf.distribute.ReduceOp.SUM,
                                      grouped_outputs[0], axis=None)
  all_outputs = flatten_per_replica_values(distribution_strategy,
                                           grouped_outputs[1:])
  if (backend.is_tpu_strategy(distribution_strategy) and
      tf.compat.v1.executing_eagerly_outside_functions()):
    # Choose 1 value per replica in the TPU case since all replicas produce the
    # same output.
    # We only do this in eager mode for now since this function is used in
    # both graph and eager mode and in the graph case we currently don't use
    # experimental_run so would need to be removed when we converge the graph
    # code path as well.
    all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync]
  return [loss] + all_outputs
def flatten_per_replica_values(distribution_strategy, per_replica_values):
  """Unwraps and flattens a nest of PerReplica parameters.

  Each PerReplica value holds one value per device. This function flattens an
  arbitrary nest of such values and returns every per-device value, in nest
  order, as a single flat list.

  Args:
    distribution_strategy: DistributionStrategy used to distribute training and
      validation.
    per_replica_values: A single PerReplica object or a nest of them.

  Returns:
    Flat list of all the per-device values of all the PerReplica objects.
  """
  flat_values = []
  for nested_value in tf.nest.flatten(per_replica_values):
    # `unwrap` yields the per-device components of one PerReplica value.
    flat_values.extend(distribution_strategy.unwrap(nested_value))
  return flat_values
def validate_callbacks(input_callbacks, optimizer):
  """Validate whether given callbacks are supported by DistributionStrategy.

  Args:
    input_callbacks: List of callbacks passed by the user to fit.
    optimizer: Optimizer instance used to train the model.

  Raises:
    ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is used with
      an optimizer that is not a Keras Optimizer V2.
  """
  if not input_callbacks:
    return
  lr_callback_types = (callbacks.LearningRateScheduler,
                       callbacks.ReduceLROnPlateau)
  for callback in input_callbacks:
    # LR-scheduling callbacks mutate optimizer state and therefore require a
    # V2 optimizer when running under a DistributionStrategy.
    if (isinstance(callback, lr_callback_types) and
        not isinstance(optimizer, optimizer_v2.OptimizerV2)):
      raise ValueError('You must specify a Keras Optimizer V2 when using '
                       '%s callback with DistributionStrategy.' % callback)
    # TensorBoard's `write_grads` accesses model attributes and runs ops in a
    # way that is incompatible with DistributionStrategy, so disable it.
    if isinstance(callback, callbacks.TensorBoard) and getattr(
        callback, 'write_grads', False):
      logging.warning(
          UserWarning(
              '`write_grads` in the TensorBoard callback is not supported '
              'when using DistributionStrategy. Setting `write_grads` '
              'to `False`.'))
      callback.write_grads = False
def validate_distributed_dataset_inputs(distribution_strategy, x, y,
                                        sample_weights=None):
  """Validate all the components of a DistributedValue Dataset input.

  Args:
    distribution_strategy: The current DistributionStrategy used to call
      `fit`/`evaluate`.
    x: Input Dataset DistributedValue object (e.g. a PerReplica object under
      `MirroredStrategy`). May also be a tuple or dict whose keys match the
      names of the model's input layers.
    y: Target Dataset DistributedValue object, structured like `x` but matched
      against the model's output layers.
    sample_weights: Optional sample-weight Dataset DistributedValue object.

  Returns:
    The unwrapped values lists of the x, y and sample-weight DistributedValues
    inputs (None for y/sample_weights when not provided).

  Raises:
    ValueError: If x and y do not have support for being evaluated as tensors,
      or if x and y contain elements that are not tensors, or if x and y
      contain elements that have a shape or dtype mismatch.
  """
  # Inputs fed to a distributed model must come from a `tf.data.Dataset`;
  # each component is unwrapped and validated once here so callers do not have
  # to call `unwrap` a second time.
  x_values_list = validate_per_replica_inputs(distribution_strategy, x)
  y_values_list = (None if y is None else
                   validate_per_replica_inputs(distribution_strategy, y))
  sample_weights_list = (None if sample_weights is None else
                         validate_per_replica_inputs(distribution_strategy,
                                                     sample_weights))
  return x_values_list, y_values_list, sample_weights_list
def validate_per_replica_inputs(distribution_strategy, x):
  """Validates PerReplica dataset input list.
  Args:
    distribution_strategy: The current DistributionStrategy used to call
      `fit`, `evaluate` and `predict`.
    x: A list of PerReplica objects that represent the input or
      target values.
  Returns:
    List containing the first element of each of the PerReplica objects in
    the input list.
  Raises:
    ValueError: If any of the objects in the `per_replica_list` is not a tensor.
  """
  # Convert the inputs and targets into a list of PerReplica objects.
  per_replica_list = tf.nest.flatten(x, expand_composites=True)
  x_values_list = []
  for x in per_replica_list:  # NOTE: deliberately shadows the `x` argument.
    # At this point x should contain only tensors.
    x_values = distribution_strategy.unwrap(x)
    for value in x_values:
      if not tf.is_tensor(value):
        raise ValueError('Dataset input to the model should be tensors instead '
                         'they are of type {}'.format(type(value)))
    if not tf.executing_eagerly():
      # Validate that the shape and dtype of all the elements in x are the same.
      validate_all_tensor_shapes(x, x_values)
      validate_all_tensor_types(x, x_values)
    # All replicas carry the same logical value, so keep only the first.
    x_values_list.append(x_values[0])
  return x_values_list
def validate_all_tensor_types(x, x_values):
  """Raises ValueError unless every tensor in `x_values` shares one dtype."""
  expected_dtype = x_values[0].dtype
  for value in x_values[1:]:
    if value.dtype != expected_dtype:
      raise ValueError('Input tensor dtypes do not match for distributed tensor'
                       ' inputs {}'.format(x))
def validate_all_tensor_shapes(x, x_values):
  """Raises ValueError unless every tensor in `x_values` shares one shape."""
  expected_shape = x_values[0].shape.as_list()
  for value in x_values[1:]:
    if value.shape.as_list() != expected_shape:
      raise ValueError('Input tensor shapes do not match for distributed tensor'
                       ' inputs {}'.format(x))
def _wait_for_variable_initialization(session):
  """Utility to wait for variables to be initialized."""
  all_variables = backend._get_variables(backend.get_graph())  # pylint: disable=protected-access
  candidate_vars = []
  # Only poll variables this process has not already seen initialized.
  for v in all_variables:
    if not getattr(v, '_keras_initialized', False):
      candidate_vars.append(v)
  if not candidate_vars:
    return
  # Busy-poll the session until every candidate reports initialized; another
  # worker (e.g. the chief in multi-worker training) may be doing the actual
  # initialization.
  while True:
    is_initialized = session.run(
        [tf.compat.v1.is_variable_initialized(v) for v in candidate_vars])
    uninitialized_vars = []
    for flag, v in zip(is_initialized, candidate_vars):
      if not flag:
        uninitialized_vars.append(v)
      v._keras_initialized = True  # pylint: disable=protected-access
    if not uninitialized_vars:
      break
def init_restore_or_wait_for_variables():
  """Initialize or restore variables or wait for variables to be initialized."""
  # Delegates entirely to the backend's variable-initialization helper on the
  # current session.
  backend._initialize_variables(backend._get_session())  # pylint: disable=protected-access
def validate_inputs(x, y):
  """Validate inputs when using DistributionStrategy.

  Args:
    x: Model Inputs.
    y: Model Targets.

  Raises:
    ValueError: If either input is a v1 dataset Iterator; only
      `tf.data.Dataset` objects or numpy arrays are supported.
  """
  iterator_type = tf.compat.v1.data.Iterator
  if isinstance(x, iterator_type) or isinstance(y, iterator_type):
    raise ValueError('`DistributionStrategy` does not support inputs of type '
                     'Iterator. You must pass a `tf.data.Dataset` object or a '
                     'numpy array as input.')
def is_dataset_shape_fully_defined(dataset):
  """Returns whether a dataset contains a final partial batch."""
  # Fully-defined output shapes imply every batch (including the last) has a
  # statically known size.
  shapes = tf.nest.flatten(tf.compat.v1.data.get_output_shapes(dataset))
  return all(shape.is_fully_defined() for shape in shapes)
def process_batch_and_step_size(strategy,
                                inputs,
                                batch_size,
                                steps_per_epoch,
                                mode,
                                validation_split=0.):
  """Process the batch size and step size based on input and dist strategy.

  Args:
    strategy: The DistributionStrategy in use.
    inputs: Model inputs; only numpy-array inputs trigger recomputation.
    batch_size: User-specified batch size, possibly None.
    steps_per_epoch: User-specified step count, possibly None.
    mode: A `ModeKeys` value; relaxes full-consumption constraints.
    validation_split: Fraction of samples reserved for validation.

  Returns:
    Tuple of (batch_size, steps_per_epoch), possibly recomputed.
  """
  first_x_value = tf.nest.flatten(inputs)[0]
  # Only numpy inputs have a knowable sample count; other input types pass
  # the user-provided values through unchanged.
  if isinstance(first_x_value, np.ndarray):
    num_samples = first_x_value.shape[0]
    if validation_split and 0. < validation_split < 1.:
      # Validation samples are carved off the end, so size the training set
      # from the remaining fraction.
      num_samples = int(num_samples * (1 - validation_split))
    # Until support for partial batch is implemented across all
    # functions and distribution strategy, we pass `mode` to selectively
    # relax the constraint to consume all the training samples.
    steps_per_epoch, batch_size = get_input_params(
        strategy, num_samples, steps_per_epoch, batch_size, mode=mode)
  return batch_size, steps_per_epoch
def get_input_params(distribution_strategy,
                     num_samples,
                     steps,
                     batch_size,
                     mode=None):
  """Calculate the number of batches and steps/steps_per_epoch.
  Args:
    distribution_strategy: The DistributionStrategy used to compile the model.
    num_samples: The number of samples from which we determine the batch size
      and steps.
    steps: The specified number of steps.
    batch_size: The specified batch_size.
    mode: ModeKey representing whether input will be used for training,
      evaluation, or prediction. This is used to relax the constraints on
      consuming all the training samples to keep compatibility till we
      support partial batches. If none, then partial batches are not allowed.
  Returns:
    steps: The steps or steps_per_epoch argument depending on if a user is
        calling `fit`, `evaluate` or `predict`. If the is_training flag is set
        we don't require the number of samples to be used completely.
    batch_size: The batch size to be used in model iterations.
  Raises:
    ValueError: If the number of batches or steps evaluates to 0.
  """
  # TODO(b/118776054): Use global batch size for Keras/DS support.
  # Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
  use_per_replica_batch = not dist_utils.global_batch_size_supported(
      distribution_strategy)
  # TODO(b/128995245): In eager mode, uneven batch sizes are allowed except for
  # `fit()` on TPUStrategy.
  # In graph mode, the zero batch case in batch norm is not handled due to
  # XLA-GPU regression. Uneven batch sizes are not allowed except
  # for `test()` and `predict()` on TPUStrategy.
  if tf.executing_eagerly():
    allow_partial_batch = (
        mode != ModeKeys.TRAIN or
        not backend.is_tpu_strategy(distribution_strategy))
  else:
    allow_partial_batch = (
        mode == ModeKeys.TRAIN or
        ((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST) and
         backend.is_tpu_strategy(distribution_strategy)))
  if steps is None:
    if batch_size is None:
      # If neither the batch size or number of steps are set. We choose the
      # global batch size as the minimum of number of samples and 32. 32 is
      # chosen to provide backward compatibility.
      global_batch_size = min(num_samples, 32)
    else:
      # If the user provided the batch size we need to handle the case
      # between different strategies that use the global/per-replica batch size
      global_batch_size = batch_size
      if use_per_replica_batch:
        global_batch_size *= distribution_strategy.num_replicas_in_sync
    if allow_partial_batch:
      # Round up so the trailing partial batch still gets a step.
      steps = np.ceil(num_samples / global_batch_size).astype(int)
    else:
      if num_samples % global_batch_size:
        raise ValueError('The number of samples %s is not divisible by '
                         'batch size %s.' % (num_samples, global_batch_size))
      steps = num_samples // global_batch_size
  else:
    if batch_size is None:
      # We calculate the batch size based on the number of steps specified
      if num_samples % steps:
        raise ValueError('The number of samples %s is not divisible by '
                         'steps %s. Please change the number of steps to a '
                         'value that can consume all the samples' % (
                             num_samples, steps))
      global_batch_size = num_samples // steps
    else:
      # If the user provided the batch size we need to handle the case
      # between different strategies that use the global/per-replica batch size
      global_batch_size = batch_size
      if use_per_replica_batch:
        global_batch_size *= distribution_strategy.num_replicas_in_sync
      # With partial batches the last step only needs one sample; otherwise
      # every step must be a full batch.
      min_num_samples = global_batch_size * steps
      if allow_partial_batch:
        min_num_samples = global_batch_size * (steps-1) + 1 if steps > 1 else 0
      if num_samples < min_num_samples:
        raise ValueError('Number of samples %s is less than samples required '
                         'for specified batch_size %s and steps %s' % (
                             num_samples, global_batch_size, steps))
  # We need to return the per replica or global batch size based on the strategy
  if use_per_replica_batch:
    if global_batch_size % distribution_strategy.num_replicas_in_sync:
      raise ValueError(
          'The batch size (%s) could not be sharded evenly across the sync '
          'replicas (%s) in the distribution strategy.' % (
              global_batch_size, distribution_strategy.num_replicas_in_sync))
    batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync
  else:
    batch_size = global_batch_size
  return steps, batch_size
def get_batch_dimension(iterator):
  """Returns the batch dimension of the iterator's outputs, or None."""
  output_shapes = tf.nest.flatten(tf.compat.v1.data.get_output_shapes(iterator))
  # Take the batch size from the first element, as it should be the same for
  # all.
  first_dims = output_shapes[0].dims
  if not first_dims:
    return None
  return first_dims[0]
def get_iterator(dataset, distribution_strategy):
  """Creates and initializes a distributed dataset iterator in strategy scope."""
  with distribution_strategy.scope():
    iterator = distribution_strategy.make_dataset_iterator(dataset)
  # Graph mode requires running the iterator's initializer explicitly.
  initialize_iterator(iterator, distribution_strategy)
  return iterator
def initialize_iterator(iterator, distribution_strategy):
  """Runs the iterator's initializer op (no-op in eager execution)."""
  with distribution_strategy.scope():
    init_op = tf.group(iterator.initializer)
    if not tf.executing_eagerly():
      backend.get_session((init_op,)).run(init_op)
def _get_input_from_iterator(iterator, model):
  """Get elements from the iterator and verify the input shape and type."""
  next_element = iterator.get_next()
  # `len(nest.flatten(x))` is going to not count empty elements such as {}.
  # len(nest.flatten([[0,1,2], {}])) is 3 and not 4.   The `next_element` is
  # going to get flattened in `_prepare_feed_values` to work around that. Empty
  # elements are going to get filtered out as part of the flattening.
  # Disambiguate (x) vs (x, y) vs (x, y, sample_weights) by comparing the
  # flattened element count against the model's input/output counts.
  if len(tf.nest.flatten(next_element)) == len(model.inputs):
    x = next_element
    y = None
    sample_weights = None
  elif len(tf.nest.flatten(next_element)) == (len(model.inputs) +
                                              len(model.outputs)):
    x, y = next_element
    sample_weights = None
  else:
    x, y, sample_weights = next_element
  # Validate that all the elements in x and y are of the same type and shape.
  validate_distributed_dataset_inputs(
      model._distribution_strategy, x, y, sample_weights)
  return x, y, sample_weights
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
  """Prepare feed values to the model execution function.
  Args:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
  Returns:
    Feed values for the model in the given mode.
  """
  strategy = model._distribution_strategy
  # `inputs` arrives here as an iterator; targets/sample_weights passed in are
  # superseded by what the iterator yields.
  inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
  if backend.is_tpu_strategy(strategy):
    if sample_weights is not None:
      raise ValueError('TPUStrategy does not support sample weights.')
  # When the inputs are dict, then we want to flatten it in the same order as
  # the input layers, such that the data are fed into the input layers in the
  # correct order.
  if isinstance(inputs, dict):
    inputs = [inputs[key] for key in model._feed_input_names]
  if is_distributing_by_cloning(model):
    inputs = flatten_per_replica_values(strategy, inputs)
    targets = flatten_per_replica_values(strategy, targets)
    # Expand 1-dimensional inputs.
    # TODO(b/124535720): Remove once this standarize data logic is shared with
    # main flow.
    inputs, targets = tf.nest.map_structure(
        training_utils_v1.standardize_single_array, (inputs, targets))
  else:
    inputs = training_utils_v1.ModelInputs(inputs).as_list()
  if mode == ModeKeys.PREDICT:
    sample_weights = []
    targets = []
  elif sample_weights is not None and is_distributing_by_cloning(model):
    if tf.executing_eagerly() and not model._compile_distribution:
      raise NotImplementedError('`sample_weight` is not supported when using '
                                'tf.distribute.Strategy in eager mode and '
                                'cloning=True.')
    sample_weights = flatten_per_replica_values(strategy, sample_weights)
  ins = [inputs, targets, sample_weights]
  return tuple(ins)
def is_distributing_by_cloning(model):
  """Decide whether this model is going to be distributed via cloning.
  We are going to distribute the model by cloning in graph mode.
  Args:
    model: Keras model to distribute.
  Returns:
    True if the `model` is going to be distributed using cloning and False
    otherwise.
  """
  if (backend.is_tpu_strategy(model._distribution_strategy) and
      tf.executing_eagerly):  # b/137580852
    # NOTE(review): `tf.executing_eagerly` is referenced, not called, so this
    # operand is always truthy and the branch reduces to the TPU-strategy
    # check alone. Given the attached bug reference this may be deliberate
    # (TPU never distributes by cloning) -- confirm before "fixing".
    return False
  elif tf.compat.v1.executing_eagerly_outside_functions():
    return bool(model._compile_distribution)
  return True
def _custom_compile_for_predict(model):
  """Custom compile for TPU predict mode."""
  if not model.built:
    # Model is not compilable because it does not know its number of inputs
    # and outputs, nor their shapes and names. We will compile after the first
    # time the model gets called on training data.
    return
  # Predict needs no loss or training machinery; mark the model compiled and
  # clear the cached execution functions so they are rebuilt for this mode.
  model._is_compiled = True
  model.total_loss = None
  model.train_function = None
  model.test_function = None
  model.predict_function = None
def _build_network_on_replica(model, mode, inputs=None, targets=None):
  """Build an updated model on replicas.
  We create a new Keras model while sharing the variables from the old graph.
  Building a new sub-graph is required since the original keras model creates
  placeholders for the input and the output that are not accessible till we
  call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.
  The sharing of weights and layers between the old and the new model guarantee
  that we're using Strategy variables and any updates on either model are
  reflected correctly in callbacks and loop iterations.
  We need to make sure we share the optimizers between the old and the new model
  as well so that optimizer state is not lost if the user is running fit
  multiple times.
  Args:
    model: Model to be replicated across Replicas
    mode: Which of fit/eval/predict is building the distributed network
    inputs: Input variables to be passed to the model
    targets: Target tensor to be passed to model.compile
  Returns:
    A new model with shared layers with the old model.
  """
  # Need to do imports here since we run into a circular dependency error.
  from keras import models  # pylint: disable=g-import-not-at-top
  from keras.engine import sequential  # pylint: disable=g-import-not-at-top
  # We rely on the internal methods to avoid having share_weights weights in the
  # public API.
  if isinstance(model, sequential.Sequential):
    updated_model = models._clone_sequential_model(
        model, input_tensors=inputs, layer_fn=models.share_weights)
  else:
    updated_model = models._clone_functional_model(
        model, input_tensors=inputs, layer_fn=models.share_weights)
    # Callable losses added directly to a functional Model need to be added
    # here.
    updated_model._callable_losses = model._callable_losses
  # Recast all low precision outputs back to float32 since we only casted
  # the inputs to bfloat16 and not targets. This is done so that we can preserve
  # precision when calculating the loss value.
  def _upcast_low_precision_outputs(output):
    if output.dtype == tf.bfloat16:
      return tf.cast(output, tf.float32)
    else:
      return output
  updated_model.outputs = [_upcast_low_precision_outputs(o)
                           for o in updated_model.outputs]
  if isinstance(targets, tuple):
    targets = tf.nest.flatten(targets)
  if mode == ModeKeys.PREDICT and inputs is not None:  # TPU predict case
    _custom_compile_for_predict(updated_model)
  else:
    # Re-compile with the original model's optimizer so optimizer state is
    # shared between the original and the replicated model.
    updated_model.compile(
        model.optimizer,
        model.loss,
        metrics=metrics_module.clone_metrics(model._compile_metrics),
        loss_weights=model.loss_weights,
        sample_weight_mode=model.sample_weight_mode,
        weighted_metrics=metrics_module.clone_metrics(
            model._compile_weighted_metrics),
        target_tensors=targets)
  return updated_model
def _build_distributed_network(model, strategy, mode, inputs=None,
                               targets=None):
  """Create a cloned model on each replica."""
  # Build in the shared graph so the replica sub-graphs share variables.
  with backend.get_graph().as_default(), strategy.scope():
    distributed_model = strategy.extended.call_for_each_replica(
        _build_network_on_replica,
        args=(model, mode, inputs, targets))
    set_distributed_model(model, mode, distributed_model)
def _clone_and_build_model(model, mode, inputs=None, targets=None):
  """Clone and build the given keras_model."""
  # We need to set the import here since we run into a circular dependency
  # error.
  from keras import models  # pylint: disable=g-import-not-at-top
  cloned_model = models.clone_model(model, input_tensors=inputs)
  # Compile and build model.
  if isinstance(model.optimizer, optimizers.TFOptimizer):
    optimizer = model.optimizer
  else:
    # Re-instantiate the optimizer from config so each replica model gets its
    # own optimizer object (unlike `_build_network_on_replica`, which shares).
    optimizer_config = model.optimizer.get_config()
    optimizer = model.optimizer.__class__.from_config(optimizer_config)
  # Recast all low precision outputs back to float32 since we only casted
  # the inputs to bfloat16 and not targets. This is done so that we can preserve
  # precision when calculating the loss value.
  def _upcast_low_precision_outputs(output):
    if output.dtype == tf.bfloat16:
      return tf.cast(output, tf.float32)
    else:
      return output
  cloned_model.outputs = [_upcast_low_precision_outputs(o)
                          for o in cloned_model.outputs]
  if isinstance(targets, tuple):
    targets = tf.nest.flatten(targets)
  if mode == ModeKeys.PREDICT and inputs is not None:  # TPU predict case
    _custom_compile_for_predict(cloned_model)
  else:
    cloned_model.compile(
        optimizer,
        model.loss,
        metrics=metrics_module.clone_metrics(model._compile_metrics),
        loss_weights=model.loss_weights,
        sample_weight_mode=model.sample_weight_mode,
        weighted_metrics=metrics_module.clone_metrics(
            model._compile_weighted_metrics),
        target_tensors=targets)
  return cloned_model
def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):
  """Create a cloned model on each replica."""
  with backend.get_graph().as_default(), strategy.scope():
    distributed_model = strategy.extended.call_for_each_replica(
        _clone_and_build_model, args=(model, mode, inputs, targets))
    set_distributed_model(model, mode, distributed_model)
  if mode == ModeKeys.TRAIN:
    # Callbacks observe training through a dedicated callback model.
    model._make_callback_model(distributed_model)
def _make_execution_function(model, mode):
  """Makes or reuses function to run one step of distributed model execution."""
  # Graph-mode (cloning) path manages its own per-model caching.
  if is_distributing_by_cloning(model):
    return _make_execution_function_with_cloning(model, mode)
  # Otherwise reuse a previously built function for this mode when available.
  cached_function = get_distributed_function(model, mode)
  if cached_function is not None:
    return cached_function
  new_function = _make_execution_function_without_cloning(model, mode)
  set_distributed_function(model, mode, new_function)
  return new_function
def _make_execution_function_without_cloning(model, mode):
  """Creates a function to run one step of distributed model execution."""
  strategy = model._distribution_strategy
  with strategy.scope():
    per_replica_function = _make_replica_execution_function(model, mode)
    def distributed_function(input_fn):
      """A single step of the distributed execution across replicas."""
      # input_fn must produce (x, y, sample_weights) per-replica values.
      x, y, sample_weights = input_fn()
      # Call `Model.{train,test,predict}_on_batch` on every replica passing
      # PerReplicas as arguments. On every replica inside this call, each
      # PerReplica object will return the value for that replica. The outputs
      # are PerReplicas too.
      outputs = strategy.run(per_replica_function, args=(x, y, sample_weights))
      # Out of PerReplica outputs reduce or pick values to return.
      all_outputs = unwrap_outputs(
          strategy, outputs, with_loss_tensor=(mode != ModeKeys.PREDICT))
      return all_outputs
    if not model.run_eagerly:
      distributed_function = tf.function(distributed_function)
      def execution_function(input_fn):
        # `numpy` translates Tensors to values in Eager mode.
        return [out.numpy() for out in distributed_function(input_fn)]
    else:
      execution_function = distributed_function
    return execution_function
def _make_replica_execution_function(model, mode):
  """A single step of the distributed execution on a replica."""
  if mode == ModeKeys.TRAIN:
    func = model.train_on_batch
  elif mode == ModeKeys.TEST:
    func = model.test_on_batch
  else:
    # Predict ignores targets and sample weights; adapt the signature so all
    # three modes can be called uniformly as func(x, y, sample_weights).
    def predict_on_batch(x, y=None, sample_weights=None):
      del y, sample_weights
      return model.predict_on_batch(x)
    func = predict_on_batch
  if mode != ModeKeys.PREDICT:
    # `reset_metrics` is set to False to maintain stateful metrics across
    # batch-level calls.
    func = functools.partial(func, reset_metrics=False)
  return func
def _make_replicated_models_with_cloning(model, mode):
  """Build models on each replica."""
  strategy = model._distribution_strategy
  # If distributed_model is not built, create one for `mode`.
  if model._compile_distribution:
    # Compiled-under-strategy models get fully cloned (fresh optimizers).
    clone_model_on_replicas(model, strategy, mode)
  else:
    # Otherwise build a weight-sharing replica network.
    _build_distributed_network(model, strategy, mode)
def _make_execution_function_with_cloning(model, mode):
  """Clones or re-uses models to run one step of distributed model execution.

  Args:
    model: Keras model being distributed.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.

  Returns:
    The (possibly cached) execution function for `mode`.
  """
  distributed_model = get_distributed_model(model, mode)
  # TODO(b/134069401): Create a cache for the distributed model and exec
  # function that incorporates additional attributes to be part of the cache key
  # than just the mode.
  # If distributed model for a particular `mode` is already built, use the
  # `_distributed_function` on that distributed model.
  # If you have updated the sample_weight_mode on the model, then you will need
  # to recompile metrics and recreate the execution function. This is indicated
  # by the `_recompile_exec_function` property.
  #
  # Bug fix: the cached function is stored under `_distributed_function`
  # (assigned below), but the original check probed `_distribution_function`,
  # an attribute that is never set -- so the cache was never hit and the
  # execution function was rebuilt on every call.
  if (distributed_model and
      hasattr(distributed_model, '_distributed_function') and
      not (hasattr(distributed_model, '_recompile_exec_function') and
           distributed_model._recompile_exec_function)):
    return distributed_model._distributed_function
  if not distributed_model:
    _make_replicated_models_with_cloning(model, mode)
    distributed_model = get_distributed_model(model, mode)
  assert distributed_model
  # Also create an execution function on that distributed model.
  if tf.executing_eagerly():
    distributed_function = _make_eager_execution_function(model, mode)
  else:
    distributed_function = _make_graph_execution_function(model, mode)
  # We cache the distributed execution function on the model since creating
  # distributed models and execution functions are expensive.
  distributed_model._distributed_function = distributed_function
  distributed_model._recompile_exec_function = False
  return distributed_function
def _make_graph_execution_function(model, mode):
  """Makes function to run one step of distributed model in graph mode.

  Args:
    model: The original Keras model.
    mode: One of `ModeKeys` (train/test/predict).

  Returns:
    A `backend.function` combining the per-replica inputs, outputs, update ops
    and session kwargs into a single callable.
  """

  def _per_replica_function(model):
    # Build each replica's execution function and expose the pieces needed to
    # stitch the combined function together below.
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)

  strategy = model._distribution_strategy
  with strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_replica_fit_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = strategy.extended.call_for_each_replica(
         _per_replica_function, args=(get_distributed_model(model, mode),))

    # Initialize the variables in the replicated model. This is necessary for
    # multi-worker training because on some workers, initialization is not
    # needed. This method does initialization or waiting for initialization
    # according to the context object of distribute coordinator.
    init_restore_or_wait_for_variables()

    # Unwrap all the per device values returned from `call_for_each_replica`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
    (all_inputs, all_outputs, all_updates, all_session_args) = unwrap_values(
        strategy,
        grouped_inputs,
        grouped_outputs,
        grouped_updates,
        grouped_session_args,
        # Predict has no scalar loss to aggregate across replicas.
        with_loss_tensor=(mode != ModeKeys.PREDICT))

    return backend.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_{}_function'.format(mode),
        **all_session_args)
def _make_eager_execution_function(model, mode):
  """Makes function to run one step of distributed model eager execution.

  Args:
    model: The original Keras model.
    mode: One of `ModeKeys` (train/test/predict).

  Returns:
    A `backend.function` (created in a separate FuncGraph) combining the
    per-replica inputs/outputs into a single callable.
  """

  def _per_replica_function(model):
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs)

  # NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using
  # the global one.
  strategy = model._distribution_strategy
  global_graph = backend.get_graph()

  with global_graph.as_default(), strategy.scope():
    # First we gather the relevant portions of the model across all replicas.
    # `backend._scratch_graph(global_graph)` signals to Keras that it should not
    # lift to a separate graph when creating the per-replica functions.
    with backend._scratch_graph(global_graph):
      # Create train ops on each of the devices when we call
      # `_per_replica_fit_function`.
      grouped = strategy.extended.call_for_each_replica(
          _per_replica_function, args=(get_distributed_model(model, mode),))
      grouped_inputs, grouped_outputs = grouped

      # Unwrap all the per device values returned from `call_for_each_replica`.
      # Unwrapping per device values gives you a list of values that can be
      # used to construct a new train function that is composed of
      # inputs/outputs on all the devices over which the model is distributed.
      (all_inputs, all_outputs, _, _) = unwrap_values(
          strategy,
          grouped_inputs,
          grouped_outputs,
          # Predict has no scalar loss to aggregate across replicas.
          with_loss_tensor=(mode != ModeKeys.PREDICT))

    # Finally, a joint Keras function is created; this one will be created in
    # a separate FuncGraph.
    return backend.function(
        all_inputs,
        all_outputs,
        name='eager_distributed_{}_function'.format(mode))
def _copy_weights_to_distributed_model(original_model, mode):
  """Pushes the original model's weights into its replicated copies."""
  strategy = original_model._distribution_strategy
  replicated = get_distributed_model(original_model, mode)
  if not strategy:
    return
  # `set_weights` is the strategy-aware helper; it is handed the first
  # unwrapped replica together with the source weights.
  source_weights = original_model.get_weights()
  first_replica = strategy.unwrap(replicated)[0]
  set_weights(strategy, first_replica, source_weights)
def _copy_weights_to_original_model(model, mode):
"""Copies weights from first distributed model back to original model."""
if model._distribution_strategy and mode == ModeKeys.TRAIN:
distributed_model = get_distributed_model(model, mode)
updated_weights = model._distribution_strategy.unwrap(
distributed_model)[0].get_weights()
model.set_weights(updated_weights)
def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):
"""Aggregates the per-replica batch-level outputs from a distributed step."""
if strategy is not None and mode == ModeKeys.PREDICT:
total_batch_outs = []
for i in range(len(model.outputs)):
num_replicas = strategy.num_replicas_in_sync
nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
total_batch_outs.append(
concat_along_batch_dimension(tf.nest.flatten(nested_outs)))
return total_batch_outs
return batch_outs
def _reset_metrics(model):
if model._distribution_strategy:
for mode in [ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT]:
distributed_model = get_distributed_model(model, mode)
if distributed_model:
first_model = model._distribution_strategy.unwrap(distributed_model)[0]
first_model.reset_metrics()
def get_distributed_model(model, mode):
  """Looks up the cached replicated model for `mode`, or None."""
  return model._distributed_model_cache.get(_generate_cache_key(mode))
def set_distributed_model(model, mode, distributed_model):
  """Caches the replicated model built for `mode` on `model`."""
  model._distributed_model_cache[_generate_cache_key(mode)] = distributed_model
def get_distributed_function(model, mode):
  """Looks up the cached execution function for `mode`, or None."""
  return model._distributed_function_cache.get(_generate_cache_key(mode))
def set_distributed_function(model, mode, distributed_function):
  """Caches the execution function built for `mode` on `model`."""
  model._distributed_function_cache[_generate_cache_key(mode)] = (
      distributed_function)
def _generate_cache_key(mode):
key = hash(mode)
return key
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
  """Enters the strategy scope together with a Keras learning-phase scope."""
  with strategy.scope():
    with backend.learning_phase_scope(learning_phase):
      yield
def is_current_worker_chief():
  """Returns True iff the current distribute-coordinator worker is chief."""
  worker_context = dc.get_current_worker_context()
  return worker_context.is_chief
def filter_distributed_callbacks(callbacks_list, model):
  """Filter Callbacks based on the worker context when running multi-worker.

  Args:
    callbacks_list: A list of `Callback` instances.
    model: Keras model instance.

  Returns:
    The list of `Callback` instances that should be run on this worker.

  Raises:
    ValueError: If the model is not in multi-worker mode.
  """
  if not model._in_multi_worker_mode():
    raise ValueError(
        'filter_distributed_callbacks() should only be called when Keras '
        'is in multi worker mode.')

  callbacks_list = callbacks_list or []
  if not any(
      isinstance(cb, callbacks.ModelCheckpoint) for cb in callbacks_list):
    # TODO(rchao): Consider providing a ModelCheckpoint here if the user
    # fails to (possibly with tempfile directory).
    logging.warning('ModelCheckpoint callback is not provided. '
                    'Workers will need to restart training if any fails.')

  # NOTE: `callbacks_list` can no longer be None here (it was normalized to
  # [] above), so the previous `callbacks_list is None` guard was dead code
  # and has been removed.
  if is_current_worker_chief():
    return callbacks_list

  # Some Callbacks should only run on the chief worker.
  return [
      callback for callback in callbacks_list
      if not callback._chief_worker_only
  ]  # pylint: disable=protected-access
def _update_sample_weight_modes(model, mode, sample_weights):
  """Propagates the sample_weight_mode onto each replicated model."""
  if not is_distributing_by_cloning(model):
    return
  distributed_model = get_distributed_model(model, mode)
  if not distributed_model:
    # Build the per-replica models on demand before touching them.
    _make_replicated_models_with_cloning(model, mode)
    distributed_model = get_distributed_model(model, mode)
  # Any endpoint whose sample-weight setup changed forces the execution
  # function to be rebuilt later.
  distributed_model._recompile_exec_function = any(
      [endpoint.sample_weights_mismatch()
       for endpoint in model._training_endpoints])

  if not sample_weights:
    return
  per_replica_models = flatten_per_replica_values(
      model._distribution_strategy, distributed_model)
  # `sample_weights` is a 1-tuple whose single list holds one entry per
  # replica in sync.
  per_replica_weights = sample_weights[0]
  if per_replica_weights and None not in per_replica_weights:
    for replica_model, weight in zip(per_replica_models, per_replica_weights):
      replica_model._update_sample_weight_modes(sample_weights=[weight])
def concat_along_batch_dimension(outputs):
  """Concats prediction outputs along the batch dimension."""
  first = outputs[0]
  if isinstance(first, tf.SparseTensor):
    return tf.sparse.concat(axis=0, sp_inputs=outputs)
  if isinstance(first, tf.RaggedTensor):
    return tf.concat(outputs, axis=0)
  # Fall back to numpy concatenation for dense outputs.
  return np.concatenate(outputs)
| 46,111 | 39.663139 | 97 | py |
keras | keras-master/keras/distribute/worker_training_state.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training state management."""
import tensorflow.compat.v2 as tf
import os
from keras import backend
from keras.distribute import distributed_file_utils
from keras.utils import mode_keys
# Constant for `tf.keras.Model` attribute to store the epoch at which the most
# recently saved checkpoint was saved.
CKPT_SAVED_EPOCH = '_ckpt_saved_epoch'

# Sentinel stored in the epoch variable while no checkpoint has been saved yet.
CKPT_SAVED_EPOCH_UNUSED_VALUE = -1
class WorkerTrainingState:
  """Training state management class.

  This class provides apis for backing up and restoring the training state.
  This allows model and epoch information to be saved periodically and restore
  for fault-tolerance, also known as preemption-recovery purpose.
  """

  def __init__(self, model, checkpoint_dir):
    """Creates read/write checkpoint managers for fault-tolerant training.

    Args:
      model: The `tf.keras.Model` whose state is backed up and restored.
      checkpoint_dir: Directory in which backup checkpoints are kept.
    """
    self._model = model

    # The epoch at which the checkpoint is saved. Used for fault-tolerance.
    # GPU device only has int64 dtype registered VarHandleOp.
    self._ckpt_saved_epoch = tf.Variable(
        initial_value=tf.constant(
            CKPT_SAVED_EPOCH_UNUSED_VALUE, dtype=tf.int64),
        name='ckpt_saved_epoch')

    # Variable initialization.
    backend.set_value(self._ckpt_saved_epoch, CKPT_SAVED_EPOCH_UNUSED_VALUE)

    # _ckpt_saved_epoch gets tracked and is included in the checkpoint file
    # when backing up.
    checkpoint = tf.train.Checkpoint(
        model=self._model, ckpt_saved_epoch=self._ckpt_saved_epoch,
        train_counter=self._model._train_counter)

    # If this is single-worker training, checkpoint_dir are the same for
    # write_checkpoint_manager and read_checkpoint_manager.
    #
    # If this is multi-worker training, and this worker should not
    # save checkpoint, we replace the write_checkpoint_manager's checkpoint_dir
    # with a temp filepath, so it writes to a file that will be removed at the
    # end of back_up() call. This is necessary because the SyncOnReadVariable
    # needs to be synced across all the workers in order to be read, and all
    # workers need to perform `save()`.
    # But all workers should restore from the same checkpoint_dir as passed in
    # read_checkpoint_manager.
    self.read_checkpoint_manager = tf.train.CheckpointManager(
        checkpoint,
        directory=os.path.join(checkpoint_dir, 'chief'),
        max_to_keep=1)
    write_checkpoint_dir = distributed_file_utils.write_dirpath(
        checkpoint_dir, self._model.distribute_strategy)
    if self._model.distribute_strategy.extended.should_checkpoint:
      self.write_checkpoint_manager = self.read_checkpoint_manager
    else:
      self.write_checkpoint_manager = tf.train.CheckpointManager(
          checkpoint, directory=write_checkpoint_dir, max_to_keep=1)

  def back_up(self, epoch):
    """Back up the current state of training into a checkpoint file.

    Args:
      epoch: The current epoch information to be saved.
    """
    backend.set_value(self._ckpt_saved_epoch, epoch)
    # Save the model plus CKPT_SAVED_EPOCH variable. `save()` returns the
    # checkpoint path (truthy) when a checkpoint was written; only then is
    # the non-chief temp directory cleaned up.
    if self.write_checkpoint_manager.save():
      distributed_file_utils.remove_temp_dirpath(
          self.write_checkpoint_manager.directory,
          self._model.distribute_strategy)

  def restore(self):
    """Restore the training state from the backed up checkpoint file.

    Delegates to `CheckpointManager.restore_or_initialize`; the result is
    discarded, so this method returns None. (NOTE(review): an earlier
    docstring claimed a bool return value, which the code never produced.)
    """
    self.read_checkpoint_manager.restore_or_initialize()

  def delete_backup(self):
    """Delete the backup directories.

    Delete the backup directories which should not exist after `fit()`
    successfully finishes.
    """
    # Only the writer of the real (chief) checkpoint dir removes it; temp
    # dirs of non-chief workers were already cleaned up in back_up().
    if self.write_checkpoint_manager is self.read_checkpoint_manager:
      try:
        tf.io.gfile.rmtree(self.write_checkpoint_manager.directory)
      except tf.errors.NotFoundError:
        pass

  def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
    """Maybe load initial epoch from ckpt considering possible worker recovery.

    When `_ckpt_saved_epoch` attribute exists and is not
    `CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is under multi-worker training
    setting and indicates the worker is recovering from previous failure. In
    this case, infer `initial_epoch` from `self._ckpt_saved_epoch` to continue
    previous unfinished training from certain epoch.

    Args:
      initial_epoch: The original initial_epoch user passes in in `fit()`.
      mode: The mode for running `model.fit()`.

    Returns:
      If the training is recovering from previous failure under multi-worker
      training setting, return the epoch the training is supposed to continue
      at. Otherwise, return the `initial_epoch` the user passes in.
    """
    epoch = backend.eval(self._ckpt_saved_epoch)
    if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0:
      # The most recently saved epoch is one epoch prior to the epoch it
      # failed at, so return the value of 'self._ckpt_saved_epoch' plus one.
      return epoch + 1
    return initial_epoch
| 5,686 | 39.621429 | 80 | py |
keras | keras-master/keras/distribute/worker_training_state_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of `worker_training_state.py` utilities."""
import tensorflow.compat.v2 as tf
import os
import sys
from absl.testing import parameterized
from keras import callbacks
from keras.distribute import multi_worker_testing_utils
class ModelCheckpointTest(tf.test.TestCase, parameterized.TestCase):
  """Verifies `ModelCheckpoint` writes files for both h5 and tf formats."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'],
          file_format=['h5', 'tf'],
          save_weights_only=[True, False]))
  def testCheckpointExists(self, file_format, save_weights_only):
    """Trains briefly and asserts a checkpoint appears on disk."""
    train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(64, 2)
    model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
    saving_dir = self.get_temp_dir()
    saving_filepath = os.path.join(saving_dir, 'checkpoint.' + file_format)
    callbacks_list = [
        callbacks.ModelCheckpoint(
            filepath=saving_filepath, save_weights_only=save_weights_only)
    ]
    # Sanity check: nothing exists at the path before training starts.
    self.assertFalse(tf.io.gfile.exists(saving_filepath))
    model.fit(x=train_ds, epochs=2, steps_per_epoch=2, callbacks=callbacks_list)
    tf_saved_model_exists = tf.io.gfile.exists(saving_filepath)
    # TF-format weight checkpoints are written as `<path>.index` plus data
    # shards rather than a single file at the exact path.
    tf_weights_only_checkpoint_exists = tf.io.gfile.exists(saving_filepath +
                                                           '.index')
    self.assertTrue(
        tf_saved_model_exists or tf_weights_only_checkpoint_exists)
if __name__ == '__main__':
  # Replace sys.exit with os._exit so subprocesses spawned by the test
  # framework terminate immediately without running exit handlers
  # (NOTE(review): presumed intent of the patch -- confirm).
  with tf.compat.v1.test.mock.patch.object(sys, 'exit', os._exit):
    tf.test.main()
| 2,213 | 40 | 80 | py |
keras | keras-master/keras/distribute/keras_stateful_lstm_model_correctness_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful tf.keras LSTM models using DistributionStrategy."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras.distribute import keras_correctness_test_base
from keras.optimizer_v2 import gradient_descent as gradient_descent_keras
def strategies_for_stateful_embedding_model():
  """Strategies to exercise the stateful embedding model with.

  Returns:
    A list holding the single-core TPU strategy combination (stateful RNN
    layers require a single-core device assignment).
  """
  return [tf.__internal__.distribute.combinations.tpu_strategy_one_core]
def test_combinations_for_stateful_embedding_model():
  """Graph-mode test combinations over the stateful-embedding strategies."""
  return tf.__internal__.test.combinations.combine(
      distribution=strategies_for_stateful_embedding_model(),
      mode='graph',
      use_numpy=False,
      use_validation_data=False)
class DistributionStrategyStatefulLstmModelCorrectnessTest(
    keras_correctness_test_base
    .TestDistributionStrategyEmbeddingModelCorrectnessBase):
  """Correctness tests for a stateful-LSTM model under distribution."""

  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    """Builds and compiles the stateful-LSTM model inside the strategy scope."""
    del input_shapes  # Unused; the input shape is fixed by `max_words`.
    batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE

    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      # Stateful RNNs require a fixed batch size on the Input layer.
      word_ids = keras.layers.Input(
          shape=(max_words,),
          batch_size=batch_size,
          dtype=np.int32,
          name='words')
      word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
      lstm_embed = keras.layers.LSTM(
          units=4, return_sequences=False, stateful=True)(
              word_embed)

      preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
      model = keras.Model(inputs=[word_ids], outputs=[preds])

      if initial_weights:
        model.set_weights(initial_weights)

      optimizer_fn = gradient_descent_keras.SGD
      model.compile(
          optimizer=optimizer_fn(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])
    return model

  # TODO(jhseu): Disabled to fix b/130808953. Need to investigate why it
  # doesn't work and enable for DistributionStrategy more generally.
  @tf.__internal__.distribute.combinations.generate(
      test_combinations_for_stateful_embedding_model())
  def disabled_test_stateful_lstm_model_correctness(
      self, distribution, use_numpy, use_validation_data):
    self.run_correctness_test(
        distribution,
        use_numpy,
        use_validation_data,
        is_stateful_model=True)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_correctness_test_base
          .test_combinations_with_tpu_strategies_graph()))
  def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(
      self, distribution, use_numpy, use_validation_data):
    # Stateful models are expected to be rejected on multi-core strategies.
    with self.assertRaisesRegex(
        ValueError, 'not yet supported with tf.distribute.Strategy'):
      self.run_correctness_test(
          distribution,
          use_numpy,
          use_validation_data,
          is_stateful_model=True)
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  tf.test.main()
| 3,792 | 34.448598 | 101 | py |
keras | keras-master/keras/distribute/keras_utils_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models with callbacks, checkpointing with dist strategy."""
import tensorflow.compat.v2 as tf
import collections
import tempfile
from absl.testing import parameterized
import numpy as np
import keras
from keras import losses
from keras.distribute import distribute_strategy_test as keras_test_lib
from keras.distribute import distributed_training_utils_v1
from keras.distribute import optimizer_combinations
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    for hook_name in ('on_batch_begin', 'on_batch_end', 'on_epoch_begin',
                      'on_epoch_end', 'on_predict_batch_begin',
                      'on_predict_batch_end', 'on_predict_begin',
                      'on_predict_end', 'on_test_batch_begin',
                      'on_test_batch_end', 'on_test_begin', 'on_test_end',
                      'on_train_batch_begin', 'on_train_batch_end',
                      'on_train_begin', 'on_train_end'):
      # Shadow each hook on the instance with a counting wrapper around the
      # original bound method.
      setattr(self, hook_name,
              self.wrap_with_counts(hook_name, getattr(self, hook_name)))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped so every invocation bumps its counter."""

    def _call_and_count(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _call_and_count
class TestDistributionStrategyWithCallbacks(tf.test.TestCase,
                                            parameterized.TestCase):
  """Checks that each Callback hook fires the expected number of times."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations()))
  def test_callbacks_in_fit(self, distribution):
    """fit() should drive train/test hooks the documented number of times."""
    with distribution.scope():
      model = keras_test_lib.get_model()
      model.compile(
          optimizer='sgd',
          loss='mse',
          metrics=['mae'])

    dataset = keras_test_lib.get_dataset(distribution)
    counter = Counter()

    epochs = 2
    steps_per_epoch = 5
    validation_steps = 3

    model.fit(
        dataset,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        verbose=0,
        validation_data=dataset,
        validation_steps=validation_steps,
        callbacks=[counter])

    if (isinstance(distribution,
                   tf.compat.v1.distribute.experimental.TPUStrategy) and
        not tf.executing_eagerly()):
      # TPU Strategy can have multi step training, from extended.steps_per_run
      # if steps_per_run = 1, then num_batch_call_per_epoch = steps_per_epoch
      steps_per_run = distribution.extended.steps_per_run
      num_batch_call_per_epoch = steps_per_epoch // steps_per_run
      if steps_per_epoch % steps_per_run:
        # A partial final multi-step still triggers one batch-level call.
        num_batch_call_per_epoch += 1
    else:
      num_batch_call_per_epoch = steps_per_epoch

    self.assertDictEqual(
        counter.method_counts, {
            'on_batch_begin': epochs * num_batch_call_per_epoch,
            'on_batch_end': epochs * num_batch_call_per_epoch,
            'on_epoch_begin': epochs,
            'on_epoch_end': epochs,
            'on_test_batch_begin': epochs * validation_steps,
            'on_test_batch_end': epochs * validation_steps,
            'on_test_begin': epochs,
            'on_test_end': epochs,
            'on_train_batch_begin': epochs * num_batch_call_per_epoch,
            'on_train_batch_end': epochs * num_batch_call_per_epoch,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations()))
  def test_callbacks_in_eval(self, distribution):
    """evaluate() should drive exactly the test-phase hooks."""
    with distribution.scope():
      model = keras_test_lib.get_model()
      model.compile(
          optimizer='sgd',
          loss='mse',
          metrics=['mae'])

    dataset = keras_test_lib.get_dataset(distribution)
    counter = Counter()

    model.evaluate(dataset, steps=5, callbacks=[counter])

    self.assertDictEqual(
        counter.method_counts, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations()))
  def test_callbacks_in_predict(self, distribution):
    """predict() should drive exactly the predict-phase hooks."""
    with distribution.scope():
      model = keras_test_lib.get_model()
      model.compile(
          optimizer='sgd',
          loss='mse',
          metrics=['mae'])

    dataset = keras_test_lib.get_dataset(distribution)
    counter = Counter()

    model.predict(
        keras_test_lib.get_predict_dataset(dataset),
        steps=5,
        callbacks=[counter])

    self.assertDictEqual(
        counter.method_counts, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })
class TestDistributionStrategyErrorCases(tf.test.TestCase,
                                         parameterized.TestCase):
  """Covers error paths and unsupported-feature diagnostics under distribution."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations
              .mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph']))
  def test_validating_dataset_input_tensors_with_shape_mismatch(
      self, distribution):
    """Mismatched per-replica shapes must be rejected at validation time."""
    with self.cached_session():
      a = tf.constant([1, 2], shape=(1, 2))
      b = tf.constant([[1, 2], [1, 2]], shape=(2, 2))
      x = tf.distribute.DistributedValues((a, b))
      y = tf.distribute.DistributedValues((a, a))
      # Removed device and input tensor shape details from the error message
      # since the order of the device and the corresponding input tensor shape
      # is not deterministic over different runs.
      with self.assertRaisesRegex(
          ValueError, 'Input tensor shapes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        with distribution.scope():
          distributed_training_utils_v1.validate_distributed_dataset_inputs(
              distribution, x, y)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations
              .mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_validating_dataset_input_tensors_with_dtype_mismatch(
      self, distribution):
    """Mismatched per-replica dtypes must be rejected at validation time."""
    with self.cached_session():
      a = tf.constant([1, 2], shape=(1, 2), dtype=tf.int32)
      b = tf.constant([1, 2], shape=(1, 2), dtype=tf.float64)
      x = tf.distribute.DistributedValues((a, b))
      y = tf.distribute.DistributedValues((a, a))
      # Removed device and input tensor dtype details from the error message
      # since the order of the device and the corresponding input tensor dtype
      # is not deterministic over different runs.
      with self.assertRaisesRegex(
          ValueError, 'Input tensor dtypes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        with distribution.scope():
          distributed_training_utils_v1.validate_distributed_dataset_inputs(
              distribution, x, y)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations
              .mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_unsupported_features(self, distribution, mode):
    """fit/evaluate/predict options unsupported with datasets must raise."""
    with self.cached_session():
      with distribution.scope():
        model = keras_test_lib.get_model()
        optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      dataset = keras_test_lib.get_dataset(distribution)
      # Test with validation split
      with self.assertRaises(ValueError):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            validation_split=0.5,
            validation_steps=2)

      # Test with sample weight.
      sample_weight = np.random.random((10,))
      with self.assertRaises(ValueError):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            sample_weight=sample_weight)

      # Test with not specifying the `steps` argument for dataset with infinite
      # cardinality.
      dataset = dataset.repeat()
      with self.assertRaises(ValueError):
        model.fit(dataset, epochs=1, verbose=0)
      with self.assertRaises(ValueError):
        model.evaluate(dataset, verbose=0)

      with self.assertRaises(ValueError):
        model.predict(dataset, verbose=0)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations
              .mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.one_device_strategy,
          ],
          mode=['graph', 'eager']))
  def test_distribution_strategy_on_subclassed_model(
      self, distribution):
    """Subclassed models may only be compiled under a strategy eagerly."""
    with distribution.scope():

      class _SimpleMLP(keras.Model):

        def __init__(self, num_labels):
          super(_SimpleMLP, self).__init__()
          self.dense = keras.layers.Dense(num_labels)

        def call(self, inputs):
          return self.dense(inputs)

      model = _SimpleMLP(3)

      if not tf.executing_eagerly():
        with self.assertRaisesRegex(
            ValueError,
            'We currently do not support distribution strategy with a '
            '`Sequential` model that is created without `input_shape`/'
            '`input_dim` set in its first layer or a subclassed model.'):
          model.compile(
              'sgd')
      else:
        model.compile(
            'sgd')

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations
              .mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.one_device_strategy,
          ],
          mode=['graph', 'eager']))
  def test_distribution_strategy_on_deferred_sequential_model(
      self, distribution):
    """Deferred-build Sequential models are graph-mode unsupported."""
    with distribution.scope():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(16, activation='relu'))
      model.add(keras.layers.Dense(3, activation='softmax'))

      if tf.executing_eagerly():
        model.compile(
            'sgd')
      else:
        with self.assertRaisesRegex(
            ValueError,
            'We currently do not support distribution strategy with a '
            '`Sequential` model that is created without '
            '`input_shape`/`input_dim` set in its first layer or '
            'a subclassed model.'):
          model.compile(
              'sgd')

  @tf.__internal__.distribute.combinations.generate(
      keras_test_lib.all_strategy_combinations_minus_default())
  def test_standalone_loss_without_loss_reduction(self, distribution):
    """Default-reduction losses used outside a model must raise in a scope."""
    with distribution.scope():
      loss_object = losses.MeanSquaredError()

      with self.assertRaisesRegex(
          ValueError, 'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE`'):
        y = np.asarray([1, 0])
        loss_object(y, y)
class TestDistributionStrategyWithLossMasking(tf.test.TestCase,
                                              parameterized.TestCase):
  """Verifies masked timesteps contribute nothing to the loss."""

  # TODO(priyag): Enable all strategies for this test. Currently it does not
  # work for TPU due to some invalid datatype.
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations
              .mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager'],
          optimizer=optimizer_combinations
          .gradient_descent_optimizer_keras_v2_fn))
  def test_masking(self, distribution, optimizer):
    with self.cached_session():
      np.random.seed(1337)
      # Second sample is all mask_value, so it must be excluded from the loss.
      x = np.array([[[1], [1]], [[0], [0]]])
      with distribution.scope():
        model = keras.models.Sequential()
        model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
        model.add(
            keras.layers.TimeDistributed(
                keras.layers.Dense(1, kernel_initializer='one')))
        model.compile(
            loss='mse',
            optimizer=optimizer())
      y = np.array([[[1], [1]], [[1], [1]]])
      dataset = tf.data.Dataset.from_tensor_slices((x, y))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)
      hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
      # With ones-initialized weights the unmasked timesteps already match
      # the targets, so the masked loss must be exactly zero.
      self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(tf.test.TestCase,
                                                     parameterized.TestCase):
  """Verifies BatchNormalization learns correct statistics when distributed."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations(),
          tf.__internal__.test.combinations.combine(
              fused=[True, False],
              optimizer=optimizer_combinations
              .gradient_descent_optimizer_keras_v2_fn)))
  def test_batchnorm_correctness(self, distribution, fused, optimizer):
    with self.cached_session():
      with distribution.scope():
        model = keras.models.Sequential()
        norm = keras.layers.BatchNormalization(
            input_shape=(
                10,
                20,
                30,
            ), momentum=0.8, fused=fused)
        model.add(norm)
        model.compile(
            loss='mse',
            optimizer=optimizer())

      # centered on 5.0, variance 10.0
      x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30))
      x = x.astype('float32')
      dataset = tf.data.Dataset.from_tensor_slices((x, x))
      dataset = dataset.repeat(100)
      dataset = keras_test_lib.batch_wrapper(dataset, 32, distribution)

      predict_dataset = tf.data.Dataset.from_tensor_slices(x)
      predict_dataset = predict_dataset.repeat(100)
      predict_dataset = keras_test_lib.batch_wrapper(predict_dataset, 32,
                                                     distribution)

      model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
      out = model.predict(predict_dataset, steps=2)
      # Undo beta/gamma so the remaining output should be a standard normal
      # if the moving statistics were learned correctly.
      out -= keras.backend.eval(norm.beta)
      out /= keras.backend.eval(norm.gamma)
      np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
      np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategySaveLoadWeights(tf.test.TestCase,
                                              parameterized.TestCase):
  """Round-trips model weights through h5 and trackable checkpoints."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default(),
          tf.__internal__.test.combinations.combine(
              optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn)))
  def test_save_load_h5(self, distribution, optimizer):
    with self.cached_session():
      dataset = keras_test_lib.get_dataset(distribution)
      with distribution.scope():
        model = keras_test_lib.get_model()
        model.compile(
            optimizer(),
            'mse')
        model.fit(dataset, epochs=1, steps_per_epoch=1)

        # NOTE(review): tempfile.mktemp is deprecated/race-prone; fine inside
        # this self-contained test but not for production code.
        weights_file = tempfile.mktemp('.h5')
        model.save_weights(weights_file)

        model_2 = keras_test_lib.get_model()
        model_2.compile(
            optimizer(),
            'mse')
        model_2.load_weights(weights_file)
        model_2.predict(
            keras_test_lib.get_predict_dataset(distribution), steps=2)
        model_2.fit(dataset, epochs=1, steps_per_epoch=1)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default(),
          tf.__internal__.test.combinations.combine(
              optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn)))
  def test_save_load_trackable(self, distribution, optimizer):
    # TODO(b/123533246): Enable the test for TPU once bug is fixed
    if (isinstance(distribution,
                   (tf.distribute.experimental.TPUStrategy,
                    tf.compat.v1.distribute.experimental.TPUStrategy)) and
        distribution.extended.steps_per_run > 1):
      self.skipTest('MultiStep TPU Strategy deadlocks with optimizer restore.')
    with self.cached_session():
      dataset = keras_test_lib.get_dataset(distribution)
      with distribution.scope():
        model = keras_test_lib.get_model()
        model.compile(
            optimizer(),
            'mse')
        model.fit(dataset, epochs=1, steps_per_epoch=1)

        # NOTE(review): tempfile.mktemp is deprecated/race-prone; fine inside
        # this self-contained test but not for production code.
        weights_file = tempfile.mktemp()
        model.save_weights(weights_file)

        model_2 = keras_test_lib.get_model()
        model_2.compile(
            optimizer(),
            'mse')
        model_2.load_weights(weights_file)
        model_2.predict(
            keras_test_lib.get_predict_dataset(distribution), steps=2)
        model_2.fit(dataset, epochs=1, steps_per_epoch=1)
class TestDistributionStrategyValidation(tf.test.TestCase, parameterized.TestCase):
  """Tests errors raised when model/layers are created outside strategy scope."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          keras_test_lib.all_strategy_combinations_minus_default()))
  def test_layer_outside_scope(self, distribution):
    """Layers built outside the scope must be rejected at compile time."""
    with self.cached_session():
      with self.assertRaisesRegex(
          ValueError, 'was not created in the distribution strategy'):
        # Layers created before entering the scope.
        x = keras.layers.Input(shape=(3,), name='input')
        y = keras.layers.Dense(4, name='dense')(x)
        with distribution.scope():
          model = keras.Model(x, y)
          optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
          loss = 'mse'
          metrics = ['mae', keras.metrics.CategoricalAccuracy()]
          model.compile(
              optimizer,
              loss,
              metrics=metrics)

  @tf.__internal__.distribute.combinations.generate(
      keras_test_lib.all_strategy_combinations_minus_default())
  def test_model_outside_scope(self, distribution):
    """Models built outside the scope must be rejected at compile time."""
    with self.cached_session():
      with self.assertRaisesRegex(
          ValueError, 'was not created in the distribution strategy'):
        # The entire model is created before entering the scope.
        x = keras.layers.Input(shape=(3,), name='input')
        y = keras.layers.Dense(4, name='dense')(x)
        model = keras.Model(x, y)
        with distribution.scope():
          optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
          loss = 'mse'
          metrics = ['mae', keras.metrics.CategoricalAccuracy()]
          model.compile(optimizer, loss, metrics=metrics)
class TestDistributionStrategyWithStaticShapes(tf.test.TestCase,
                                               parameterized.TestCase):
  """Tests static (fixed) batch sizes against the number of replicas."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_input_batch_size_not_divisible_by_num_replicas(self, distribution):
    """A static batch size not divisible by replica count must raise."""
    with distribution.scope():
      with self.assertRaisesRegex(
          ValueError, r'The `batch_size` argument \(5\) must be divisible by '
          r'the number of replicas \(2\)'):
        # batch_size=5 cannot be split evenly across the 2 replicas above.
        keras.layers.Input(shape=(3,), batch_size=5, name='input')

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=['graph', 'eager']))
  def test_static_input_batch_size(self, distribution):
    """fit/evaluate/predict work with a statically-known batch size."""
    inputs = np.zeros((10, 3), dtype=np.float32)
    targets = np.zeros((10, 4), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    # drop_remainder=True guarantees every batch matches the static size.
    dataset = dataset.batch(10, drop_remainder=True)
    with distribution.scope():
      x = keras.layers.Input(shape=(3,), batch_size=10, name='input')
      y = keras.layers.Dense(4, name='dense')(x)
      model = keras.Model(x, y)
      model.compile(optimizer='sgd', loss='mse', metrics=['mae'])
    model.fit(dataset, epochs=1, steps_per_epoch=5)
    model.evaluate(dataset, steps=5)
    model.predict(dataset)
if __name__ == '__main__':
  # Run via the multi-process runner so multi-worker strategies are supported.
  tf.__internal__.distribute.multi_process_runner.test_main()
| 21,616 | 37.12522 | 114 | py |
keras | keras-master/keras/distribute/distributed_training_utils_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed training utility functions."""
import tensorflow.compat.v2 as tf
from keras import callbacks
from keras.distribute import distributed_training_utils_v1
from keras.optimizer_v2 import adam
class DistributedTrainingUtilsTest(tf.test.TestCase):
  """Covers callback validation in `distributed_training_utils_v1`."""

  def test_validate_callbacks_predefined_callbacks(self):
    """Supported built-in callbacks validate; unsupported ones raise."""
    supported = [
        callbacks.TensorBoard(),
        callbacks.CSVLogger(filename='./log.csv'),
        callbacks.EarlyStopping(),
        callbacks.ModelCheckpoint(filepath='./checkpoint'),
        callbacks.TerminateOnNaN(),
        callbacks.ProgbarLogger(),
        callbacks.History(),
        callbacks.RemoteMonitor(),
    ]
    # Must not raise when paired with a Keras V2 optimizer.
    distributed_training_utils_v1.validate_callbacks(supported, adam.Adam())

    unsupported = [
        callbacks.ReduceLROnPlateau(),
        callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001),
    ]
    for unsupported_callback in unsupported:
      # These callbacks mutate the optimizer's learning rate, which requires
      # a Keras Optimizer V2; a TF1 optimizer must be rejected.
      with self.assertRaisesRegex(ValueError,
                                  'You must specify a Keras Optimizer V2'):
        distributed_training_utils_v1.validate_callbacks(
            [unsupported_callback], tf.compat.v1.train.AdamOptimizer())
if __name__ == '__main__':
  # Standard single-process test entry point.
  tf.test.main()
| 1,983 | 35.072727 | 80 | py |
keras | keras-master/keras/distribute/dataset_creator_model_fit_ps_only_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
from keras import callbacks as callbacks_lib
from keras.distribute import dataset_creator_model_fit_test_base as test_base
from keras.distribute import strategy_combinations
import tensorflow.compat.v2 as tf
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.parameter_server_strategies_multi_worker,
        use_dataset_creator=[True, False],
        mode="eager"))
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
    test_base.DatasetCreatorModelFitTestBase):
  """`Model.fit` behaviors that are specific to `ParameterServerStrategy`."""

  def testModelFitWithRunEagerly(self, strategy, use_dataset_creator):
    """`run_eagerly=True` is not supported under PSStrategy and must raise."""
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
        "`run_eagerly` is not supported."):
      self._model_fit(
          strategy, run_eagerly=True, use_dataset_creator=use_dataset_creator)

  def testModelPredict(self, strategy, use_dataset_creator):
    """`Model.predict` works with a plain dataset under PSStrategy."""
    if use_dataset_creator:
      self.skipTest("Unused option.")
    model, _ = self._model_compile(strategy)
    test_data = tf.data.Dataset.from_tensor_slices(
        [[1.], [2.], [3.], [1.], [5.], [1.]]).repeat().batch(2)
    model.predict(x=test_data, steps=3)

  def testClusterCoordinatorSingleInstance(self, strategy, use_dataset_creator):
    """The strategy's ClusterCoordinator behaves as a per-strategy singleton."""
    model = self._model_fit(strategy, use_dataset_creator=use_dataset_creator)
    strategy = model.distribute_strategy
    # Constructing a ClusterCoordinator again for the same strategy must
    # return the identical instance that `fit` created internally.
    self.assertIs(
        strategy._cluster_coordinator,
        tf.distribute.experimental.coordinator.ClusterCoordinator(strategy))

  def testModelFitErrorOnBatchLevelCallbacks(self, strategy,
                                             use_dataset_creator):
    """Plain batch-level callbacks are rejected (logs are remote values)."""

    class BatchLevelCallback(callbacks_lib.Callback):

      def on_train_batch_end(self, batch, logs=None):
        pass

    with self.assertRaisesRegex(ValueError,
                                "Batch-level `Callback`s are not supported"):
      callbacks = [BatchLevelCallback()]
      self._model_fit(
          strategy,
          callbacks=callbacks,
          use_dataset_creator=use_dataset_creator)

  def testModelFitCallbackSupportsTFLogs(self, strategy, use_dataset_creator):
    """Batch-level callbacks opting into TF logs receive `RemoteValue`s."""

    class MyCallback(callbacks_lib.Callback):

      def __init__(self):
        super(MyCallback, self).__init__()
        # Fetches the RemoteValues if necessary.
        self._supports_tf_logs = True

      def on_train_batch_end(self, batch, logs=None):
        assert isinstance(logs, tf.distribute.experimental.coordinator.RemoteValue)

    my_callback = MyCallback()
    callbacks = [my_callback]
    self._model_fit(
        strategy, callbacks=callbacks, use_dataset_creator=use_dataset_creator)

  def testModelFitVerbosity(self, strategy, use_dataset_creator):
    """Default verbosity under PSStrategy is epoch-level (verbose=2)."""

    class MyCallback(callbacks_lib.Callback):
      pass

    my_callback = MyCallback()
    callbacks = [my_callback]
    self._model_fit(
        strategy, callbacks=callbacks, use_dataset_creator=use_dataset_creator)
    # PSStrategy should default to epoch-level logging.
    self.assertEqual(my_callback.params["verbose"], 2)

  def testModelFitTensorBoardEpochLevel(self, strategy, use_dataset_creator):
    """TensorBoard (epoch-level only) works and writes event files."""
    log_dir = self.get_temp_dir()
    callbacks = [callbacks_lib.TensorBoard(log_dir)]
    self._model_fit(
        strategy, callbacks=callbacks, use_dataset_creator=use_dataset_creator)
    self.assertTrue(tf.compat.v1.gfile.Exists(log_dir))
    files = tf.compat.v1.gfile.ListDirectory(log_dir)
    self.assertGreaterEqual(len(files), 1)

  def testModelEvaluateErrorOnBatchLevelCallbacks(self, strategy,
                                                  use_dataset_creator):
    """`Model.evaluate` also rejects plain batch-level callbacks."""

    class BatchLevelCallback(callbacks_lib.Callback):

      def on_train_batch_end(self, batch, logs=None):
        pass

    with self.assertRaisesRegex(ValueError,
                                "Batch-level `Callback`s are not supported"):
      callbacks = [BatchLevelCallback()]
      self._model_evaluate(
          strategy,
          callbacks=callbacks,
          use_dataset_creator=use_dataset_creator)
if __name__ == "__main__":
  # Multi-worker PSStrategy tests need v2 behavior and multi-process workers.
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 4,968 | 37.820313 | 83 | py |
keras | keras-master/keras/distribute/dataset_creator_model_fit_test_base.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import callbacks as callbacks_lib
from keras.engine import sequential
from keras.layers import core as core_layers
from keras.layers.preprocessing import string_lookup
from keras.optimizer_v2 import gradient_descent
from keras.utils import dataset_creator
from tensorflow.python.platform import tf_logging as logging
class DatasetCreatorModelFitTestBase(tf.test.TestCase, parameterized.TestCase):
  """The base class for DatasetCreator with Model.fit tests."""

  def _get_dataset_fn(self, use_lookup_layer):
    """Returns a `dataset_fn` suitable for `DatasetCreator` (or direct calls).

    When `use_lookup_layer` is True, a vocabulary file is written to the test
    temp dir and the dataset maps string features through a `StringLookup`
    layer; otherwise the dataset yields random float features/labels.
    """
    if use_lookup_layer:
      filepath = os.path.join(self.get_temp_dir(), "vocab")
      with open(filepath, "w") as f:
        f.write("\n".join(["earth", "wind", "and", "fire"]))

      def dataset_fn(input_context):
        del input_context
        lookup_layer = string_lookup.StringLookup(
            num_oov_indices=1, vocabulary=filepath)
        x = np.array([["earth", "wind", "and", "fire"],
                      ["fire", "and", "earth", "michigan"]])
        y = np.array([0, 1])
        map_fn = lambda x, y: (lookup_layer(x), y)
        return tf.data.Dataset.from_tensor_slices(
            (x, y)).shuffle(10).repeat().batch(2).map(map_fn)
    else:

      def dataset_fn(input_context):
        del input_context
        x = tf.random.uniform((10, 10))
        y = tf.random.uniform((10,))
        return tf.data.Dataset.from_tensor_slices(
            (x, y)).shuffle(10).repeat().batch(2)

    return dataset_fn

  def _model_compile(self,
                     strategy,
                     steps_per_execution=1,
                     run_eagerly=False,
                     with_normalization_layer=False,
                     use_lookup_layer=False):
    """Builds and compiles a small Sequential model under `strategy`.

    Returns a `(model, [ResultAssertingCallback()])` pair; the callback
    asserts that epochs advance monotonically and that loss is logged as a
    float.
    """

    class ResultAssertingCallback(callbacks_lib.Callback):
      """A callback that asserts the result of the tests."""

      def __init__(self):
        self._prev_epoch = -1

      def on_epoch_end(self, epoch, logs=None):
        logging.info("testModelFit: epoch=%r, logs=%r", epoch, logs)
        if epoch <= self._prev_epoch:
          raise RuntimeError("Epoch is supposed to be larger than previous.")
        self._prev_epoch = epoch
        is_loss_float = (
            logs.get("loss", None) is not None and
            isinstance(logs["loss"], (float, np.floating)))
        if not is_loss_float:
          raise RuntimeError("loss is supposed to be in the logs and float.")

    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
      if with_normalization_layer:
        norm = keras.layers.BatchNormalization(
            axis=-1, input_shape=(4, 4, 3), momentum=0.8)
        model.add(norm)
      model.add(core_layers.Dense(1, activation="sigmoid"))
      self._accuracy_metric = keras.metrics.Accuracy()

    model.compile(
        gradient_descent.SGD(),
        loss="binary_crossentropy",
        metrics=[self._accuracy_metric],
        steps_per_execution=steps_per_execution,
        run_eagerly=run_eagerly)
    return model, [ResultAssertingCallback()]

  def _model_fit(self,
                 strategy,
                 steps_per_execution=1,
                 validation_data=None,
                 x=None,
                 y=None,
                 shuffle=True,
                 batch_size=None,
                 steps_per_epoch=10,
                 run_eagerly=False,
                 with_normalization_layer=False,
                 callbacks=None,
                 use_lookup_layer=False,
                 use_dataset_creator=True):
    """Compiles and fits a model; returns the trained model.

    When `x`/`validation_data` are not given, defaults are produced either as
    a `DatasetCreator` or as a concrete dataset, per `use_dataset_creator`.
    """
    if callbacks is None:
      callbacks = []

    model, default_callbacks = self._model_compile(strategy,
                                                   steps_per_execution,
                                                   run_eagerly,
                                                   with_normalization_layer,
                                                   use_lookup_layer)
    callbacks += default_callbacks

    if x is None:
      if use_dataset_creator:
        x = dataset_creator.DatasetCreator(
            self._get_dataset_fn(use_lookup_layer))
      else:
        x = self._get_dataset_fn(use_lookup_layer)(None)

    if validation_data is None:
      if use_dataset_creator:
        validation_data = dataset_creator.DatasetCreator(
            self._get_dataset_fn(use_lookup_layer))
      else:
        validation_data = self._get_dataset_fn(use_lookup_layer)(None)

    model.fit(
        x,
        y,
        shuffle=shuffle,
        batch_size=batch_size,
        epochs=10,
        steps_per_epoch=steps_per_epoch,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=steps_per_epoch)
    return model

  def _model_evaluate(self,
                      strategy,
                      steps_per_execution=1,
                      x=None,
                      y=None,
                      batch_size=None,
                      steps=10,
                      run_eagerly=False,
                      with_normalization_layer=False,
                      callbacks=None,
                      use_dataset_creator=True):
    """Compiles a model and runs `Model.evaluate`; returns the model."""
    if callbacks is None:
      callbacks = []

    model, default_callbacks = self._model_compile(
        strategy,
        steps_per_execution,
        run_eagerly,
        with_normalization_layer,
    )
    callbacks += default_callbacks

    def dataset_fn(input_context):
      del input_context
      x = tf.random.uniform((10, 10))
      y = tf.random.uniform((10, 1))
      return tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(10).repeat().batch(8)

    if x is None:
      if use_dataset_creator:
        x = dataset_creator.DatasetCreator(dataset_fn)
      else:
        x = dataset_fn(None)

    model.evaluate(
        x=x, y=y, steps=steps, callbacks=callbacks, batch_size=batch_size)
    return model

  def _model_predict(
      self,
      strategy,
      model=None,
      steps_per_execution=1,
      test_data=None,
      steps=10,
      with_normalization_layer=False,
  ):
    """Runs `Model.predict`; returns `(model, predictions)`.

    Predictions are rounded to 4 decimals to make comparisons in subclasses
    stable across small numeric differences.
    """
    callbacks = []
    if model is None:
      model, default_callbacks = self._model_compile(
          strategy,
          steps_per_execution,
          with_normalization_layer=with_normalization_layer,
      )
      callbacks += default_callbacks

    def create_test_data():
      x = tf.constant([[1.], [2.], [3.], [1.], [5.], [1.]])
      return tf.data.Dataset.from_tensor_slices(x).repeat().batch(2)

    if test_data is None:
      test_data = create_test_data()

    predictions = model.predict(x=test_data, steps=steps, callbacks=callbacks)
    predictions = np.around(predictions, 4)
    return model, predictions
| 7,545 | 32.242291 | 80 | py |
keras | keras-master/keras/utils/all_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Keras utilities.
This module is used as a shortcut to access all the symbols. Those symbols was
exposed under __init__, and was causing some hourglass import issue.
"""
# pylint: disable=unused-import
from keras.utils.data_utils import GeneratorEnqueuer
from keras.utils.data_utils import get_file
from keras.utils.data_utils import OrderedEnqueuer
from keras.utils.data_utils import Sequence
from keras.utils.data_utils import SequenceEnqueuer
from keras.utils.generic_utils import custom_object_scope
from keras.utils.generic_utils import CustomObjectScope
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import get_custom_objects
from keras.utils.generic_utils import Progbar
from keras.utils.generic_utils import serialize_keras_object
from keras.utils.layer_utils import get_source_inputs
from keras.utils.multi_gpu_utils import multi_gpu_model
from keras.utils.np_utils import normalize
from keras.utils.np_utils import to_categorical
from keras.utils.vis_utils import model_to_dot
from keras.utils.vis_utils import plot_model
| 1,780 | 44.666667 | 80 | py |
keras | keras-master/keras/utils/control_flow_util.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for control flow.
This file is copied from tensorflow/python/ops/control_flow_util.py.
"""
import tensorflow.compat.v2 as tf
def InXlaContext(graph):
  """Returns True if `graph`'s current control flow context is inside XLA."""
  current_ctxt = graph._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingXLAContext(current_ctxt) is not None
def GraphOrParentsInXlaContext(graph):
  """Returns True if `graph` or any enclosing graph is in an XLA context."""
  current = graph
  while True:
    if InXlaContext(current):
      return True
    try:
      # Walk outward; the outermost graph has no `outer_graph` attribute.
      current = current.outer_graph
    except AttributeError:
      return False
def IsInWhileLoop(op):
  """Returns True if `op` is inside a while loop's control flow context."""
  op_ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingWhileContext(op_ctxt) is not None
def GetContainingWhileContext(ctxt, stop_ctxt=None):
  """Returns the first ancestor WhileContext of `ctxt`.

  Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a
  while loop.

  Args:
    ctxt: ControlFlowContext
    stop_ctxt: ControlFlowContext, optional. If provided, the search will end
      if it sees stop_ctxt.

  Returns:
    `ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing
    `ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not
    `None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal.
  """
  current = ctxt
  # Walk outward through enclosing contexts until a while context (or the
  # optional stop sentinel) is found.
  while current:
    if current.IsWhileContext() or current == stop_ctxt:
      return current
    current = current.outer_context
  return None
def GetContainingXLAContext(ctxt):
  """Returns the first ancestor XLAContext of `ctxt`.

  Returns `ctxt` if `ctxt` is a XLAContext, or None if `ctxt` is not in a
  while loop.

  Args:
    ctxt: ControlFlowContext

  Returns:
    `ctxt` if `ctxt` is a XLAContext, the most nested XLAContext containing
    `ctxt`, or None if `ctxt` is not in a while loop.
  """
  current = ctxt
  # Walk outward through enclosing contexts looking for an XLA context.
  while current:
    if current.IsXLAContext():
      return current
    current = current.outer_context
  return None
def smart_cond(pred, true_fn=None, false_fn=None, name=None):  # pylint: disable=invalid-name
  """Return either `true_fn()` if predicate `pred` is true else `false_fn()`.

  If `pred` is a bool or has a constant value, we return either `true_fn()`
  or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.

  Args:
    pred: A scalar determining whether to return the result of `true_fn` or
      `false_fn`.
    true_fn: The callable to be performed if pred is true.
    false_fn: The callable to be performed if pred is false.
    name: Optional name prefix when using `tf.cond`.

  Returns:
    Tensors returned by the call to either `true_fn` or `false_fn`.

  Raises:
    TypeError: If `true_fn` or `false_fn` is not callable.
  """
  # Variables never have a static value, so always use the dynamic `tf.cond`
  # path rather than attempting static evaluation.
  if isinstance(pred, tf.Variable):
    return tf.cond(
        pred, true_fn=true_fn, false_fn=false_fn, name=name)
  return tf.__internal__.smart_cond.smart_cond(
      pred, true_fn=true_fn, false_fn=false_fn, name=name)
def constant_value(pred):  # pylint: disable=invalid-name
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Args:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor, or the Python integer 1 or 0.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: If `pred` is not a Variable, Tensor or bool, or Python
      integer 1 or 0.
  """
  if isinstance(pred, tf.Tensor):
    # May return None if the tensor's value is not statically known.
    return tf.get_static_value(pred)
  if pred in {0, 1}:  # Accept 1/0 as valid boolean values
    # Note: Python bools compare equal to 0/1, so True/False also take this
    # branch; `bool(pred)` normalizes either representation.
    return bool(pred)
  if isinstance(pred, bool):
    return pred
  if isinstance(pred, tf.Variable):
    # Variables are always considered dynamic.
    return None
  raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. "
                  f"Received: {type(pred)}")
| 4,327 | 31.541353 | 93 | py |
keras | keras-master/keras/utils/tf_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow-related utilities."""
import tensorflow.compat.v2 as tf
import collections
import copy
import numpy as np
from tensorflow.python.framework import ops
from keras import backend as K
from keras.engine import keras_tensor
from keras.utils import object_identity
from keras.utils import tf_contextlib
from tensorflow.python.util.tf_export import keras_export
def is_tensor_or_tensor_list(v):
  """Returns True if `v` flattens to a non-empty list whose head is a Tensor."""
  flat = tf.nest.flatten(v)
  # Only the first flattened element is inspected, matching the historical
  # behavior of this helper.
  return bool(flat) and isinstance(flat[0], tf.Tensor)
def get_reachable_from_inputs(inputs, targets=None):
  """Returns the set of tensors/ops reachable from `inputs`.

  Stops if all targets have been found (target is optional).

  Only valid in Symbolic mode, not Eager mode.

  Args:
    inputs: List of tensors.
    targets: List of tensors.

  Returns:
    A set of tensors reachable from the inputs (includes the inputs
    themselves).
  """
  inputs = tf.nest.flatten(inputs, expand_composites=True)
  reachable = object_identity.ObjectIdentitySet(inputs)
  if targets:
    remaining_targets = object_identity.ObjectIdentitySet(tf.nest.flatten(targets))
  # Breadth-first traversal over the tensor->consumer / op->output graph.
  queue = collections.deque(inputs)
  while queue:
    x = queue.pop()
    if isinstance(x, tuple(_user_convertible_tensor_types)):
      # Can't find consumers of user-specific types.
      continue
    if isinstance(x, tf.Operation):
      outputs = x.outputs[:] or []
      outputs += x._control_outputs  # pylint: disable=protected-access
    elif isinstance(x, tf.Variable):
      try:
        outputs = [x.op]
      except AttributeError:
        # Variables can be created in an Eager context.
        outputs = []
    elif tf.is_tensor(x):
      outputs = x.consumers()
    else:
      raise TypeError(
          f'Expected tf.Operation, tf.Variable, or tf.Tensor. Received: {x}')
    for y in outputs:
      if y not in reachable:
        reachable.add(y)
        if targets:
          remaining_targets.discard(y)
        queue.appendleft(y)
    # Early exit once every requested target has been reached.
    if targets and not remaining_targets:
      return reachable
  return reachable
# This function needs access to private functions of `nest`.
# pylint: disable=protected-access
def map_structure_with_atomic(is_atomic_fn, map_fn, nested):
  """Maps the atomic elements of a nested structure.

  Args:
    is_atomic_fn: A function that determines if an element of `nested` is
      atomic.
    map_fn: The function to apply to atomic elements of `nested`.
    nested: A nested structure.

  Returns:
    The nested structure, with atomic elements mapped according to `map_fn`.

  Raises:
    ValueError: If an element that is neither atomic nor a sequence is
      encountered.
  """
  if is_atomic_fn(nested):
    return map_fn(nested)

  # Recursively convert.
  if not tf.nest.is_nested(nested):
    raise ValueError(
        f'Received non-atomic and non-sequence element: {nested}')
  if tf.__internal__.nest.is_mapping(nested):
    # Iterate mappings in sorted-key order for deterministic output.
    values = [nested[k] for k in sorted(nested.keys())]
  elif tf.__internal__.nest.is_attrs(nested):
    values = _astuple(nested)
  else:
    values = nested
  mapped_values = [
      map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values
  ]
  # Rebuild the same container type around the mapped values.
  return tf.__internal__.nest.sequence_like(nested, mapped_values)
def get_shapes(tensors):
  """Returns the `.shape` of each element in a (possibly nested) structure."""
  extract_shape = lambda tensor: tensor.shape
  return tf.nest.map_structure(extract_shape, tensors)
# pylint: enable=protected-access
def convert_shapes(input_shape, to_tuples=True):
  """Converts nested shape representations to desired format.

  Performs:

  TensorShapes -> tuples if `to_tuples=True`.
  tuples of int or None -> TensorShapes if `to_tuples=False`.

  Valid objects to be converted are:
  - TensorShapes
  - tuples with elements of type int or None.
  - ints
  - None

  Args:
    input_shape: A nested structure of objects to be converted to TensorShapes.
    to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts
      all tuples representing shapes to TensorShapes.

  Returns:
    Nested structure of shapes in desired format.

  Raises:
    ValueError: when the input tensor shape can't be converted to tuples, eg
      unknown tensor shape.
  """

  def _is_shape_component(value):
    # A single dimension: None (unknown), int, or a TF v1 Dimension.
    return value is None or isinstance(value, (int, tf.compat.v1.Dimension))

  def _is_atomic_shape(input_shape):
    # Ex: TensorShape or (None, 10, 32) or 5 or `None`
    if _is_shape_component(input_shape):
      return True
    if isinstance(input_shape, tf.TensorShape):
      return True
    if (isinstance(input_shape, (tuple, list)) and
        all(_is_shape_component(ele) for ele in input_shape)):
      return True
    return False

  def _convert_shape(input_shape):
    # Normalize through TensorShape, then optionally flatten back to a tuple.
    input_shape = tf.TensorShape(input_shape)
    if to_tuples:
      input_shape = tuple(input_shape.as_list())
    return input_shape

  return map_structure_with_atomic(_is_atomic_shape, _convert_shape,
                                   input_shape)
class ListWrapper:
  """Wraps a list so `nest` treats it as a single atomic element.

  The wrapped list is held by reference; no copy is made.
  """

  def __init__(self, list_to_wrap):
    self._list = list_to_wrap

  def as_list(self):
    """Returns the originally wrapped list object."""
    return self._list
def convert_inner_node_data(nested, wrap=False):
  """Either wraps or unwraps innermost node data lists in `ListWrapper` objects.

  Args:
    nested: A nested data structure.
    wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`,
      unwraps `ListWrapper` objects into lists.

  Returns:
    Structure of same type as nested, with lists wrapped/unwrapped.
  """

  def _is_serialized_node_data(nested):
    # Node data can be of form `[layer_name, node_id, tensor_id]` or
    # `[layer_name, node_id, tensor_id, kwargs]`.
    if (isinstance(nested, list) and (len(nested) in [3, 4]) and
        isinstance(nested[0], str)):
      return True
    return False

  def _is_atomic_nested(nested):
    """Returns `True` if `nested` is a list representing node data."""
    if isinstance(nested, ListWrapper):
      return True
    if _is_serialized_node_data(nested):
      return True
    return not tf.nest.is_nested(nested)

  def _convert_object_or_list(nested):
    """Convert b/t `ListWrapper` object and list representations."""
    if wrap:
      if isinstance(nested, ListWrapper):
        # Already wrapped; idempotent.
        return nested
      if _is_serialized_node_data(nested):
        return ListWrapper(nested)
      return nested
    else:
      if isinstance(nested, ListWrapper):
        return nested.as_list()
      return nested

  return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list,
                                   nested)
def shape_type_conversion(fn):
  """Decorator that handles tuple/TensorShape conversion.

  Used in `compute_output_shape` and `build`.

  Args:
    fn: function to wrap.

  Returns:
    Wrapped function.
  """

  def wrapper(instance, input_shape):
    # Pass shapes as tuples to `fn`
    # This preserves compatibility with external Keras.
    if input_shape is not None:
      input_shape = convert_shapes(input_shape, to_tuples=True)
    output_shape = fn(instance, input_shape)
    # Return shapes from `fn` as TensorShapes.
    if output_shape is not None:
      output_shape = convert_shapes(output_shape, to_tuples=False)
    return output_shape

  return wrapper
def are_all_symbolic_tensors(tensors):
  """Returns True if every element of `tensors` is a symbolic tensor."""
  return all(is_symbolic_tensor(t) for t in tensors)
# Non-Tensor types registered via `register_symbolic_tensor_type` that Keras
# should treat as symbolic tensors.
_user_convertible_tensor_types = set()
def is_extension_type(tensor):
  """Returns whether a tensor is of an ExtensionType.

  github.com/tensorflow/community/pull/269
  Currently it works by checking if `tensor` is a `CompositeTensor` instance,
  but this will be changed to use an appropriate extensiontype protocol
  check once ExtensionType is made public.

  Args:
    tensor: An object to test

  Returns:
    True if the tensor is an extension type object, false if not.
  """
  return isinstance(tensor, tf.__internal__.CompositeTensor)
def is_symbolic_tensor(tensor):
  """Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.

  A Variable can be seen as either: it is considered symbolic
  when we are in a graph scope, and eager when we are in an eager scope.

  Args:
    tensor: A tensor instance to test.

  Returns:
    True for symbolic tensors, False for eager tensors.
  """
  if isinstance(tensor, tf.Tensor):
    # Graph tensors carry a `graph` attribute; eager tensors do not.
    return hasattr(tensor, 'graph')
  elif is_extension_type(tensor):
    # A composite is symbolic if any of its component tensors is.
    component_tensors = tf.nest.flatten(tensor, expand_composites=True)
    return any(hasattr(t, 'graph') for t in component_tensors)
  elif isinstance(tensor, tf.Variable):
    # Variables that are output of a Keras Layer in Functional API mode
    # should be considered symbolic.
    # TODO(omalleyt): We need a better way to check this in order to
    # enable `run_eagerly=True` for Models containing Layers that
    # return Variables as outputs.
    return (getattr(tensor, '_keras_history', False) or
            not tf.executing_eagerly())
  elif isinstance(tensor, tuple(_user_convertible_tensor_types)):
    # Convert user-registered types to a real tensor first, then re-test.
    tensor = ops.convert_to_tensor_or_composite(tensor)
    return is_symbolic_tensor(tensor)
  else:
    return False
@keras_export('keras.__internal__.utils.register_symbolic_tensor_type', v1=[])
def register_symbolic_tensor_type(cls):
  """Allows users to specify types regarded as symbolic `Tensor`s.

  Used in conjunction with `tf.register_tensor_conversion_function`, calling
  `tf.keras.__internal__.utils.register_symbolic_tensor_type(cls)`
  allows non-`Tensor` objects to be plumbed through Keras layers.

  Example:

  ```python
  # One-time setup.
  class Foo:
    def __init__(self, input_):
      self._input = input_
    def value(self):
      return tf.constant(42.)

  tf.register_tensor_conversion_function(
      Foo, lambda x, *args, **kwargs: x.value())

  tf.keras.__internal__.utils.register_symbolic_tensor_type(Foo)

  # User-land.
  layer = tf.keras.layers.Lambda(lambda input_: Foo(input_))
  ```

  Args:
    cls: A `class` type which shall be regarded as a symbolic `Tensor`.
  """
  global _user_convertible_tensor_types
  if cls not in _user_convertible_tensor_types:
    # Also register a KerasTensor specialization so functional-API tracing
    # can represent values of this type.
    keras_tensor.register_keras_tensor_specialization(
        cls, keras_tensor.UserRegisteredTypeKerasTensor)
  _user_convertible_tensor_types.add(cls)
def type_spec_from_value(value):
  """Grab type_spec without converting array-likes to tensors."""
  if is_extension_type(value):
    return value._type_spec  # pylint: disable=protected-access
  # Array-like data (anything exposing `shape` and `dtype`) gets a TensorSpec
  # directly, without materializing a Tensor first.
  if hasattr(value, 'shape') and hasattr(value, 'dtype'):
    return tf.TensorSpec(value.shape, value.dtype)
  return tf.type_spec_from_value(value)
def is_ragged(tensor):
  """Returns true if `tensor` is a ragged tensor or ragged tensor value."""
  ragged_types = (tf.RaggedTensor, tf.compat.v1.ragged.RaggedTensorValue)
  return isinstance(tensor, ragged_types)
def is_sparse(tensor):
  """Returns true if `tensor` is a sparse tensor or sparse tensor value."""
  sparse_types = (tf.SparseTensor, tf.compat.v1.SparseTensorValue)
  return isinstance(tensor, sparse_types)
def is_tensor_or_variable(x):
  """Returns True if `x` is a TF tensor or a `tf.Variable`."""
  if tf.is_tensor(x):
    return True
  return isinstance(x, tf.Variable)
def assert_no_legacy_layers(layers):
  """Prevent tf.layers.Layers from being used with Keras.

  Certain legacy layers inherit from their keras analogs; however they are
  not supported with keras and can lead to subtle and hard to diagnose bugs.

  Args:
    layers: A list of layers to check

  Raises:
    TypeError: If any elements of layers are tf.layers.Layers
  """
  # An isinstance check against tf.layers.Layer would introduce a circular
  # dependency, so the `_is_legacy_layer` marker attribute is used instead.
  offenders = [layer for layer in layers
               if getattr(layer, '_is_legacy_layer', None)]
  if offenders:
    layer_str = '\n'.join('  ' + str(layer) for layer in offenders)
    raise TypeError(
        f'The following are legacy tf.layers.Layers:\n{layer_str}\n'
        'To use keras as a '
        'framework (for instance using the Network, Model, or Sequential '
        'classes), please use the tf.keras.layers implementation instead. '
        '(Or, if writing custom layers, subclass from tf.keras.layers rather '
        'than tf.layers)')
@tf_contextlib.contextmanager
def maybe_init_scope(layer):
  """Open an `init_scope` if in V2 mode and using the keras graph.

  Args:
    layer: The Layer/Model that is currently active.

  Yields:
    None
  """
  # Skip the init_scope in V1 mode or when using legacy tf.layers.
  needs_init_scope = (
      tf.compat.v1.executing_eagerly_outside_functions() and
      getattr(layer, '_keras_style', True))
  if needs_init_scope:
    with tf.init_scope():
      yield
  else:
    yield
@tf_contextlib.contextmanager
def graph_context_for_symbolic_tensors(*args, **kwargs):
  """Returns graph context manager if any of the inputs is a symbolic tensor."""
  inputs = list(args) + list(kwargs.values())
  if any(is_symbolic_tensor(value) for value in inputs):
    with K.get_graph().as_default():
      yield
  else:
    yield
def dataset_is_infinite(dataset):
  """True if the passed dataset is infinite."""
  cardinality = tf.data.experimental.cardinality(dataset)
  if tf.compat.v1.executing_eagerly_outside_functions():
    return tf.equal(cardinality, tf.data.experimental.INFINITE_CARDINALITY)
  # V1 graph mode: the cardinality tensor must be evaluated in a session.
  dataset_size = K.get_session().run(cardinality)
  return dataset_size == tf.data.experimental.INFINITE_CARDINALITY
def get_tensor_spec(t, dynamic_batch=False, name=None):
  """Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`.

  Args:
    t: A `tf.TypeSpec`, an extension-type value, a Keras symbolic tensor, or
      anything exposing `shape` and `dtype` attributes. Other values pass
      through as `None`.
    dynamic_batch: If True, relax the spec's leading (batch) dimension to
      `None` so inputs of any batch size conform.
    name: Optional name used when a new `TensorSpec` is constructed.

  Returns:
    A `TypeSpec` describing `t`, or `None` for non-tensor inputs.
  """
  # pylint: disable=protected-access
  if isinstance(t, tf.TypeSpec):
    spec = t
  elif is_extension_type(t):
    # TODO(b/148821952): Should these specs have a name attr?
    spec = t._type_spec
  elif (hasattr(t, '_keras_history') and
        hasattr(t._keras_history[0], '_type_spec')):
    # Keras symbolic tensor: reuse the spec recorded by the producing layer.
    return t._keras_history[0]._type_spec
  elif hasattr(t, 'shape') and hasattr(t, 'dtype'):
    spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
  else:
    return None  # Allow non-Tensors to pass through.
  if not dynamic_batch:
    return spec
  # Deep-copy before mutating so the caller's spec is left untouched.
  dynamic_batch_spec = copy.deepcopy(spec)
  # RaggedTensorSpec only has a private _shape.
  shape = dynamic_batch_spec._shape
  if shape.rank is not None and shape.rank > 0:
    shape_list = shape.as_list()
    shape_list[0] = None
    dynamic_batch_spec._shape = tf.TensorShape(shape_list)
  return dynamic_batch_spec
  # pylint: enable=protected-access
def sync_to_numpy_or_python_type(tensors):
  """Syncs and converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.

  Each tensor is converted with `tensor.numpy()`; scalar results are further
  converted to native Python types via `result.item()`.

  Converting Numpy scalars matters because Python types are usually easier to
  work with — in particular bfloat16 Numpy scalars support fewer operations
  than other Numpy values.

  Async strategies (such as `TPUStrategy` and `ParameterServerStrategy`) are
  forced to
  sync during this process.

  Args:
    tensors: A structure of tensors.

  Returns:
    `tensors`, but scalar tensors are converted to Python types and non-scalar
    tensors are converted to Numpy arrays.
  """
  if isinstance(tensors, tf.distribute.experimental.coordinator.RemoteValue):
    return tensors.fetch()

  def _convert(t):
    if isinstance(t, tf.Tensor):
      value = t.numpy()
      if np.ndim(value) == 0:
        return value.item()
      return value
    return t  # Ragged and sparse tensors are left as-is, not turned to NumPy.

  return tf.nest.map_structure(_convert, tensors)
def _astuple(attrs):
"""Converts the given attrs to tuple non-recursively."""
cls = type(attrs)
fields = getattr(cls, '__attrs_attrs__', None)
if fields is None:
raise ValueError(f'{cls} is not an attrs-decorated class.')
values = []
for field in fields:
values.append(getattr(attrs, field.name))
return tuple(values)
| 16,615 | 30.291902 | 93 | py |
keras | keras-master/keras/utils/generic_utils.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
import tensorflow.compat.v2 as tf
import binascii
import codecs
import importlib
import marshal
import os
import re
import sys
import threading
import time
import types as python_types
import warnings
import weakref
import numpy as np
from keras.utils import tf_contextlib
from keras.utils import tf_inspect
from tensorflow.python.util.tf_export import keras_export
# Registered serialization name -> custom class/function, populated by
# `CustomObjectScope` and `register_keras_serializable`.
_GLOBAL_CUSTOM_OBJECTS = {}
# Reverse mapping: custom object -> its registered serialization name.
_GLOBAL_CUSTOM_NAMES = {}
# Flag that determines whether to skip the NotImplementedError when calling
# get_config in custom models and layers. This is only enabled when saving to
# SavedModel, when the config isn't required.
_SKIP_FAILED_SERIALIZATION = False
# If a layer does not have a defined config, then the returned config will be a
# dictionary with the below key.
_LAYER_UNDEFINED_CONFIG_KEY = 'layer was saved without config'
@keras_export('keras.utils.custom_object_scope',  # pylint: disable=g-classes-have-attributes
              'keras.utils.CustomObjectScope')
class CustomObjectScope:
  """Exposes custom classes/functions to Keras deserialization internals.

  Within a `with custom_object_scope(objects_dict)` block, Keras methods such
  as `tf.keras.models.load_model` or `tf.keras.models.model_from_config`
  can deserialize any custom object referenced by a saved config
  (e.g. a custom layer or metric).

  Example:

  Consider a custom regularizer `my_regularizer`:

  ```python
  layer = Dense(3, kernel_regularizer=my_regularizer)
  config = layer.get_config()  # Config contains a reference to `my_regularizer`
  ...
  # Later:
  with custom_object_scope({'my_regularizer': my_regularizer}):
    layer = Dense.from_config(config)
  ```

  Args:
    *args: Dictionary or dictionaries of `{name: object}` pairs.
  """

  def __init__(self, *args):
    self.custom_objects = args
    self.backup = None

  def __enter__(self):
    # Snapshot the registry so it can be restored verbatim on exit.
    self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
    for object_dict in self.custom_objects:
      _GLOBAL_CUSTOM_OBJECTS.update(object_dict)
    return self

  def __exit__(self, *args, **kwargs):
    _GLOBAL_CUSTOM_OBJECTS.clear()
    _GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
  """Retrieves a live reference to the global dictionary of custom objects.

  Prefer updating and clearing custom objects through `custom_object_scope`;
  `get_custom_objects` exists for direct access to the current collection.

  Example:

  ```python
  get_custom_objects().clear()
  get_custom_objects()['MyObject'] = MyObject
  ```

  Returns:
    Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
  """
  return _GLOBAL_CUSTOM_OBJECTS
# Store a unique, per-object ID for shared objects.
#
# We store a unique ID for each object so that we may, at loading time,
# re-create the network properly. Without this ID, we would have no way of
# determining whether a config is a description of a new object that
# should be created or is merely a reference to an already-created object.
SHARED_OBJECT_KEY = 'shared_object_id'
# Thread-local holders for the disable flag and the active loading/saving
# scopes: each thread saves/loads independently.
SHARED_OBJECT_DISABLED = threading.local()
SHARED_OBJECT_LOADING = threading.local()
SHARED_OBJECT_SAVING = threading.local()
# Attributes on the threadlocal variable must be set per-thread, thus we
# cannot initialize these globally. Instead, we have accessor functions with
# default values.
def _shared_object_disabled():
  """Thread-safe accessor for the shared-object-handling disable flag."""
  return getattr(SHARED_OBJECT_DISABLED, 'disabled', False)
def _shared_object_loading_scope():
  """Thread-safe accessor for the active shared-object loading scope."""
  return getattr(SHARED_OBJECT_LOADING, 'scope', NoopLoadingScope())
def _shared_object_saving_scope():
  """Thread-safe accessor for the active shared-object saving scope."""
  return getattr(SHARED_OBJECT_SAVING, 'scope', None)
class DisableSharedObjectScope:
  """A context manager that turns off shared-object handling.

  Disables shared-object tracking for both saving and loading. Created
  primarily for use with `clone_model`, which does extra surgery that is
  incompatible with shared objects.
  """

  def __enter__(self):
    SHARED_OBJECT_DISABLED.disabled = True
    # Remember the scopes that were active so they can be reinstated on exit.
    self._orig_loading_scope = _shared_object_loading_scope()
    self._orig_saving_scope = _shared_object_saving_scope()

  def __exit__(self, *args, **kwargs):
    SHARED_OBJECT_DISABLED.disabled = False
    SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
    SHARED_OBJECT_SAVING.scope = self._orig_saving_scope
class NoopLoadingScope:
  """The default shared-object loading scope; tracks nothing.

  Exists so that serialization code which doesn't care about shared objects
  (e.g. when serializing a single object) can use the same interface.
  """

  def get(self, unused_object_id):
    # Nothing is ever tracked, so no object can ever be found.
    return None

  def set(self, object_id, obj):
    # Intentionally a no-op.
    pass
class SharedObjectLoadingScope:
  """A context manager for keeping track of loaded objects.

  Deserialization may encounter objects that are shared across multiple
  layers. To restore the network structure to its original state, this scope
  lets the loader re-use an already-instantiated shared object rather than
  cloning it.
  """

  def __enter__(self):
    # When shared-object handling is disabled, hand back a no-op scope.
    if _shared_object_disabled():
      return NoopLoadingScope()
    global SHARED_OBJECT_LOADING
    SHARED_OBJECT_LOADING.scope = self
    self._obj_ids_to_obj = {}
    return self

  def get(self, object_id):
    """Given a shared object ID, returns a previously instantiated object.

    Args:
      object_id: shared object ID to use when attempting to find
        already-loaded object.

    Returns:
      The object, if we've seen this ID before. Else, `None`.
    """
    # A missing ID means "never shared" — report not-found rather than raise.
    if object_id is None:
      return
    return self._obj_ids_to_obj.get(object_id)

  def set(self, object_id, obj):
    """Stores an instantiated object for future lookup and sharing."""
    if object_id is None:
      return
    self._obj_ids_to_obj[object_id] = obj

  def __exit__(self, *args, **kwargs):
    global SHARED_OBJECT_LOADING
    SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
class SharedObjectConfig(dict):
  """A configuration container that keeps track of references.

  Configs referenced more than once automatically receive a shared-object ID,
  which makes proper shared-object reconstruction possible at load time.

  In most cases it would be more proper to subclass something like
  `collections.UserDict` or `collections.Mapping` rather than `dict` directly.
  Unfortunately, python's json encoder does not support `Mapping`s — important
  functionality to retain, since we are dealing with serialization. Subclassing
  `dict` is safe here: no core methods are overridden, only a reference-count
  method is added.
  """

  def __init__(self, base_config, object_id, **kwargs):
    self.ref_count = 1
    self.object_id = object_id
    super().__init__(base_config, **kwargs)

  def increment_ref_count(self):
    # Attach the shared-object ID the moment a second reference appears. Only
    # attaching it when strictly necessary keeps single-use configs unchanged,
    # making backwards-compatibility breakage less likely.
    if self.ref_count == 1:
      self[SHARED_OBJECT_KEY] = self.object_id
    self.ref_count += 1
class SharedObjectSavingScope:
  """Keeps track of shared object configs when serializing."""

  def __enter__(self):
    # With shared-object handling disabled there is no scope to provide.
    if _shared_object_disabled():
      return None
    global SHARED_OBJECT_SAVING
    # Serialization can happen at a number of layers for a number of reasons.
    # We may end up with a case where we're opening a saving scope within
    # another saving scope. In that case, we'd like to use the outermost scope
    # available and ignore inner scopes, since there is not (yet) a reasonable
    # use case for having these nested and distinct.
    if _shared_object_saving_scope() is not None:
      self._passthrough = True
      return _shared_object_saving_scope()
    else:
      self._passthrough = False
    SHARED_OBJECT_SAVING.scope = self
    # Weak keys so tracked objects can still be garbage-collected mid-save.
    self._shared_objects_config = weakref.WeakKeyDictionary()
    self._next_id = 0
    return self

  def get_config(self, obj):
    """Gets a `SharedObjectConfig` if one has already been seen for `obj`.

    Args:
      obj: The object for which to retrieve the `SharedObjectConfig`.

    Returns:
      The SharedObjectConfig for a given object, if already seen. Else,
        `None`.
    """
    try:
      shared_object_config = self._shared_objects_config[obj]
    except (TypeError, KeyError):
      # If the object is unhashable (e.g. a subclass of `AbstractBaseClass`
      # that has not overridden `__hash__`), a `TypeError` will be thrown.
      # We'll just continue on without shared object support.
      return None
    shared_object_config.increment_ref_count()
    return shared_object_config

  def create_config(self, base_config, obj):
    """Create a new SharedObjectConfig for a given object."""
    shared_object_config = SharedObjectConfig(base_config, self._next_id)
    self._next_id += 1
    try:
      self._shared_objects_config[obj] = shared_object_config
    except TypeError:
      # If the object is unhashable (e.g. a subclass of `AbstractBaseClass`
      # that has not overridden `__hash__`), a `TypeError` will be thrown.
      # We'll just continue on without shared object support.
      pass
    return shared_object_config

  def __exit__(self, *args, **kwargs):
    # Inner (passthrough) scopes must not clear the outer scope's state.
    if not getattr(self, '_passthrough', False):
      global SHARED_OBJECT_SAVING
      SHARED_OBJECT_SAVING.scope = None
def serialize_keras_class_and_config(
    cls_name, cls_config, obj=None, shared_object_id=None):
  """Returns the serialization of the class with the given config.

  Args:
    cls_name: Registered name of the class being serialized.
    cls_config: The class's config.
    obj: Optional object instance, used for shared-object tracking.
    shared_object_id: Optional pre-existing shared object ID to retain.

  Returns:
    A dict with `class_name` and `config` keys (plus a shared-object ID key
    when applicable).
  """
  base_config = {'class_name': cls_name, 'config': cls_config}
  # Some branches of the load path call this function too; in that case a
  # shared object ID may already exist and must be retained.
  if shared_object_id is not None:
    base_config[SHARED_OBJECT_KEY] = shared_object_id
  # Within an active `SharedObjectSavingScope`, re-use a previously serialized
  # config for the same object. The stored extra ID field lets the shared
  # relationship be re-created at load time.
  scope = _shared_object_saving_scope()
  if scope is not None and obj is not None:
    existing_config = scope.get_config(obj)
    if existing_config is None:
      return scope.create_config(base_config, obj)
    return existing_config
  return base_config
@keras_export('keras.utils.register_keras_serializable')
def register_keras_serializable(package='Custom', name=None):
  """Registers an object with the Keras serialization framework.

  The decorated class or function is injected into the Keras custom-object
  dictionary, so it can be serialized and deserialized without an entry in the
  user-provided custom-object dict. A function Keras calls to obtain the
  object's serializable string key is injected as well.

  Note that to be serialized and deserialized, classes must implement the
  `get_config()` method. Functions do not have this requirement.

  The object will be registered under the key 'package>name' where `name`,
  defaults to the object name if not passed.

  Args:
    package: The package that this class belongs to.
    name: The name to serialize this class under in this package. If None, the
      class' name will be used.

  Returns:
    A decorator that registers the decorated class with the passed names.
  """

  def decorator(arg):
    """Registers a class with the Keras serialization framework."""
    serial_name = arg.__name__ if name is None else name
    registered_name = package + '>' + serial_name
    if tf_inspect.isclass(arg) and not hasattr(arg, 'get_config'):
      raise ValueError(
          'Cannot register a class that does not have a get_config() method.')
    if registered_name in _GLOBAL_CUSTOM_OBJECTS:
      raise ValueError(
          f'{registered_name} has already been registered to '
          f'{_GLOBAL_CUSTOM_OBJECTS[registered_name]}')
    if arg in _GLOBAL_CUSTOM_NAMES:
      raise ValueError(
          f'{arg} has already been registered to {_GLOBAL_CUSTOM_NAMES[arg]}')
    _GLOBAL_CUSTOM_OBJECTS[registered_name] = arg
    _GLOBAL_CUSTOM_NAMES[arg] = registered_name
    return arg

  return decorator
@keras_export('keras.utils.get_registered_name')
def get_registered_name(obj):
  """Returns the name registered to an object within the Keras framework.

  Part of the Keras serialization/deserialization framework: maps objects to
  the string names used for them during serialization.

  Args:
    obj: The object to look up.

  Returns:
    The name associated with the object, or the default Python name if the
    object is not registered.
  """
  if obj in _GLOBAL_CUSTOM_NAMES:
    return _GLOBAL_CUSTOM_NAMES[obj]
  return obj.__name__
@tf_contextlib.contextmanager
def skip_failed_serialization():
  """Context manager under which `get_config` NotImplementedErrors are skipped."""
  global _SKIP_FAILED_SERIALIZATION
  previous = _SKIP_FAILED_SERIALIZATION
  _SKIP_FAILED_SERIALIZATION = True
  try:
    yield
  finally:
    # Restore the previous value even if the body raised.
    _SKIP_FAILED_SERIALIZATION = previous
@keras_export('keras.utils.get_registered_object')
def get_registered_object(name, custom_objects=None, module_objects=None):
  """Returns the class associated with `name` if it is registered with Keras.

  Part of the Keras serialization/deserialization framework: maps strings to
  the objects associated with them during serialization.

  Example:

  ```
  def from_config(cls, config, custom_objects=None):
    if 'my_custom_object_name' in config:
      config['hidden_cls'] = tf.keras.utils.get_registered_object(
          config['my_custom_object_name'], custom_objects=custom_objects)
  ```

  Args:
    name: The name to look up.
    custom_objects: A dictionary of custom objects to look the name up in.
      Generally, custom_objects is provided by the user.
    module_objects: A dictionary of custom objects to look the name up in.
      Generally, module_objects is provided by midlevel library implementers.

  Returns:
    An instantiable class associated with 'name', or None if no such class
      exists.
  """
  # Lookup precedence: global registry, then user dict, then module dict.
  if name in _GLOBAL_CUSTOM_OBJECTS:
    return _GLOBAL_CUSTOM_OBJECTS[name]
  if custom_objects and name in custom_objects:
    return custom_objects[name]
  if module_objects and name in module_objects:
    return module_objects[name]
  return None
# pylint: disable=g-bad-exception-name
class CustomMaskWarning(Warning):
  """Warning category used when a custom-mask layer lacks `get_config`."""
  pass
# pylint: enable=g-bad-exception-name
@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
  """Serialize a Keras object into a JSON-compatible representation.

  Calls to `serialize_keras_object` while underneath the
  `SharedObjectSavingScope` context manager will cause any objects re-used
  across multiple layers to be saved with a special shared object ID. This
  allows the network to be re-created properly during deserialization.

  Args:
    instance: The object to serialize.

  Returns:
    A dict-like, JSON-compatible representation of the object's config.

  Raises:
    ValueError: If `instance` implements neither `get_config()` nor
      `__name__`.
  """
  _, instance = tf.__internal__.decorator.unwrap(instance)
  if instance is None:
    return None

  # pylint: disable=protected-access
  #
  # For v1 layers, checking supports_masking is not enough. We have to also
  # check whether compute_mask has been overridden.
  supports_masking = (getattr(instance, 'supports_masking', False)
                      or (hasattr(instance, 'compute_mask')
                          and not is_default(instance.compute_mask)))
  if supports_masking and is_default(instance.get_config):
    warnings.warn('Custom mask layers require a config and must override '
                  'get_config. When loading, the custom mask layer must be '
                  'passed to the custom_objects argument.',
                  category=CustomMaskWarning)
  # pylint: enable=protected-access

  if hasattr(instance, 'get_config'):
    name = get_registered_name(instance.__class__)
    try:
      config = instance.get_config()
    except NotImplementedError as e:
      if _SKIP_FAILED_SERIALIZATION:
        return serialize_keras_class_and_config(
            name, {_LAYER_UNDEFINED_CONFIG_KEY: True})
      raise e
    serialization_config = {}
    for key, item in config.items():
      if isinstance(item, str):
        serialization_config[key] = item
        continue
      # Any object of a different type needs to be converted to string or dict
      # for serialization (e.g. custom functions, custom classes)
      try:
        serialized_item = serialize_keras_object(item)
        if isinstance(serialized_item, dict) and not isinstance(item, dict):
          serialized_item['__passive_serialization__'] = True
        serialization_config[key] = serialized_item
      except ValueError:
        serialization_config[key] = item
    # `name` was already computed above; no need to look it up a second time.
    return serialize_keras_class_and_config(
        name, serialization_config, instance)
  if hasattr(instance, '__name__'):
    return get_registered_name(instance)
  # Note: the second `doesn't` previously used the escape `\t`, which rendered
  # a literal tab character in the error message.
  raise ValueError(f'Cannot serialize {instance} since it doesn\'t implement '
                   '`get_config()`, and also doesn\'t have `__name__`')
def get_custom_objects_by_name(item, custom_objects=None):
  """Returns the item if it is in either local or global custom objects."""
  # Global registry takes precedence over the locally supplied dict.
  if item in _GLOBAL_CUSTOM_OBJECTS:
    return _GLOBAL_CUSTOM_OBJECTS[item]
  if custom_objects and item in custom_objects:
    return custom_objects[item]
  return None
def class_and_config_for_serialized_keras_object(
    config,
    module_objects=None,
    custom_objects=None,
    printable_module_name='object'):
  """Returns the class name and config for a serialized keras object.

  Args:
    config: A dict of the form `{'class_name': ..., 'config': ...}`.
    module_objects: Optional dict of built-in objects to look class names
      up in (usually supplied by midlevel library implementers).
    custom_objects: Optional dict of user-supplied custom objects.
    printable_module_name: Human-readable object type, used in error messages.

  Returns:
    A `(cls, cls_config)` tuple. Nested items inside `cls_config` that were
    passively serialized (or are names of registered custom functions) are
    deserialized in place.

  Raises:
    ValueError: If `config` is not a dict with `class_name` and `config`
      keys, or if no class can be found for `class_name`.
  """
  if (not isinstance(config, dict)
      or 'class_name' not in config
      or 'config' not in config):
    raise ValueError(
        f'Improper config format for {config}. '
        'Expecting python dict contains `class_name` and `config` as keys')
  class_name = config['class_name']
  cls = get_registered_object(class_name, custom_objects, module_objects)
  if cls is None:
    raise ValueError(
        f'Unknown {printable_module_name}: {class_name}. Please ensure this '
        'object is passed to the `custom_objects` argument. See '
        'https://www.tensorflow.org/guide/keras/save_and_serialize'
        '#registering_the_custom_object for details.')
  cls_config = config['config']
  # Check if `cls_config` is a list. If it is a list, return the class and the
  # associated class configs for recursively deserialization. This case will
  # happen on the old version of sequential model (e.g. `keras_version` ==
  # "2.0.6"), which is serialized in a different structure, for example
  # "{'class_name': 'Sequential',
  #   'config': [{'class_name': 'Embedding', 'config': ...}, {}, ...]}".
  if isinstance(cls_config, list):
    return (cls, cls_config)
  deserialized_objects = {}
  for key, item in cls_config.items():
    if key == 'name':
      # Assume that the value of 'name' is a string that should not be
      # deserialized as a function. This avoids the corner case where
      # cls_config['name'] has an identical name to a custom function and
      # gets converted into that function.
      deserialized_objects[key] = item
    elif isinstance(item, dict) and '__passive_serialization__' in item:
      deserialized_objects[key] = deserialize_keras_object(
          item,
          module_objects=module_objects,
          custom_objects=custom_objects,
          printable_module_name='config_item')
    # TODO(momernick): Should this also have 'module_objects'?
    elif (isinstance(item, str) and
          tf_inspect.isfunction(get_registered_object(item, custom_objects))):
      # Handle custom functions here. When saving functions, we only save the
      # function's name as a string. If we find a matching string in the custom
      # objects during deserialization, we convert the string back to the
      # original function.
      # Note that a potential issue is that a string field could have a naming
      # conflict with a custom function name, but this should be a rare case.
      # This issue does not occur if a string field has a naming conflict with
      # a custom object, since the config of an object will always be a dict.
      deserialized_objects[key] = get_registered_object(item, custom_objects)
  # Write the deserialized replacements back into the config in place.
  for key, item in deserialized_objects.items():
    cls_config[key] = deserialized_objects[key]
  return (cls, cls_config)
@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
                             module_objects=None,
                             custom_objects=None,
                             printable_module_name='object'):
  """Turns the serialized form of a Keras object back into an actual object.

  This function is for mid-level library implementers rather than end users.

  Importantly, this utility requires you to provide the dict of `module_objects`
  to use for looking up the object config; this is not populated by default.
  If you need a deserialization utility that has preexisting knowledge of
  built-in Keras objects, use e.g. `keras.layers.deserialize(config)`,
  `keras.metrics.deserialize(config)`, etc.

  Calling `deserialize_keras_object` while underneath the
  `SharedObjectLoadingScope` context manager will cause any already-seen shared
  objects to be returned as-is rather than creating a new object.

  Args:
    identifier: the serialized form of the object.
    module_objects: A dictionary of built-in objects to look the name up in.
      Generally, `module_objects` is provided by midlevel library implementers.
    custom_objects: A dictionary of custom objects to look the name up in.
      Generally, `custom_objects` is provided by the end user.
    printable_module_name: A human-readable string representing the type of the
      object. Printed in case of exception.

  Returns:
    The deserialized object.

  Example:

  A mid-level library implementer might want to implement a utility for
  retrieving an object from its config, as such:

  ```python
  def deserialize(config, custom_objects=None):
    return deserialize_keras_object(
        config,
        module_objects=globals(),
        custom_objects=custom_objects,
        printable_module_name="MyObjectType",
    )
  ```

  This is how e.g. `keras.layers.deserialize()` is implemented.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    # In this case we are dealing with a Keras config dictionary.
    config = identifier
    (cls, cls_config) = class_and_config_for_serialized_keras_object(
        config, module_objects, custom_objects, printable_module_name)
    # If this object has already been loaded (i.e. it's shared between multiple
    # objects), return the already-loaded object.
    shared_object_id = config.get(SHARED_OBJECT_KEY)
    shared_object = _shared_object_loading_scope().get(shared_object_id)  # pylint: disable=assignment-from-none
    if shared_object is not None:
      return shared_object
    if hasattr(cls, 'from_config'):
      arg_spec = tf_inspect.getfullargspec(cls.from_config)
      custom_objects = custom_objects or {}
      # Pass custom objects explicitly when `from_config` accepts them;
      # otherwise make them visible via a scope.
      if 'custom_objects' in arg_spec.args:
        deserialized_obj = cls.from_config(
            cls_config,
            custom_objects=dict(
                list(_GLOBAL_CUSTOM_OBJECTS.items()) +
                list(custom_objects.items())))
      else:
        with CustomObjectScope(custom_objects):
          deserialized_obj = cls.from_config(cls_config)
    else:
      # Then `cls` may be a function returning a class.
      # in this case by convention `config` holds
      # the kwargs of the function.
      custom_objects = custom_objects or {}
      with CustomObjectScope(custom_objects):
        deserialized_obj = cls(**cls_config)
    # Add object to shared objects, in case we find it referenced again.
    _shared_object_loading_scope().set(shared_object_id, deserialized_obj)
    return deserialized_obj
  elif isinstance(identifier, str):
    # String identifier: look the name up, user dict first, then globals,
    # then the module dict.
    object_name = identifier
    if custom_objects and object_name in custom_objects:
      obj = custom_objects.get(object_name)
    elif object_name in _GLOBAL_CUSTOM_OBJECTS:
      obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
    else:
      obj = module_objects.get(object_name)
      if obj is None:
        raise ValueError(
            f'Unknown {printable_module_name}: {object_name}. Please ensure '
            'this object is passed to the `custom_objects` argument. See '
            'https://www.tensorflow.org/guide/keras/save_and_serialize'
            '#registering_the_custom_object for details.')
    # Classes passed by name are instantiated with no args, functions are
    # returned as-is.
    if tf_inspect.isclass(obj):
      return obj()
    return obj
  elif tf_inspect.isfunction(identifier):
    # If a function has already been deserialized, return as is.
    return identifier
  else:
    raise ValueError(
        f'Could not interpret serialized {printable_module_name}: {identifier}')
def func_dump(func):
  """Serializes a user defined function.

  Args:
    func: the function to serialize.

  Returns:
    A tuple `(code, defaults, closure)` where `code` is the base64-encoded
    marshaled bytecode (an ASCII string), `defaults` is the function's
    default-argument tuple (or None), and `closure` is a tuple of the captured
    cell contents (or None).
  """
  raw_code = marshal.dumps(func.__code__)
  if os.name == 'nt':
    # On Windows, backslashes in the marshaled blob are normalized to forward
    # slashes — presumably to keep dumps consistent with embedded path
    # separators; historical behavior preserved as-is.
    raw_code = raw_code.replace(b'\\', b'/')
  # Encoding was previously duplicated in both branches; hoisted here.
  code = codecs.encode(raw_code, 'base64').decode('ascii')
  defaults = func.__defaults__
  if func.__closure__:
    closure = tuple(cell.cell_contents for cell in func.__closure__)
  else:
    closure = None
  return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
  """Deserializes a user defined function.

  Args:
    code: bytecode of the function.
    defaults: defaults of the function.
    closure: closure of the function.
    globs: dictionary of global objects.

  Returns:
    A function object.
  """
  if isinstance(code, (tuple, list)):  # unpack previous dump
    code, defaults, closure = code
    if isinstance(defaults, list):
      defaults = tuple(defaults)

  def _as_cell(value):
    """Wraps `value` in a python cell object unless it already is one."""
    def probe():
      # pylint: disable=pointless-statement
      value  # accessed only so that it is captured in __closure__
    fresh_cell = probe.__closure__[0]
    if isinstance(value, type(fresh_cell)):
      return value
    return fresh_cell

  if closure is not None:
    closure = tuple(_as_cell(item) for item in closure)
  try:
    raw_code = codecs.decode(code.encode('ascii'), 'base64')
  except (UnicodeEncodeError, binascii.Error):
    # Fall back for dumps that are not valid ASCII/base64.
    raw_code = code.encode('raw_unicode_escape')
  code = marshal.loads(raw_code)
  if globs is None:
    globs = globals()
  return python_types.FunctionType(
      code, globs, name=code.co_name, argdefs=defaults, closure=closure)
def has_arg(fn, name, accept_all=False):
  """Checks if a callable accepts a given keyword argument.

  Args:
    fn: Callable to inspect.
    name: Check if `fn` can be called with `name` as a keyword argument.
    accept_all: What to return if there is no parameter called `name` but the
      function accepts a `**kwargs` argument.

  Returns:
    bool, whether `fn` accepts a `name` keyword argument.
  """
  spec = tf_inspect.getfullargspec(fn)
  if name in spec.args or name in spec.kwonlyargs:
    return True
  # `name` is not an explicit parameter; a `**kwargs` catch-all may still
  # accept it when `accept_all` is requested.
  return bool(accept_all and spec.varkw is not None)
@keras_export('keras.utils.Progbar')
class Progbar:
  """Displays a progress bar.
  Args:
    target: Total number of steps expected, None if unknown.
    width: Progress bar width on screen.
    verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
    stateful_metrics: Iterable of string names of metrics that should *not* be
      averaged over time. Metrics in this list will be displayed as-is. All
      others will be averaged by the progbar before display.
    interval: Minimum visual progress update interval (in seconds).
    unit_name: Display name for step counts (usually "step" or "sample").
  """
  def __init__(self,
               target,
               width=30,
               verbose=1,
               interval=0.05,
               stateful_metrics=None,
               unit_name='step'):
    self.target = target
    self.width = width
    self.verbose = verbose
    self.interval = interval
    self.unit_name = unit_name
    if stateful_metrics:
      self.stateful_metrics = set(stateful_metrics)
    else:
      self.stateful_metrics = set()
    # True when the current line can be rewritten in place (interactive TTY
    # or notebook-like environment); otherwise each update prints a newline.
    self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
                              sys.stdout.isatty()) or
                             'ipykernel' in sys.modules or
                             'posix' in sys.modules or
                             'PYCHARM_HOSTED' in os.environ)
    # Width of the last rendered line, used to pad/erase on redraw.
    self._total_width = 0
    # Step index supplied in the most recent `update` call.
    self._seen_so_far = 0
    # We use a dict + list to avoid garbage collection
    # issues found in OrderedDict
    self._values = {}
    self._values_order = []
    self._start = time.time()
    self._last_update = 0
    self._time_at_epoch_start = self._start
    self._time_at_epoch_end = None
    self._time_after_first_step = None
  def update(self, current, values=None, finalize=None):
    """Updates the progress bar.
    Args:
      current: Index of current step.
      values: List of tuples: `(name, value_for_last_step)`. If `name` is in
        `stateful_metrics`, `value_for_last_step` will be displayed as-is.
        Else, an average of the metric over time will be displayed.
      finalize: Whether this is the last update for the progress bar. If
        `None`, defaults to `current >= self.target`.
    """
    if finalize is None:
      if self.target is None:
        finalize = False
      else:
        finalize = current >= self.target
    values = values or []
    for k, v in values:
      if k not in self._values_order:
        self._values_order.append(k)
      if k not in self.stateful_metrics:
        # In the case that progress bar doesn't have a target value in the first
        # epoch, both on_batch_end and on_epoch_end will be called, which will
        # cause 'current' and 'self._seen_so_far' to have the same value. Force
        # the minimal value to 1 here, otherwise stateful_metric will be 0s.
        value_base = max(current - self._seen_so_far, 1)
        if k not in self._values:
          self._values[k] = [v * value_base, value_base]
        else:
          self._values[k][0] += v * value_base
          self._values[k][1] += value_base
      else:
        # Stateful metrics output a numeric value. This representation
        # means "take an average from a single value" but keeps the
        # numeric formatting.
        self._values[k] = [v, 1]
    self._seen_so_far = current
    now = time.time()
    # Elapsed time prefix; may be replaced by an ETA below for verbose=1.
    info = ' - %.0fs' % (now - self._start)
    if current == self.target:
      self._time_at_epoch_end = now
    if self.verbose == 1:
      # Throttle redraws to at most one per `interval` seconds.
      if now - self._last_update < self.interval and not finalize:
        return
      prev_total_width = self._total_width
      if self._dynamic_display:
        sys.stdout.write('\b' * prev_total_width)
        sys.stdout.write('\r')
      else:
        sys.stdout.write('\n')
      if self.target is not None:
        numdigits = int(np.log10(self.target)) + 1
        bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
        prog = float(current) / self.target
        prog_width = int(self.width * prog)
        if prog_width > 0:
          bar += ('=' * (prog_width - 1))
          if current < self.target:
            bar += '>'
          else:
            bar += '='
        bar += ('.' * (self.width - prog_width))
        bar += ']'
      else:
        bar = '%7d/Unknown' % current
      self._total_width = len(bar)
      sys.stdout.write(bar)
      time_per_unit = self._estimate_step_duration(current, now)
      if self.target is None or finalize:
        info += self._format_time(time_per_unit, self.unit_name)
      else:
        eta = time_per_unit * (self.target - current)
        if eta > 3600:
          eta_format = '%d:%02d:%02d' % (eta // 3600,
                                         (eta % 3600) // 60, eta % 60)
        elif eta > 60:
          eta_format = '%d:%02d' % (eta // 60, eta % 60)
        else:
          eta_format = '%ds' % eta
        # Note: this intentionally replaces the elapsed-time prefix with ETA.
        info = ' - ETA: %s' % eta_format
      for k in self._values_order:
        info += ' - %s:' % k
        if isinstance(self._values[k], list):
          avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
          if abs(avg) > 1e-3:
            info += ' %.4f' % avg
          else:
            info += ' %.4e' % avg
        else:
          info += ' %s' % self._values[k]
      self._total_width += len(info)
      # Pad with spaces so a shorter line fully overwrites the previous one.
      if prev_total_width > self._total_width:
        info += (' ' * (prev_total_width - self._total_width))
      if finalize:
        info += '\n'
      sys.stdout.write(info)
      sys.stdout.flush()
    elif self.verbose == 2:
      # Semi-verbose mode prints a single summary line per epoch (finalize).
      if finalize:
        # NOTE(review): if `finalize=True` is passed explicitly while
        # `target` is None, `np.log10(None)` below would fail — confirm
        # callers always supply a target in verbose=2 mode.
        numdigits = int(np.log10(self.target)) + 1
        count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
        info = count + info
        for k in self._values_order:
          info += ' - %s:' % k
          avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
          if avg > 1e-3:
            info += ' %.4f' % avg
          else:
            info += ' %.4e' % avg
        if self._time_at_epoch_end:
          time_per_epoch = self._time_at_epoch_end - self._time_at_epoch_start
          avg_time_per_step = time_per_epoch / self.target
          self._time_at_epoch_start = now
          self._time_at_epoch_end = None
          info += ' -' + self._format_time(time_per_epoch, 'epoch')
          info += ' -' + self._format_time(avg_time_per_step, self.unit_name)
          info += '\n'
        sys.stdout.write(info)
        sys.stdout.flush()
    self._last_update = now
  def add(self, n, values=None):
    # Advances the bar by `n` steps; `values` has `update` semantics.
    self.update(self._seen_so_far + n, values)
  def _format_time(self, time_per_unit, unit_name):
    """format a given duration to display to the user.
    Given the duration, this function formats it in either milliseconds
    or seconds and displays the unit (i.e. ms/step or s/epoch)
    Args:
      time_per_unit: the duration to display
      unit_name: the name of the unit to display
    Returns:
      a string with the correctly formatted duration and units
    """
    formatted = ''
    if time_per_unit >= 1 or time_per_unit == 0:
      formatted += ' %.0fs/%s' % (time_per_unit, unit_name)
    elif time_per_unit >= 1e-3:
      formatted += ' %.0fms/%s' % (time_per_unit * 1e3, unit_name)
    else:
      formatted += ' %.0fus/%s' % (time_per_unit * 1e6, unit_name)
    return formatted
  def _estimate_step_duration(self, current, now):
    """Estimate the duration of a single step.
    Given the step number `current` and the corresponding time `now`
    this function returns an estimate for how long a single step
    takes. If this is called before one step has been completed
    (i.e. `current == 0`) then zero is given as an estimate. The duration
    estimate ignores the duration of the (assumed to be non-representative)
    first step for estimates when more steps are available (i.e. `current>1`).
    Args:
      current: Index of current step.
      now: The current time.
    Returns: Estimate of the duration of a single step.
    """
    if current:
      # there are a few special scenarios here:
      # 1) somebody is calling the progress bar without ever supplying step 1
      # 2) somebody is calling the progress bar and supplies step one mulitple
      #    times, e.g. as part of a finalizing call
      # in these cases, we just fall back to the simple calculation
      if self._time_after_first_step is not None and current > 1:
        time_per_unit = (now - self._time_after_first_step) / (current - 1)
      else:
        time_per_unit = (now - self._start) / current
      if current == 1:
        self._time_after_first_step = now
      return time_per_unit
    else:
      return 0
  def _update_stateful_metrics(self, stateful_metrics):
    # Registers additional metric names that must be shown as-is.
    self.stateful_metrics = self.stateful_metrics.union(stateful_metrics)
def make_batches(size, batch_size):
  """Returns a list of batch indices (tuples of indices).

  Args:
    size: Integer, total size of the data to slice into batches.
    batch_size: Integer, batch size.

  Returns:
    A list of tuples of array indices.
  """
  num_batches = int(np.ceil(size / float(batch_size)))
  batches = []
  for idx in range(num_batches):
    start = idx * batch_size
    batches.append((start, min(size, start + batch_size)))
  return batches
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Args:
arrays: Single array or list of arrays.
start: can be an integer index (start index) or a list/array of indices
stop: integer (stop index); should be None if `start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError('The stop argument has to be None if the value of start '
f'is a list. Received start={start}, stop={stop}')
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
return [
None if x is None else
None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays
]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
if hasattr(start, '__getitem__'):
return arrays[start:stop]
return [None]
def to_list(x):
  """Normalizes a list/tensor into a list.

  If a tensor is passed, we return
  a list of size 1 containing the tensor.

  Args:
    x: target object to be normalized.

  Returns:
    A list.
  """
  return x if isinstance(x, list) else [x]
def to_snake_case(name):
  """Converts a CamelCase string to snake_case.

  Args:
    name: String to convert.

  Returns:
    The snake_case version of `name`. If the converted name would start
    with an underscore (i.e. the original name was "private"), it is
    prefixed with 'private' so it is safe to use as a scope name.
  """
  intermediate = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
  insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
  # If the class is private the name starts with "_" which is not secure
  # for creating scopes. We prefix the name with "private" in this case.
  # The emptiness check also guards against an IndexError on empty input.
  if not insecure or insecure[0] != '_':
    return insecure
  return 'private' + insecure
def is_all_none(structure):
  """Returns True iff every leaf of the (possibly nested) structure is None."""
  # We cannot use Python's `any` because the iterable may return Tensors.
  for leaf in tf.nest.flatten(structure):
    if leaf is not None:
      return False
  return True
def check_for_unexpected_keys(name, input_dict, expected_values):
  """Raises ValueError if `input_dict` contains keys not in `expected_values`."""
  unexpected = set(input_dict) - set(expected_values)
  if unexpected:
    raise ValueError(
        f'Unknown entries in {name} dictionary: {list(unexpected)}. Only expected '
        f'following keys: {expected_values}')
def validate_kwargs(kwargs,
                    allowed_kwargs,
                    error_message='Keyword argument not understood:'):
  """Checks that all keyword arguments are in the set of allowed keys."""
  unknown = [key for key in kwargs if key not in allowed_kwargs]
  if unknown:
    # Report the first offending keyword, matching iteration order.
    raise TypeError(error_message, unknown[0])
def validate_config(config):
  """Determines whether config appears to be a valid layer config."""
  if not isinstance(config, dict):
    return False
  return _LAYER_UNDEFINED_CONFIG_KEY not in config
def default(method):
  """Decorates a method to detect overrides in subclasses."""
  setattr(method, '_is_default', True)
  return method
def is_default(method):
  """Check if a method is decorated with the `default` wrapper."""
  try:
    return method._is_default
  except AttributeError:
    return False
def populate_dict_with_module_objects(target_dict, modules, obj_filter):
  """Copies every attribute of `modules` accepted by `obj_filter` into `target_dict`."""
  for module in modules:
    for attr_name in dir(module):
      candidate = getattr(module, attr_name)
      if obj_filter(candidate):
        target_dict[attr_name] = candidate
class LazyLoader(python_types.ModuleType):
  """Lazily import a module, mainly to avoid pulling in large dependencies."""

  def __init__(self, local_name, parent_module_globals, name):
    super(LazyLoader, self).__init__(name)
    # Name under which the module will be inserted into the parent namespace.
    self._local_name = local_name
    self._parent_module_globals = parent_module_globals

  def _load(self):
    """Imports the target module and caches it in the parent's globals."""
    module = importlib.import_module(self.__name__)
    self._parent_module_globals[self._local_name] = module
    # Copy the module's dict onto this object so future attribute lookups
    # succeed directly (`__getattr__` only fires for failed lookups), which
    # keeps access fast for anyone still holding a reference to the loader.
    self.__dict__.update(module.__dict__)
    return module

  def __getattr__(self, item):
    return getattr(self._load(), item)
# Aliases
# `custom_object_scope` is the snake_case public alias of the
# `CustomObjectScope` context manager class.
custom_object_scope = CustomObjectScope  # pylint: disable=invalid-name
| 43,166 | 34.324877 | 112 | py |
keras | keras-master/keras/utils/traceback_utils.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to Keras exception stack trace prettifying."""
import inspect
import os
import sys
import traceback
import types
import tensorflow.compat.v2 as tf
# Path fragments whose stack frames are hidden from filtered tracebacks:
# the Keras package root and TensorFlow's internal `python` tree.
_EXCLUDED_PATHS = (
    os.path.abspath(os.path.join(__file__, '..', '..')),
    os.path.join('tensorflow', 'python'),
)
def include_frame(fname):
  """Returns True if a frame from file `fname` should be kept in tracebacks."""
  return all(excluded not in fname for excluded in _EXCLUDED_PATHS)
def _process_traceback_frames(tb):
  """Iterate through traceback frames and return a new, filtered traceback."""
  filtered = None
  frames = list(traceback.walk_tb(tb))
  # Walk innermost-first so the rebuilt linked list ends up in original order.
  for frame, lineno in reversed(frames):
    if include_frame(frame.f_code.co_filename):
      filtered = types.TracebackType(filtered, frame, frame.f_lasti, lineno)
  if filtered is None and frames:
    # If no frames were kept during filtering, fall back to the outermost
    # frame so the user still sees some traceback.
    frame, lineno = frames[-1]
    filtered = types.TracebackType(filtered, frame, frame.f_lasti, lineno)
  return filtered
def filter_traceback(fn):
  """Filter out Keras-internal stack trace frames in exceptions raised by fn."""
  # Traceback surgery relies on constructing `types.TracebackType`, which is
  # only possible on Python 3.7+; on older versions return `fn` unchanged.
  if sys.version_info.major != 3 or sys.version_info.minor < 7:
    return fn
  def error_handler(*args, **kwargs):
    # Respect the global TF switch: pass through untouched when disabled.
    if not tf.debugging.is_traceback_filtering_enabled():
      return fn(*args, **kwargs)
    filtered_tb = None
    try:
      return fn(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
      filtered_tb = _process_traceback_frames(e.__traceback__)
      # `from None` suppresses exception chaining noise in the re-raise.
      raise e.with_traceback(filtered_tb) from None
    finally:
      # Drop the local reference to break the traceback <-> frame cycle.
      del filtered_tb
  return tf.__internal__.decorator.make_decorator(fn, error_handler)
def inject_argument_info_in_traceback(fn, object_name=None):
  """Add information about call argument values to an error message.
  Arguments:
    fn: Function to wrap. Exceptions raised by the this function will be
      re-raised with additional information added to the error message,
      displaying the values of the different arguments that the function
      was called with.
    object_name: String, display name of the class/function being called,
      e.g. `'layer "layer_name" (LayerClass)'`.
  Returns:
    A wrapped version of `fn`.
  """
  def error_handler(*args, **kwargs):
    signature = None
    bound_signature = None
    try:
      return fn(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
      if hasattr(e, '_keras_call_info_injected'):
        # Only inject info for the innermost failing call
        raise e
      signature = inspect.signature(fn)
      try:
        # The first argument is `self`, so filter it out
        bound_signature = signature.bind(*args, **kwargs)
      except TypeError:
        # Likely unbindable arguments
        raise e
      # Add argument context
      arguments_context = []
      for arg in list(signature.parameters.values()):
        if arg.name in bound_signature.arguments:
          value = tf.nest.map_structure(
              format_argument_value, bound_signature.arguments[arg.name])
        else:
          # Parameter was not supplied; show its declared default instead.
          value = arg.default
        arguments_context.append(f'  • {arg.name}={value}')
      if arguments_context:
        arguments_context = '\n'.join(arguments_context)
        # Get original error message and append information to it.
        if isinstance(e, tf.errors.OpError):
          message = e.message
        elif e.args:
          # Canonically, the 1st argument in an exception is the error message.
          # This works for all built-in Python exceptions.
          message = e.args[0]
        else:
          message = ''
        message = (
            'Exception encountered when calling '
            f'{object_name if object_name else fn.__name__}.\n\n'
            f'{message}\n\n'
            f'Call arguments received:\n{arguments_context}')
        # Reraise exception, with added context
        if isinstance(e, tf.errors.OpError):
          new_e = e.__class__(e.node_def, e.op, message, e.error_code)
        else:
          try:
            # For standard exceptions such as ValueError, TypeError, etc.
            new_e = e.__class__(message)
          except TypeError:
            # For any custom error that doesn't have a standard signature.
            new_e = RuntimeError(message)
        # Mark the new exception so outer wrapped callers re-raise as-is.
        new_e._keras_call_info_injected = True  # pylint: disable=protected-access
      else:
        new_e = e
      # Preserve the original traceback; `from None` avoids chaining noise.
      raise new_e.with_traceback(e.__traceback__) from None
    finally:
      # Drop local references to avoid keeping frames alive via the signature.
      del signature
      del bound_signature
  return tf.__internal__.decorator.make_decorator(fn, error_handler)
def format_argument_value(value):
  # Returns a short, readable representation of `value` for error messages.
  if isinstance(value, tf.Tensor):
    # Simplified representation for eager / graph tensors
    # to keep messages readable
    return f'tf.Tensor(shape={value.shape}, dtype={value.dtype.name})'
  return repr(value)
| 5,527 | 33.987342 | 82 | py |
keras | keras-master/keras/utils/traceback_utils_test.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for traceback_utils."""
from keras import layers
from keras.utils import traceback_utils
import tensorflow.compat.v2 as tf
class TracebackUtilsTest(tf.test.TestCase):
  """Tests for `traceback_utils.inject_argument_info_in_traceback`."""
  def test_info_injection_basics(self):
    """The injected message names the callable and lists all arguments."""
    def error_fn(arg_1, arg_2, keyword_arg_1=None, keyword_arg_2=None):
      raise ValueError('Original message')
    with self.assertRaises(ValueError) as e:
      traceback_utils.inject_argument_info_in_traceback(
          error_fn, 'ObjName')(1, 2, keyword_arg_1=3, keyword_arg_2=4)
    self.assertIn('Original message', str(e.exception))
    self.assertIn('Exception encountered when calling ObjName',
                  str(e.exception))
    self.assertIn('Call arguments received:', str(e.exception))
    self.assertIn('arg_1=1', str(e.exception))
    self.assertIn('arg_2=2', str(e.exception))
    self.assertIn('keyword_arg_1=3', str(e.exception))
    self.assertIn('keyword_arg_2=4', str(e.exception))
    # Without `object_name`, the function's own name is used instead.
    with self.assertRaises(ValueError) as e:
      traceback_utils.inject_argument_info_in_traceback(
          error_fn)(1, 2, keyword_arg_1=3, keyword_arg_2=4)
    self.assertIn('Exception encountered when calling error_fn',
                  str(e.exception))
  def test_info_injection_no_args(self):
    """No argument block is added for a zero-argument callable."""
    def error_fn():
      raise ValueError('Original message')
    with self.assertRaises(ValueError) as e:
      traceback_utils.inject_argument_info_in_traceback(error_fn)()
    self.assertEqual(str(e.exception).count('Call arguments received:'), 0)
  def test_info_injection_unbindable(self):
    """Unbindable calls re-raise the original TypeError untouched."""
    def error_fn(arg_1, keyword_arg_1=1):
      return arg_1 + keyword_arg_1
    with self.assertRaises(TypeError) as e:
      traceback_utils.inject_argument_info_in_traceback(error_fn)()
    self.assertIn('missing 1 required positional argument', str(e.exception))
  def test_info_injection_nested(self):
    """Argument info is injected exactly once for nested failures."""
    def inner_fn(arg_1):
      raise ValueError('Original message')
    def outer_fn(arg_1):
      return inner_fn(arg_1)
    with self.assertRaises(ValueError) as e:
      traceback_utils.inject_argument_info_in_traceback(
          outer_fn)(1)
    self.assertEqual(str(e.exception).count('Call arguments received:'), 1)
  def test_info_injection_tf_op_error(self):
    """TF OpErrors keep their message and also gain the argument block."""
    def error_fn(arg_1, keyword_arg_1=1):
      return arg_1 + keyword_arg_1 + tf.zeros((2, 3))
    with self.assertRaises(tf.errors.InvalidArgumentError) as e:
      traceback_utils.inject_argument_info_in_traceback(error_fn)(
          tf.zeros((3, 3)))
    self.assertIn('Incompatible shapes', str(e.exception))
    self.assertIn('Call arguments received', str(e.exception))
class LayerCallInfoInjectionTest(tf.test.TestCase):
  """Tests argument-info injection through real `Layer.__call__` paths."""
  def assert_info_injected(self, fn):
    # Runs `fn` and checks any raised error contains exactly one
    # "Call arguments received:" block.
    # NOTE(review): if `fn` never raises, this helper asserts nothing —
    # confirm every `fn` passed here is expected to fail.
    tf.debugging.enable_traceback_filtering()
    try:
      fn()
    except Exception as e:  # pylint: disable=broad-except
      # Info should be injected exactly once.
      self.assertEqual(str(e).count('Call arguments received:'), 1)  # pylint: disable=g-assert-in-except
  def test_custom_layer_call_nested(self):
    """Only the innermost failing layer call gets the argument block."""
    class InnerLayer(layers.Layer):
      def call(self, inputs, training=False, mask=None):
        return inputs + tf.zeros((3, 4))
    class OuterLayer(layers.Layer):
      def __init__(self):
        super().__init__()
        self.inner = InnerLayer()
      def call(self, inputs, training=True):
        return self.inner(inputs)
    def fn():
      layer = OuterLayer()
      layer(tf.zeros((3, 5)), training=False)
    self.assert_info_injected(fn)
  def test_custom_layer_call_eager_dense_input(self):
    """Shape-mismatch error on a dense eager input carries argument info."""
    class MyLayer(layers.Layer):
      def call(self, inputs, training=False, mask=None):
        return inputs + tf.zeros((3, 4))
    def fn():
      layer = MyLayer()
      layer(tf.zeros((3, 5)), training=False)
    self.assert_info_injected(fn)
  def test_custom_layer_call_eager_sparse_input(self):
    """Same as above but with a SparseTensor input."""
    class MyLayer(layers.Layer):
      def call(self, inputs, training=False, mask=None):
        return inputs + tf.zeros((3, 4))
    def fn():
      layer = MyLayer()
      layer(
          tf.SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[3, 5]),
          training=False)
    self.assert_info_injected(fn)
  def test_custom_layer_call_eager_ragged_input(self):
    """Same as above but with a RaggedTensor input."""
    class MyLayer(layers.Layer):
      def call(self, inputs, training=False, mask=None):
        return inputs + tf.zeros((3, 4))
    def fn():
      layer = MyLayer()
      layer(tf.ragged.constant([[0, 0, 0], [0, 0]]), training=False)
    self.assert_info_injected(fn)
  def test_custom_layer_call_symbolic(self):
    """Info is also injected when calling on symbolic (graph) inputs."""
    class MyLayer(layers.Layer):
      def call(self, inputs, training=False, mask=None):
        return inputs + tf.zeros((3, 4))
    def fn():
      layer = MyLayer()
      layer(layers.Input((3, 5)), training=False)
    self.assert_info_injected(fn)
  def test_custom_layer_call_unbindable(self):
    """Calls with no positional input raise the dedicated Layer error."""
    class MyLayer(layers.Layer):
      def __init__(self):
        super().__init__()
        self.input_spec = layers.InputSpec(shape=(3, 4))
      def call(self, inputs, training=False, mask=None):
        return inputs + tf.zeros((3, 4))
    def fn():
      layer = MyLayer()
      layer(bad=True, arguments=True)
    with self.assertRaisesRegex(
        ValueError, 'The first argument to `Layer.call` must always'):
      fn()
if __name__ == '__main__':
tf.test.main()
| 6,016 | 30.176166 | 105 | py |
keras | keras-master/keras/utils/kernelized_utils_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized_utils.py."""
import tensorflow.compat.v2 as tf
import functools
from absl.testing import parameterized
from keras.utils import kernelized_utils
def _exact_gaussian(stddev):
  # Returns the exact (non-approximated) Gaussian kernel with bandwidth
  # `stddev`, partially applied so tests can call it as `fn(x, y)`.
  return functools.partial(
      kernelized_utils.exact_gaussian_kernel, stddev=stddev)
def _exact_laplacian(stddev):
  # Returns the exact (non-approximated) Laplacian kernel with bandwidth
  # `stddev`, partially applied so tests can call it as `fn(x, y)`.
  return functools.partial(
      kernelized_utils.exact_laplacian_kernel, stddev=stddev)
class KernelizedUtilsTest(tf.test.TestCase, parameterized.TestCase):
  """Checks exact Gaussian/Laplacian kernel values on hand-picked inputs."""
  @parameterized.named_parameters(
      ('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]),
      ('laplacian', _exact_laplacian(stddev=50.0), [[1.0]]))
  def test_equal_vectors(self, exact_kernel_fn, expected_values):
    """Identical vectors give exactly the identity kernel value."""
    x = tf.constant([0.5, -0.5, -0.5, 0.5])
    y = tf.constant([0.5, -0.5, -0.5, 0.5])
    exact_kernel = exact_kernel_fn(x, y)
    shape = exact_kernel.shape.as_list()
    self.assertLen(shape, 2)
    # x and y are identical and therefore K(x, y) will be precisely equal to
    # the identity value of the kernel.
    self.assertAllClose(expected_values, exact_kernel, atol=1e-6)
  @parameterized.named_parameters(
      ('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]),
      ('laplacian', _exact_laplacian(stddev=50.0), [[1.0]]))
  def test_almost_identical_vectors(self, exact_kernel_fn, expected_values):
    """Almost identical vectors give the identity kernel value."""
    x = tf.constant([1.0, 0.4, -2.1, -1.1])
    y = tf.constant([1.01, 0.39, -2.099, -1.101])
    exact_kernel = exact_kernel_fn(x, y)
    shape = exact_kernel.shape.as_list()
    self.assertLen(shape, 2)
    # x and y are almost identical and therefore K(x, y) will be almost equal to
    # the identity value of the kernel.
    self.assertAllClose(expected_values, exact_kernel, atol=1e-3)
  @parameterized.named_parameters(
      ('gaussian', _exact_gaussian(stddev=1.0), [[0.99], [0.977]]),
      ('laplacian', _exact_laplacian(stddev=5.0), [[0.96], [0.94]]))
  def test_similar_matrices(self, exact_kernel_fn, expected_values):
    """Pairwise "close" vectors give high kernel values (similarity scores)."""
    x = tf.constant([1.0, 3.4, -2.1, 0.9, 3.3, -2.0], shape=[2, 3])
    y = tf.constant([1.1, 3.35, -2.05])
    exact_kernel = exact_kernel_fn(x, y)
    shape = exact_kernel.shape.as_list()
    self.assertLen(shape, 2)
    # The 2 rows of x are close to y. The pairwise kernel values (similarity
    # scores) are somewhat close to the identity value of the kernel.
    self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
  @parameterized.named_parameters(
      ('gaussian', _exact_gaussian(stddev=2.0), [[.997, .279], [.251, 1.],
                                                 [.164, 0.019]]),
      ('laplacian', _exact_laplacian(stddev=2.0), [[.904, .128], [.116, 1.],
                                                   [.07, 0.027]]))
  def test_matrices_varying_similarity(self, exact_kernel_fn, expected_values):
    """Test matrices with row vectors of varying pairwise similarity."""
    x = tf.constant([1.0, 2., -2., 0.9, 3.3, -1.0], shape=[3, 2])
    y = tf.constant([1.1, 2.1, -2., 0.9], shape=[2, 2])
    exact_kernel = exact_kernel_fn(x, y)
    shape = exact_kernel.shape.as_list()
    self.assertLen(shape, 2)
    self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
  @parameterized.named_parameters(
      ('gaussian', _exact_gaussian(stddev=1.0), [[0.0]]),
      ('laplacian', _exact_laplacian(stddev=1.0), [[0.0]]))
  def test_completely_dissimilar_vectors(self, exact_kernel_fn,
                                         expected_values):
    """Very dissimilar vectors give very low similarity scores."""
    x = tf.constant([1.0, 3.4, -2.1, -5.1])
    y = tf.constant([0.5, 2.1, 1.0, 3.0])
    exact_kernel = exact_kernel_fn(x, y)
    shape = exact_kernel.shape.as_list()
    self.assertLen(shape, 2)
    # x and y are very "far" from each other and so the corresponding kernel
    # value will be very low.
    self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
if __name__ == '__main__':
tf.test.main()
| 4,825 | 42.089286 | 80 | py |
keras | keras-master/keras/utils/tf_contextlib.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the contextlib module."""
import tensorflow.compat.v2 as tf
import contextlib as _contextlib
def contextmanager(target):
  """A tf_decorator-aware wrapper for `contextlib.contextmanager`.

  Usage is identical to `contextlib.contextmanager`.

  Args:
    target: A callable to be wrapped in a contextmanager.

  Returns:
    A callable that can be used inside of a `with` statement.
  """
  wrapped = _contextlib.contextmanager(target)
  # Wrap with a TF decorator so introspection utilities see `target`.
  return tf.__internal__.decorator.make_decorator(target, wrapped, 'contextmanager')
| 1,267 | 36.294118 | 92 | py |
keras | keras-master/keras/utils/data_utils_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_utils."""
import tensorflow.compat.v2 as tf
from itertools import cycle
import os
import tarfile
import urllib
import zipfile
import numpy as np
import keras
from keras.utils import data_utils
class TestGetFile(tf.test.TestCase):
  """Tests `keras.utils.data_utils.get_file` download/extract/validation."""
  def test_get_file_and_validate_it(self):
    """Tests get_file from a url, plus extraction and validation.
    """
    dest_dir = self.get_temp_dir()
    orig_dir = self.get_temp_dir()
    # Build a text file plus tar.gz and zip archives of it as fixtures.
    text_file_path = os.path.join(orig_dir, 'test.txt')
    zip_file_path = os.path.join(orig_dir, 'test.zip')
    tar_file_path = os.path.join(orig_dir, 'test.tar.gz')
    with open(text_file_path, 'w') as text_file:
      text_file.write('Float like a butterfly, sting like a bee.')
    with tarfile.open(tar_file_path, 'w:gz') as tar_file:
      tar_file.add(text_file_path)
    with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
      zip_file.write(text_file_path)
    # "Download" via a file:// origin, untar, and validate hashes.
    origin = urllib.parse.urljoin(
        'file://', urllib.request.pathname2url(os.path.abspath(tar_file_path)))
    path = keras.utils.data_utils.get_file('test.txt', origin,
                                           untar=True, cache_subdir=dest_dir)
    filepath = path + '.tar.gz'
    hashval_sha256 = keras.utils.data_utils._hash_file(filepath)
    hashval_md5 = keras.utils.data_utils._hash_file(filepath, algorithm='md5')
    path = keras.utils.data_utils.get_file(
        'test.txt', origin, md5_hash=hashval_md5,
        untar=True, cache_subdir=dest_dir)
    path = keras.utils.data_utils.get_file(
        filepath, origin, file_hash=hashval_sha256,
        extract=True, cache_subdir=dest_dir)
    self.assertTrue(os.path.exists(filepath))
    self.assertTrue(keras.utils.data_utils.validate_file(filepath,
                                                         hashval_sha256))
    self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_md5))
    os.remove(filepath)
    # Repeat with the zip archive.
    origin = urllib.parse.urljoin(
        'file://', urllib.request.pathname2url(os.path.abspath(zip_file_path)))
    hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path)
    hashval_md5 = keras.utils.data_utils._hash_file(zip_file_path,
                                                    algorithm='md5')
    path = keras.utils.data_utils.get_file(
        'test', origin, md5_hash=hashval_md5,
        extract=True, cache_subdir=dest_dir)
    path = keras.utils.data_utils.get_file(
        'test', origin, file_hash=hashval_sha256,
        extract=True, cache_subdir=dest_dir)
    self.assertTrue(os.path.exists(path))
    self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_sha256))
    self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5))
    os.remove(path)
    # When `fname` is omitted, the name is derived from the origin URL.
    for file_path, extract in [(text_file_path, False), (tar_file_path, True),
                               (zip_file_path, True)]:
      origin = urllib.parse.urljoin(
          'file://', urllib.request.pathname2url(os.path.abspath(file_path)))
      hashval_sha256 = keras.utils.data_utils._hash_file(file_path)
      path = keras.utils.data_utils.get_file(
          origin=origin,
          file_hash=hashval_sha256,
          extract=extract,
          cache_subdir=dest_dir)
      self.assertTrue(os.path.exists(path))
      self.assertTrue(
          keras.utils.data_utils.validate_file(path, hashval_sha256))
      os.remove(path)
    # Use `assertRaisesRegex`: `assertRaisesRegexp` is the deprecated
    # Python 2 alias and warns on modern Python 3.
    with self.assertRaisesRegex(ValueError, 'Please specify the "origin".*'):
      _ = keras.utils.data_utils.get_file()
  def test_get_file_with_tgz_extension(self):
    """Tests get_file from a url, plus extraction and validation."""
    dest_dir = self.get_temp_dir()
    orig_dir = self.get_temp_dir()
    text_file_path = os.path.join(orig_dir, 'test.txt')
    tar_file_path = os.path.join(orig_dir, 'test.tar.gz')
    with open(text_file_path, 'w') as text_file:
      text_file.write('Float like a butterfly, sting like a bee.')
    with tarfile.open(tar_file_path, 'w:gz') as tar_file:
      tar_file.add(text_file_path)
    origin = urllib.parse.urljoin(
        'file://', urllib.request.pathname2url(os.path.abspath(tar_file_path)))
    # A '.tar.gz' suffix on `fname` should be stripped after untarring.
    path = keras.utils.data_utils.get_file(
        'test.txt.tar.gz', origin, untar=True, cache_subdir=dest_dir)
    self.assertEndsWith(path, '.txt')
    self.assertTrue(os.path.exists(path))
class TestSequence(keras.utils.data_utils.Sequence):
  """Deterministic 100-batch test sequence whose items scale with their index.

  `on_epoch_end` multiplies the scaling factor by 5, so each epoch's
  batches are 5x the previous epoch's.
  """

  def __init__(self, shape, value=1.):
    self.shape = shape
    self.inner = value

  def __len__(self):
    # Fixed epoch length of 100 batches.
    return 100

  def __getitem__(self, item):
    # Batch `item` is a constant array whose entries equal item * inner.
    return np.ones(self.shape, dtype=np.uint32) * item * self.inner

  def on_epoch_end(self):
    self.inner *= 5.0
class FaultSequence(keras.utils.data_utils.Sequence):
  """Sequence whose item lookups always fail, for error-propagation tests."""

  def __len__(self):
    return 100

  def __getitem__(self, item):
    # Simulate a data-loading failure for every index.
    raise IndexError(item, 'item is not present')
@data_utils.threadsafe_generator
def create_generator_from_sequence_threads(ds):
  """Yields items of `ds` forever, wrapping around after the last index."""
  size = len(ds)
  pos = 0
  while size:
    yield ds[pos % size]
    pos += 1
def create_generator_from_sequence_pcs(ds):
  """Yields items of `ds` forever, repeating indices 0..len(ds)-1 in order."""
  index_order = list(range(len(ds)))
  while index_order:
    for idx in index_order:
      yield ds[idx]
class TestEnqueuers(tf.test.TestCase):
  """Tests for `GeneratorEnqueuer` and `OrderedEnqueuer`."""

  def test_generator_enqueuer_threads(self):
    """Thread-backed generator enqueuer only yields values from the data."""
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])),
        use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(int(next(gen_output)[0, 0, 0, 0]))
    # Every produced value must come from the sequence's batches 0..99
    # (order across worker threads is not asserted).
    self.assertEqual(len(set(acc) - set(range(100))), 0)
    enqueuer.stop()

  @data_utils.dont_use_multiprocessing_pool
  def test_generator_enqueuer_processes(self):
    """Process-backed generator enqueuer yields batches without order guarantee."""
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])),
        use_multiprocessing=True)
    enqueuer.start(4, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(300):
      acc.append(int(next(gen_output)[0, 0, 0, 0]))
    self.assertNotEqual(acc, list(range(100)))
    enqueuer.stop()

  def test_generator_enqueuer_fail_threads(self):
    """Generator errors propagate to the consumer when using threads."""
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_threads(FaultSequence()),
        use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(IndexError):
      next(gen_output)

  @data_utils.dont_use_multiprocessing_pool
  def test_generator_enqueuer_fail_processes(self):
    """Generator errors propagate to the consumer when using processes."""
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_threads(FaultSequence()),
        use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(IndexError):
      next(gen_output)

  def test_ordered_enqueuer_threads(self):
    """Thread-backed `OrderedEnqueuer` preserves batch order."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    self.assertEqual(acc, list(range(100)))
    enqueuer.stop()

  @data_utils.dont_use_multiprocessing_pool
  def test_ordered_enqueuer_processes(self):
    """Process-backed `OrderedEnqueuer` preserves batch order."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    self.assertEqual(acc, list(range(100)))
    enqueuer.stop()

  def test_ordered_enqueuer_fail_threads(self):
    """Sequence errors propagate from an `OrderedEnqueuer` using threads."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        FaultSequence(), use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(IndexError):
      next(gen_output)

  @data_utils.dont_use_multiprocessing_pool
  def test_ordered_enqueuer_fail_processes(self):
    """Sequence errors propagate from an `OrderedEnqueuer` using processes."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        FaultSequence(), use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(IndexError):
      next(gen_output)

  @data_utils.dont_use_multiprocessing_pool
  def test_on_epoch_end_processes(self):
    """`on_epoch_end` updates are visible to process-backed workers."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(200):
      acc.append(next(gen_output)[0, 0, 0, 0])
    # Check that order was kept in OrderedEnqueuer with processes: the second
    # epoch's batches reflect the 5x rescale done by `on_epoch_end`.
    self.assertEqual(acc[100:], list([k * 5 for k in range(100)]))
    enqueuer.stop()

  @data_utils.dont_use_multiprocessing_pool
  def test_context_switch(self):
    """Two enqueuers can run concurrently without mixing their sequences."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
    enqueuer2 = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3], value=15), use_multiprocessing=True)
    enqueuer.start(3, 10)
    enqueuer2.start(3, 10)
    gen_output = enqueuer.get()
    gen_output2 = enqueuer2.get()
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    self.assertEqual(acc[-1], 99)
    # One epoch is completed, so `enqueuer` will switch to its next epoch.
    acc = []
    self.skipTest('b/145555807 flakily timing out.')
    # NOTE: everything below is skipped (see b/145555807).
    for _ in range(100):
      acc.append(next(gen_output2)[0, 0, 0, 0])
    self.assertEqual(acc[-1], 99 * 15)
    # One epoch has been completed, so `enqueuer2` will switch as well.
    # Be sure that both Sequences were updated by their `on_epoch_end` (5x).
    self.assertEqual(next(gen_output)[0, 0, 0, 0], 0)
    self.assertEqual(next(gen_output)[0, 0, 0, 0], 5)
    self.assertEqual(next(gen_output2)[0, 0, 0, 0], 0)
    self.assertEqual(next(gen_output2)[0, 0, 0, 0], 15 * 5)
    # Tear down everything.
    enqueuer.stop()
    enqueuer2.stop()

  def test_on_epoch_end_threads(self):
    """`on_epoch_end` updates are visible to thread-backed workers."""
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    # Check that order was kept in OrderedEnqueuer with threads: the second
    # epoch's batches reflect the 5x rescale done by `on_epoch_end`.
    self.assertEqual(acc, list([k * 5 for k in range(100)]))
    enqueuer.stop()
if __name__ == '__main__':
  # Bazel sets these environment variables to very long paths. Tempfile uses
  # them to create long paths, and in turn the multiprocessing library tries
  # to create sockets named after those paths. Drop whatever bazel wrote so
  # tests don't fail because socket addresses are too long.
  for env_name in ('TMPDIR', 'TMP', 'TEMP'):
    os.environ.pop(env_name, None)
  tf.test.main()
| 11,502 | 34.285276 | 80 | py |
keras | keras-master/keras/utils/vis_utils_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Vis utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.applications import efficientnet
from keras.utils import vis_utils
class ModelToDotFormatTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `vis_utils.model_to_dot` and `vis_utils.plot_model`.

  Plotting needs the optional pydot + graphviz dependencies, so each test
  tolerates `ImportError` by passing silently.
  """

  def test_plot_model_cnn(self):
    """Plotting a small sequential CNN writes an image file."""
    model = keras.Sequential()
    model.add(
        keras.layers.Conv2D(
            filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
    model.add(keras.layers.Flatten(name='flat'))
    model.add(keras.layers.Dense(5, name='dense'))
    dot_img_file = 'model_1.png'
    try:
      vis_utils.plot_model(
          model, to_file=dot_img_file, show_shapes=True, show_dtype=True)
      self.assertTrue(tf.io.gfile.exists(dot_img_file))
      tf.io.gfile.remove(dot_img_file)
    except ImportError:
      # pydot/graphviz not installed.
      pass

  def test_plot_model_with_wrapped_layers_and_models(self):
    """Wrapped layers, wrapped models and shared submodels can be plotted."""
    inputs = keras.Input(shape=(None, 3))
    lstm = keras.layers.LSTM(6, return_sequences=True, name='lstm')
    x = lstm(inputs)
    # Add layer inside a Wrapper
    bilstm = keras.layers.Bidirectional(
        keras.layers.LSTM(16, return_sequences=True, name='bilstm'))
    x = bilstm(x)
    # Add model inside a Wrapper
    submodel = keras.Sequential(
        [keras.layers.Dense(32, name='dense', input_shape=(None, 32))]
    )
    wrapped_dense = keras.layers.TimeDistributed(submodel)
    x = wrapped_dense(x)
    # Add shared submodel
    outputs = submodel(x)
    model = keras.Model(inputs, outputs)
    dot_img_file = 'model_2.png'
    try:
      vis_utils.plot_model(
          model,
          to_file=dot_img_file,
          show_shapes=True,
          show_dtype=True,
          expand_nested=True)
      self.assertTrue(tf.io.gfile.exists(dot_img_file))
      tf.io.gfile.remove(dot_img_file)
    except ImportError:
      pass

  def test_plot_model_with_add_loss(self):
    """Models with `add_loss` can be plotted, functional and sequential."""
    inputs = keras.Input(shape=(None, 3))
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    model.add_loss(tf.reduce_mean(outputs))
    dot_img_file = 'model_3.png'
    try:
      vis_utils.plot_model(
          model,
          to_file=dot_img_file,
          show_shapes=True,
          show_dtype=True,
          expand_nested=True)
      self.assertTrue(tf.io.gfile.exists(dot_img_file))
      tf.io.gfile.remove(dot_img_file)
    except ImportError:
      pass

    # Same check for a sequential model with an add_loss on its output.
    model = keras.Sequential([
        keras.Input(shape=(None, 3)), keras.layers.Dense(1)])
    model.add_loss(tf.reduce_mean(model.output))
    dot_img_file = 'model_4.png'
    try:
      vis_utils.plot_model(
          model,
          to_file=dot_img_file,
          show_shapes=True,
          show_dtype=True,
          expand_nested=True)
      self.assertTrue(tf.io.gfile.exists(dot_img_file))
      tf.io.gfile.remove(dot_img_file)
    except ImportError:
      pass

  @parameterized.parameters(
      {'layer_range': ['block1a_project_conv', 'block1a_activation']},
      {'layer_range': ['block1a_activation', 'block1a_project_conv']},
      {'layer_range': [r'block*', 'block2a_se_excite']},
      {'layer_range': [r'block\da_activation', r'block\da_project_bn']})
  def test_dot_layer_range(self, layer_range):
    """Dot output restricted to `layer_range` covers exactly those layers."""
    model = efficientnet.EfficientNetB0(weights=None)
    layer_ids_from_model = get_layer_ids_from_model(model, layer_range)
    try:
      dot = vis_utils.model_to_dot(model, layer_range=layer_range)
      dot_edges = dot.get_edges()
      layer_ids_from_dot = get_layer_ids_from_dot(dot_edges)
      self.assertAllEqual(
          sorted(layer_ids_from_model), sorted(layer_ids_from_dot))
    except ImportError:
      pass

  @parameterized.parameters(
      {'layer_range': ['block1a_project_conv', 'block1a_activation']},
      {'layer_range': ['block1a_activation', 'block1a_project_conv']},
      {'layer_range': [r'block*', 'block2a_se_excite']},
      {'layer_range': [r'block\da_activation', r'block\da_project_bn']})
  def test_plot_layer_range(self, layer_range):
    """`plot_model` restricted to `layer_range` writes an image file."""
    model = efficientnet.EfficientNetB0(weights=None)
    effnet_subplot = 'model_effnet.png'
    try:
      vis_utils.plot_model(
          model, to_file=effnet_subplot, layer_range=layer_range)
      self.assertTrue(tf.io.gfile.exists(effnet_subplot))
    except ImportError:
      pass
    finally:
      if tf.io.gfile.exists(effnet_subplot):
        tf.io.gfile.remove(effnet_subplot)

  @parameterized.parameters(
      {'layer_range': ['block1a_se_squeeze', 'block2a_project_conv']},
      {'layer_range': [r'block\da_se_reshape', r'block*']})
  def test_layer_range_assertion_fail(self, layer_range):
    """These `layer_range` inputs are expected to raise `AssertionError`."""
    model = efficientnet.EfficientNetB0(weights=None)
    try:
      with self.assertRaises(AssertionError):
        vis_utils.model_to_dot(model, layer_range=layer_range)
      with self.assertRaises(AssertionError):
        vis_utils.plot_model(model, layer_range=layer_range)
    except ImportError:
      pass

  @parameterized.parameters(
      {'layer_range': ['block1a_activation']},
      {'layer_range': []},
      {'layer_range': ['input', 'block1a_activation', 'block1a_project_conv']},
      {'layer_range': [9, 'block1a_activation']},
      {'layer_range': [29, 9]},
      {'layer_range': ['block8a_se_reshape', 'block*']})
  def test_layer_range_value_fail(self, layer_range):
    """These `layer_range` inputs are expected to raise `ValueError`."""
    model = efficientnet.EfficientNetB0(weights=None)
    try:
      with self.assertRaises(ValueError):
        vis_utils.model_to_dot(model, layer_range=layer_range)
      with self.assertRaises(ValueError):
        vis_utils.plot_model(model, layer_range=layer_range)
    except ImportError:
      pass
def get_layer_ids_from_model(model, layer_range):
  """Returns python object ids (as strings) of layers within `layer_range`."""
  bounds = vis_utils.get_layer_index_bound_by_layer_name(model, layer_range)
  return [
      str(id(layer))
      for index, layer in enumerate(model.layers)
      if bounds[0] <= index <= bounds[1]
  ]
def get_layer_ids_from_dot(dot_edges):
  """Collects unique node ids referenced by `dot_edges`, in first-seen order."""
  ordered_ids = {}
  for edge in dot_edges:
    for point in edge.obj_dict['points']:
      # Dict insertion order gives us first-seen ordering with O(1) dedupe.
      ordered_ids.setdefault(point, None)
  return list(ordered_ids)
if __name__ == '__main__':
  # Run this file's tests under the TensorFlow test runner.
  tf.test.main()
| 6,916 | 34.471795 | 80 | py |
keras | keras-master/keras/utils/dataset_creator_test.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_creator."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from keras import combinations
from keras.distribute import multi_worker_testing_utils
from keras.engine import data_adapter
from keras.engine import sequential
from keras.layers import core as core_layers
from keras.optimizer_v2 import gradient_descent
from keras.utils import dataset_creator
from tensorflow.python.training.server_lib import ClusterSpec
class DatasetCreatorTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `dataset_creator.DatasetCreator`, alone and under strategies."""

  def test_dataset_creator(self):
    """The constructor validates its argument; calling builds the dataset."""
    with self.assertRaisesRegex(
        TypeError, "`dataset_fn` for `DatasetCreator` must be a `callable`."):
      dataset_creator.DatasetCreator(2)

    dataset_fn = lambda: 3
    with self.assertRaisesRegex(
        TypeError, "The `callable` provided to `DatasetCreator` must return "
        "a Dataset."):
      dataset_creator.DatasetCreator(dataset_fn)()

    dataset_fn = lambda: tf.data.Dataset.from_tensor_slices([1, 1])
    got = dataset_creator.DatasetCreator(dataset_fn)()
    self.assertEqual(
        next(iter(got)),
        next(iter(tf.data.Dataset.from_tensor_slices([1, 1]))))

  def _get_dataset_fn(self):
    """Returns a `dataset_fn` yielding a sharded, batched, repeated dataset."""

    def dataset_fn(input_context):
      global_batch_size = 64
      batch_size = input_context.get_per_replica_batch_size(global_batch_size)
      dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat()
      dataset = dataset.shard(input_context.num_input_pipelines,
                              input_context.input_pipeline_id)
      dataset = dataset.batch(batch_size)
      dataset = dataset.prefetch(2)
      return dataset

    return dataset_fn

  @combinations.generate(combinations.combine(use_input_options=[True, False]))
  def test_dataset_creator_model_fit_without_strategy(self, use_input_options):
    """`model.fit` accepts a `DatasetCreator` outside any strategy scope."""
    model = sequential.Sequential([core_layers.Dense(10)])
    model.compile(gradient_descent.SGD(), loss="mse")
    input_options = tf.distribute.InputOptions() if use_input_options else None
    history = model.fit(
        dataset_creator.DatasetCreator(self._get_dataset_fn(), input_options),
        epochs=10,
        steps_per_epoch=10,
        verbose=0)
    self.assertLen(history.history["loss"], 10)

  def _get_parameter_server_strategy(self):
    """Builds a `ParameterServerStrategy` over an in-process test cluster."""
    cluster_def = multi_worker_testing_utils.create_in_process_cluster(
        num_workers=2, num_ps=1, rpc_layer="grpc")
    return tf.distribute.experimental.ParameterServerStrategy(
        SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc"))

  @combinations.generate(combinations.combine(use_input_options=[True, False]))
  def test_dataset_creator_usage_in_parameter_server_model_fit(
      self, use_input_options):
    """`model.fit` accepts a `DatasetCreator` under parameter-server training."""
    strategy = self._get_parameter_server_strategy()
    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
    model.compile(gradient_descent.SGD(), loss="mse")
    input_options = tf.distribute.InputOptions() if use_input_options else None
    history = model.fit(
        dataset_creator.DatasetCreator(self._get_dataset_fn(), input_options),
        epochs=10,
        steps_per_epoch=10,
        verbose=0)
    self.assertLen(history.history["loss"], 10)

  def test_dataset_creator_input_options(self):
    """`InputOptions` passed at construction reach the distributed dataset."""
    dataset_fn = lambda _: tf.data.Dataset.from_tensor_slices([1, 1])
    input_options = tf.distribute.InputOptions(
        experimental_fetch_to_device=True,
        experimental_per_replica_buffer_size=2)
    x = dataset_creator.DatasetCreator(dataset_fn, input_options=input_options)
    with tf.distribute.MultiWorkerMirroredStrategy().scope():
      data_handler = data_adapter.get_data_handler(
          x,
          steps_per_epoch=2,
          model=sequential.Sequential([core_layers.Dense(10)]))

    # Ensuring the resulting `DistributedDatasetsFromFunction` has the right
    # options.
    self.assertTrue(data_handler._dataset._options.experimental_fetch_to_device)
    self.assertEqual(
        data_handler._dataset._options.experimental_per_replica_buffer_size, 2)

  def test_dataset_creator_input_options_with_cluster_coordinator(self):
    """`InputOptions` propagate to iterators rebuilt on cluster workers."""
    dataset_fn = lambda _: tf.data.Dataset.from_tensor_slices([1, 1])
    input_options = tf.distribute.InputOptions(
        experimental_fetch_to_device=True,
        experimental_per_replica_buffer_size=2)
    x = dataset_creator.DatasetCreator(dataset_fn, input_options=input_options)
    strategy = self._get_parameter_server_strategy()
    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
      model._cluster_coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
          strategy)
      data_handler = data_adapter.get_data_handler(
          x, steps_per_epoch=2, model=model)

    iter_rv = iter(data_handler._dataset)._values[0]
    iter_rv._rebuild_on(model._cluster_coordinator._cluster.workers[0])
    distributed_iterator = iter_rv._get_values()

    # Ensuring the resulting `DistributedIterator` has the right options.
    self.assertTrue(distributed_iterator._options.experimental_fetch_to_device)
    self.assertEqual(
        distributed_iterator._options.experimental_per_replica_buffer_size, 2)
if __name__ == "__main__":
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| 6,042 | 40.965278 | 93 | py |
keras | keras-master/keras/utils/io_utils_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for io_utils."""
import tensorflow.compat.v2 as tf
import builtins
from pathlib import Path
from keras import keras_parameterized
from keras.utils import io_utils
class TestIOUtils(keras_parameterized.TestCase):
  """Unit tests for `keras.utils.io_utils`."""

  def test_ask_to_proceed_with_overwrite(self):
    """User replies decide the overwrite prompt; invalid input re-prompts."""
    with tf.compat.v1.test.mock.patch.object(builtins, 'input') as mocked_input:
      mocked_input.return_value = 'y'
      self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))

      mocked_input.return_value = 'n'
      self.assertFalse(
          io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))

      # An unrecognized answer ('m') makes the prompt ask again.
      mocked_input.side_effect = ['m', 'y']
      self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))

      mocked_input.side_effect = ['m', 'n']
      self.assertFalse(
          io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))

  def test_path_to_string(self):
    """`path_to_string` converts os.PathLike; other values pass through."""

    class PathLikeDummy:

      def __fspath__(self):
        return 'dummypath'

    opaque = object()
    # PathLike values are converted via __fspath__.
    self.assertEqual(io_utils.path_to_string(Path('path')), 'path')
    self.assertEqual(io_utils.path_to_string(PathLikeDummy()), 'dummypath')
    # Strings and arbitrary objects pass through untouched.
    self.assertEqual(io_utils.path_to_string('path'), 'path')
    self.assertIs(io_utils.path_to_string(opaque), opaque)
if __name__ == '__main__':
  # Run this file's tests under the TensorFlow test runner.
  tf.test.main()
| 2,070 | 31.873016 | 80 | py |
keras | keras-master/keras/utils/tf_inspect.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the inspect module."""
# pylint: disable=g-classes-have-attributes
import tensorflow.compat.v2 as tf
import collections
import functools
import inspect as _inspect
# `inspect.ArgSpec` (along with `inspect.getargspec`) was removed in
# Python 3.11, so fall back to an equivalent namedtuple on modern Pythons,
# mirroring the `FullArgSpec` fallback below. Field names and order match
# the stdlib definition, so callers are unaffected.
if hasattr(_inspect, 'ArgSpec'):
  ArgSpec = _inspect.ArgSpec  # pylint: disable=invalid-name
else:
  ArgSpec = collections.namedtuple(
      'ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
if hasattr(_inspect, 'FullArgSpec'):
  FullArgSpec = _inspect.FullArgSpec  # pylint: disable=invalid-name
else:
  FullArgSpec = collections.namedtuple('FullArgSpec', [
      'args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
      'annotations'
  ])
def _convert_maybe_argspec_to_fullargspec(argspec):
  """Returns `argspec` widened to a `FullArgSpec`.

  An `ArgSpec` is converted into a `FullArgSpec` with empty keyword-only
  fields; a value that is already a `FullArgSpec` is returned unchanged.

  Args:
    argspec: An `ArgSpec` or `FullArgSpec` instance.

  Returns:
    An equivalent `FullArgSpec`.
  """
  if isinstance(argspec, FullArgSpec):
    return argspec
  return FullArgSpec(
      args=argspec.args,
      varargs=argspec.varargs,
      varkw=argspec.keywords,  # `ArgSpec` names this field `keywords`.
      defaults=argspec.defaults,
      kwonlyargs=[],
      kwonlydefaults=None,
      annotations={})
if hasattr(_inspect, 'getfullargspec'):
  # Python 3: the native `getfullargspec` exists; synthesize the legacy
  # `_getargspec` from it.
  _getfullargspec = _inspect.getfullargspec  # pylint: disable=invalid-name

  def _getargspec(target):
    """A python3 version of getargspec.

    Calls `getfullargspec` and assigns args, varargs,
    varkw, and defaults to a python 2/3 compatible `ArgSpec`.

    The parameter name 'varkw' is changed to 'keywords' to fit the
    `ArgSpec` struct.

    Args:
      target: the target object to inspect.

    Returns:
      An ArgSpec with args, varargs, keywords, and defaults parameters
      from FullArgSpec.
    """
    fullargspecs = getfullargspec(target)
    argspecs = ArgSpec(
        args=fullargspecs.args,
        varargs=fullargspecs.varargs,
        keywords=fullargspecs.varkw,
        defaults=fullargspecs.defaults)
    return argspecs
else:
  # Python 2: the native `getargspec` exists; synthesize `_getfullargspec`
  # by widening its result.
  _getargspec = _inspect.getargspec

  def _getfullargspec(target):
    """A python2 version of getfullargspec.

    Args:
      target: the target object to inspect.

    Returns:
      A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
    """
    return _convert_maybe_argspec_to_fullargspec(getargspec(target))
def currentframe():
  """TFDecorator-aware replacement for inspect.currentframe."""
  # stack()[0] is this function's own record; stack()[1] belongs to the
  # caller, and its first element is the frame object itself.
  caller_record = _inspect.stack()[1]
  return caller_record[0]
def getargspec(obj):
  """TFDecorator-aware replacement for `inspect.getargspec`.

  Note: `getfullargspec` is recommended as the python 2/3 compatible
  replacement for this function.

  Args:
    obj: A function, partial function, or callable object, possibly decorated.

  Returns:
    The `ArgSpec` that describes the signature of the outermost decorator that
    changes the callable's signature, or the `ArgSpec` that describes
    the object if not decorated.

  Raises:
    ValueError: When callable's signature can not be expressed with
      ArgSpec.
    TypeError: For objects of unsupported types.
  """
  if isinstance(obj, functools.partial):
    return _get_argspec_for_partial(obj)
  decorators, target = tf.__internal__.decorator.unwrap(obj)
  # Prefer an argspec explicitly declared by an attached decorator, if any.
  spec = next((d.decorator_argspec
               for d in decorators
               if d.decorator_argspec is not None), None)
  if spec:
    return spec
  try:
    # Python3 will handle most callables here (not partial).
    return _getargspec(target)
  except TypeError:
    pass
  if isinstance(target, type):
    # For classes, try the constructor signatures in turn.
    try:
      return _getargspec(target.__init__)
    except TypeError:
      pass
    try:
      return _getargspec(target.__new__)
    except TypeError:
      pass
  # The `type(target)` ensures that if a class is received we don't return
  # the signature of its __call__ method.
  return _getargspec(type(target).__call__)
def _get_argspec_for_partial(obj):
  """Implements `getargspec` for `functools.partial` objects.

  Args:
    obj: The `functools.partial` object

  Returns:
    An `inspect.ArgSpec`

  Raises:
    ValueError: When callable's signature can not be expressed with
      ArgSpec.
  """
  # When callable is a functools.partial object, we construct its ArgSpec with
  # following strategy:
  # - If callable partial contains default value for positional arguments (ie.
  # object.args), then final ArgSpec doesn't contain those positional arguments.
  # - If callable partial contains default value for keyword arguments (ie.
  # object.keywords), then we merge them with wrapped target. Default values
  # from callable partial takes precedence over those from wrapped target.
  #
  # However, there is a case where it is impossible to construct a valid
  # ArgSpec. Python requires arguments that have no default values must be
  # defined before those with default values. ArgSpec structure is only valid
  # when this presumption holds true because default values are expressed as a
  # tuple of values without keywords and they are always assumed to belong to
  # last K arguments where K is number of default values present.
  #
  # Since functools.partial can give default value to any argument, this
  # presumption may no longer hold in some cases. For example:
  #
  # def func(m, n):
  #   return 2 * m + n
  # partialed = functools.partial(func, m=1)
  #
  # This example will result in m having a default value but n doesn't. This is
  # usually not allowed in Python and can not be expressed in ArgSpec correctly.
  #
  # Thus, we must detect cases like this by finding first argument with default
  # value and ensures all following arguments also have default values. When
  # this is not true, a ValueError is raised.
  n_prune_args = len(obj.args)
  partial_keywords = obj.keywords or {}
  args, varargs, keywords, defaults = getargspec(obj.func)
  # Pruning first n_prune_args arguments.
  args = args[n_prune_args:]
  # Partial function may give default value to any argument, therefore length
  # of default value list must be len(args) to allow each argument to
  # potentially be given a default value.
  no_default = object()  # Sentinel: distinguishes "no default" from None.
  all_defaults = [no_default] * len(args)
  if defaults:
    all_defaults[-len(defaults):] = defaults
  # Fill in default values provided by partial function in all_defaults.
  for kw, default in partial_keywords.items():
    if kw in args:
      idx = args.index(kw)
      all_defaults[idx] = default
    elif not keywords:
      raise ValueError('Function does not have **kwargs parameter, but '
                       'contains an unknown partial keyword.')
  # Find first argument with default value set.
  first_default = next(
      (idx for idx, x in enumerate(all_defaults) if x is not no_default), None)
  # If no default values are found, return ArgSpec with defaults=None.
  if first_default is None:
    return ArgSpec(args, varargs, keywords, None)
  # Checks if all arguments have default value set after first one.
  invalid_default_values = [
      args[i] for i, j in enumerate(all_defaults)
      if j is no_default and i > first_default
  ]
  if invalid_default_values:
    raise ValueError(f'Some arguments {invalid_default_values} do not have '
                     'default value, but they are positioned after those with '
                     'default values. This can not be expressed with ArgSpec.')
  return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
def getfullargspec(obj):
  """TFDecorator-aware replacement for `inspect.getfullargspec`.

  This wrapper emulates `inspect.getfullargspec` in Python 2.

  Args:
    obj: A callable, possibly decorated.

  Returns:
    The `FullArgSpec` that describes the signature of
    the outermost decorator that changes the callable's signature. If the
    callable is not decorated, `inspect.getfullargspec()` will be called
    directly on the callable.
  """
  decorators, target = tf.__internal__.decorator.unwrap(obj)
  for d in decorators:
    if d.decorator_argspec is not None:
      return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)
  return _getfullargspec(target)
def getcallargs(*func_and_positional, **named):
  """TFDecorator-aware replacement for inspect.getcallargs.

  Args:
    *func_and_positional: A callable, possibly decorated, followed by any
      positional arguments that would be passed to `func`.
    **named: The named argument dictionary that would be passed to `func`.

  Returns:
    A dictionary mapping `func`'s named arguments to the values they would
    receive if `func(*positional, **named)` were called.

  `getcallargs` will use the argspec from the outermost decorator that provides
  it. If no attached decorators modify argspec, the final unwrapped target's
  argspec will be used.
  """
  func, positional = func_and_positional[0], func_and_positional[1:]
  spec = getfullargspec(func)
  bound = named.copy()
  # Bound methods implicitly receive their receiver as the first positional.
  receiver = getattr(func, 'im_self', None) or getattr(func, '__self__', None)
  if ismethod(func) and receiver:
    positional = (receiver,) + positional
  # Assign positionals to the declared args not already supplied by name.
  unfilled = [name for name in spec.args if name not in bound]
  bound.update(zip(unfilled, positional))
  # Fill remaining gaps from declared defaults (positional, then kw-only).
  if spec.defaults:
    defaulted_args = spec.args[-len(spec.defaults):]
    for name, value in zip(defaulted_args, spec.defaults):
      bound.setdefault(name, value)
  if spec.kwonlydefaults is not None:
    for name, value in spec.kwonlydefaults.items():
      bound.setdefault(name, value)
  return bound
def getframeinfo(*args, **kwargs):
  """TFDecorator-aware replacement for inspect.getframeinfo."""
  return _inspect.getframeinfo(*args, **kwargs)
def getdoc(obj):
  """TFDecorator-aware replacement for inspect.getdoc.

  Args:
    obj: An object, possibly decorated.

  Returns:
    The docstring associated with the object.

  The outermost-decorated object is intended to have the most complete
  documentation, so the decorated parameter is not unwrapped.
  """
  return _inspect.getdoc(obj)


def getfile(obj):
  """TFDecorator-aware replacement for inspect.getfile."""
  unwrapped_object = tf.__internal__.decorator.unwrap(obj)[1]
  # Workaround for when the object is a stack frame and only .pyc files are
  # in use: `inspect.getfile` might return an incorrect path, so prefer the
  # path recorded in the frame's f_globals instead.
  if (hasattr(unwrapped_object, 'f_globals') and
      '__file__' in unwrapped_object.f_globals):
    return unwrapped_object.f_globals['__file__']
  return _inspect.getfile(unwrapped_object)
def getmembers(obj, predicate=None):
"""TFDecorator-aware replacement for inspect.getmembers."""
return _inspect.getmembers(obj, predicate)
def getmodule(obj):
"""TFDecorator-aware replacement for inspect.getmodule."""
return _inspect.getmodule(obj)
def getmro(cls):
"""TFDecorator-aware replacement for inspect.getmro."""
return _inspect.getmro(cls)
def getsource(obj):
"""TFDecorator-aware replacement for inspect.getsource."""
return _inspect.getsource(tf.__internal__.decorator.unwrap(obj)[1])
def getsourcefile(obj):
"""TFDecorator-aware replacement for inspect.getsourcefile."""
return _inspect.getsourcefile(tf.__internal__.decorator.unwrap(obj)[1])
def getsourcelines(obj):
"""TFDecorator-aware replacement for inspect.getsourcelines."""
return _inspect.getsourcelines(tf.__internal__.decorator.unwrap(obj)[1])
def isbuiltin(obj):
"""TFDecorator-aware replacement for inspect.isbuiltin."""
return _inspect.isbuiltin(tf.__internal__.decorator.unwrap(obj)[1])
def isclass(obj):
"""TFDecorator-aware replacement for inspect.isclass."""
return _inspect.isclass(tf.__internal__.decorator.unwrap(obj)[1])
def isfunction(obj):
"""TFDecorator-aware replacement for inspect.isfunction."""
return _inspect.isfunction(tf.__internal__.decorator.unwrap(obj)[1])
def isframe(obj):
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.isframe(tf.__internal__.decorator.unwrap(obj)[1])
def isgenerator(obj):
"""TFDecorator-aware replacement for inspect.isgenerator."""
return _inspect.isgenerator(tf.__internal__.decorator.unwrap(obj)[1])
def isgeneratorfunction(obj):
"""TFDecorator-aware replacement for inspect.isgeneratorfunction."""
return _inspect.isgeneratorfunction(tf.__internal__.decorator.unwrap(obj)[1])
def ismethod(obj):
  """TFDecorator-aware replacement for inspect.ismethod.

  Args:
    obj: An object, possibly wrapped by TFDecorators.

  Returns:
    True if the undecorated target of `obj` is a bound method.
  """
  target = tf.__internal__.decorator.unwrap(obj)[1]
  return _inspect.ismethod(target)
def ismodule(obj):
  """TFDecorator-aware replacement for inspect.ismodule.

  Args:
    obj: An object, possibly wrapped by TFDecorators.

  Returns:
    True if the undecorated target of `obj` is a module object.
  """
  target = tf.__internal__.decorator.unwrap(obj)[1]
  return _inspect.ismodule(target)
def isroutine(obj):
  """TFDecorator-aware replacement for inspect.isroutine.

  Args:
    obj: An object, possibly wrapped by TFDecorators.

  Returns:
    True if the undecorated target of `obj` is a function or method.
  """
  target = tf.__internal__.decorator.unwrap(obj)[1]
  return _inspect.isroutine(target)
def stack(context=1):
  """TFDecorator-aware replacement for inspect.stack.

  Args:
    context: Number of source lines of context to capture per frame.

  Returns:
    The caller's stack, as `inspect.stack` would report it.
  """
  # Drop this wrapper's own frame so callers see the same stack that a
  # direct call to inspect.stack() would have produced.
  frames = _inspect.stack(context)
  return frames[1:]
| 13,235 | 31.843672 | 80 | py |
keras | keras-master/keras/utils/kpl_test_utils.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test related utilities for KPL + tf.distribute."""
import tensorflow.compat.v2 as tf
import random
import tempfile
import keras
from keras.layers.preprocessing import string_lookup
class DistributeKplTestUtils(tf.test.TestCase):
  """Utils for test of tf.distribute + KPL."""
  # Closed vocabularies used to generate the synthetic features/labels below.
  # FEATURE_VOCAB[0] ("avenger") determines the label in dataset_fn.
  FEATURE_VOCAB = [
      "avenger", "ironman", "batman", "hulk", "spiderman", "kingkong",
      "wonder_woman"
  ]
  LABEL_VOCAB = ["yes", "no"]
  def define_kpls_for_training(self, use_adapt):
    """Function that defines KPL used for unit tests of tf.distribute.
    Args:
      use_adapt: if adapt will be called. False means there will be precomputed
        statistics.
    Returns:
      feature_mapper: a simple keras model with one keras StringLookup layer
      which maps feature to index.
      label_mapper: similar to feature_mapper, but maps label to index.
    """
    if use_adapt:
      # Derive the vocabularies from data via adapt() instead of passing
      # them in precomputed.
      feature_lookup_layer = (
          string_lookup.StringLookup(
              num_oov_indices=1))
      feature_lookup_layer.adapt(self.FEATURE_VOCAB)
      label_lookup_layer = (
          string_lookup.StringLookup(
              num_oov_indices=0, mask_token=None))
      label_lookup_layer.adapt(self.LABEL_VOCAB)
    else:
      # Precomputed vocabularies: the lookup tables are fixed at construction.
      feature_lookup_layer = (
          string_lookup.StringLookup(
              vocabulary=self.FEATURE_VOCAB, num_oov_indices=1))
      label_lookup_layer = (
          string_lookup.StringLookup(
              vocabulary=self.LABEL_VOCAB, num_oov_indices=0, mask_token=None))
    # Wrap each lookup layer in a small functional model so it can be called
    # (and saved) like any other Keras model.
    raw_feature_input = keras.layers.Input(
        shape=(3,), dtype=tf.string, name="feature", ragged=True)
    feature_id_input = feature_lookup_layer(raw_feature_input)
    feature_mapper = keras.Model({"features": raw_feature_input},
                                 feature_id_input)
    raw_label_input = keras.layers.Input(
        shape=(1,), dtype=tf.string, name="label")
    label_id_input = label_lookup_layer(raw_label_input)
    label_mapper = keras.Model({"label": raw_label_input}, label_id_input)
    return feature_mapper, label_mapper
  def dataset_fn(self, feature_mapper, label_mapper):
    """Function that generates dataset for test of tf.distribute + KPL.
    Args:
      feature_mapper: a simple keras model with one keras StringLookup layer
        which maps feature to index.
      label_mapper: similar to feature_mapper, but maps label to index.
    Returns:
      Generated dataset for test of tf.distribute + KPL.
    """
    def feature_and_label_gen():
      # Generator of dataset.
      # NOTE(review): this generator is infinite; callers are expected to
      # bound iteration (e.g. via steps) — confirm at call sites.
      while True:
        features = random.sample(self.FEATURE_VOCAB, 3)
        label = ["yes"] if self.FEATURE_VOCAB[0] in features else ["no"]
        yield {"features": features, "label": label}
    raw_dataset = tf.data.Dataset.from_generator(
        feature_and_label_gen,
        output_signature={
            "features": tf.TensorSpec([3], tf.string),
            "label": tf.TensorSpec([1], tf.string)
        }).shuffle(100).batch(32)
    # Map raw strings to integer ids using the provided mapper models.
    train_dataset = raw_dataset.map(lambda x: (  # pylint: disable=g-long-lambda
        {
            "features": feature_mapper(x["features"])
        }, label_mapper(x["label"])))
    return train_dataset
  def define_model(self):
    """A simple model for test of tf.distribute + KPL."""
    # Create the model. The input needs to be compatible with KPLs.
    model_input = keras.layers.Input(
        shape=(3,), dtype=tf.int64, name="model_input")
    # input_dim includes a mask token and an oov token.
    emb_output = keras.layers.Embedding(
        input_dim=len(self.FEATURE_VOCAB) + 2, output_dim=20)(
            model_input)
    emb_output = tf.reduce_mean(emb_output, axis=1)
    dense_output = keras.layers.Dense(
        units=1, activation="sigmoid")(
            emb_output)
    model = keras.Model({"features": model_input}, dense_output)
    return model
  def define_reverse_lookup_layer(self):
    """Create string reverse lookup layer for serving."""
    # invert=True maps predicted integer ids back to label strings.
    label_inverse_lookup_layer = string_lookup.StringLookup(
        num_oov_indices=0,
        mask_token=None,
        vocabulary=self.LABEL_VOCAB,
        invert=True)
    return label_inverse_lookup_layer
  def create_serving_signature(self, model, feature_mapper,
                               label_inverse_lookup_layer):
    """Create serving signature for the given model."""
    @tf.function
    def serve_fn(raw_features):
      # Serving receives a single unbatched example; add/remove the batch
      # dimension around the model call.
      raw_features = tf.expand_dims(raw_features, axis=0)
      transformed_features = model.feature_mapper(raw_features)
      outputs = model(transformed_features)
      outputs = tf.squeeze(outputs, axis=0)
      # Threshold the sigmoid output, then decode the id to a label string.
      outputs = tf.cast(tf.greater(outputs, 0.5), tf.int64)
      decoded_outputs = model.label_inverse_lookup_layer(outputs)
      return tf.squeeze(decoded_outputs, axis=0)
    # Attach the mappers to the model so they are tracked and saved with it.
    model.feature_mapper = feature_mapper
    model.label_inverse_lookup_layer = label_inverse_lookup_layer
    # serving does NOT have batch dimension
    return serve_fn.get_concrete_function(
        tf.TensorSpec(
            shape=(3), dtype=tf.string, name="example"))
  def test_save_load_serving_model(self, model, feature_mapper,
                                   label_inverse_lookup_layer):
    """Test save/load/serving model."""
    serving_fn = self.create_serving_signature(model, feature_mapper,
                                               label_inverse_lookup_layer)
    saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    model.save(saved_model_dir, save_format="tf",
               signatures={"serving_default": serving_fn})
    # Test the saved_model.
    loaded_serving_fn = keras.saving.save.load_model(
        saved_model_dir).signatures["serving_default"]
    # check the result w/ and w/o avenger.
    prediction0 = loaded_serving_fn(
        tf.constant(["avenger", "ironman", "avenger"]))["output_0"]
    self.assertIn(prediction0.numpy().decode("UTF-8"), ("yes", "no"))
    # "unkonwn" is deliberately absent from FEATURE_VOCAB (exercises OOV).
    prediction1 = loaded_serving_fn(
        tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"]
    self.assertIn(prediction1.numpy().decode("UTF-8"), ("yes", "no"))
| 6,764 | 36.375691 | 80 | py |
keras | keras-master/keras/utils/layer_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to layer/model functionality."""
import tensorflow.compat.v2 as tf
import functools
import weakref
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.get_source_inputs')
def get_source_inputs(tensor, layer=None, node_index=None):
  """Returns the list of input tensors necessary to compute `tensor`.

  Output will always be a list of tensors
  (potentially with 1 element).

  Args:
    tensor: The tensor to start from.
    layer: Origin layer of the tensor. Will be
      determined via tensor._keras_history if not provided.
    node_index: Origin node index of the tensor.

  Returns:
    List of input tensors.
  """
  if not hasattr(tensor, '_keras_history'):
    return tensor

  # Fall back to the tensor's own history only when the origin was not fully
  # specified. The previous condition (`layer is None or node_index`) also
  # re-derived the origin whenever a *provided* node_index was non-zero,
  # silently discarding the caller's arguments; `node_index is None` matches
  # the documented contract.
  if layer is None or node_index is None:
    layer, node_index, _ = tensor._keras_history
  if not layer._inbound_nodes:
    return [tensor]
  node = layer._inbound_nodes[node_index]
  if node.is_input:
    # Reached an Input layer, stop recursion.
    return tf.nest.flatten(node.input_tensors)
  source_tensors = []
  for prev_layer, prev_node_index, _, prev_tensor in node.iterate_inbound():
    previous_sources = get_source_inputs(prev_tensor, prev_layer,
                                         prev_node_index)
    # Avoid input redundancy (identity-based, since tensors may not support
    # meaningful equality).
    for x in previous_sources:
      if all(x is not t for t in source_tensors):
        source_tensors.append(x)
  return source_tensors
def validate_string_arg(input_data,
                        allowable_strings,
                        layer_name,
                        arg_name,
                        allow_none=False,
                        allow_callables=False):
  """Validates the correctness of a string-based arg.

  Args:
    input_data: The value to validate.
    allowable_strings: Collection of accepted string values.
    layer_name: Name of the layer, used in the error message.
    arg_name: Name of the argument, used in the error message.
    allow_none: Whether `None` is an accepted value.
    allow_callables: Whether callables are accepted values.

  Raises:
    ValueError: If `input_data` is not an accepted value.
  """
  is_valid = (
      (allow_none and input_data is None) or
      (allow_callables and callable(input_data)) or
      (isinstance(input_data, str) and input_data in allowable_strings))
  if is_valid:
    return
  allowed_args = ''
  if allow_none:
    allowed_args += '`None`, '
  if allow_callables:
    allowed_args += 'a `Callable`, '
  allowed_args += 'or one of the following values: %s' % (allowable_strings,)
  raise ValueError(
      f'The `{arg_name}` argument of layer {layer_name} received an invalid '
      f'value `{input_data}`. Allowed values are: {allowed_args}.')
def count_params(weights):
  """Count the total number of scalars composing the weights.

  Args:
    weights: An iterable containing the weights on which to compute params

  Returns:
    The total number of scalars composing the weights
  """
  # Deduplicate by object identity so shared weights are only counted once.
  deduped = {}
  for weight in weights:
    deduped.setdefault(id(weight), weight)
  total = 0
  for weight in deduped.values():
    # Ignore TrackableWeightHandlers, which will not have a shape defined.
    if not hasattr(weight, 'shape'):
      continue
    dims = weight.shape.as_list()
    # Unknown (None) dimensions contribute a size of zero.
    total += int(np.prod([0 if d is None else d for d in dims]))
  return total
def print_summary(model, line_length=None, positions=None, print_fn=None):
  """Prints a summary of a model.
  Args:
      model: Keras model instance.
      line_length: Total length of printed lines
          (e.g. set this to adapt the display to different
          terminal window sizes).
      positions: Relative or absolute positions of log elements in each line.
          If not provided, defaults to `[.33, .55, .67, 1.]`.
      print_fn: Print function to use.
          It will be called on each line of the summary.
          You can set it to a custom function
          in order to capture the string summary.
          It defaults to `print` (prints to stdout).
  """
  if print_fn is None:
    print_fn = print
  # Decide whether the model can be rendered as a single column of layers
  # (no "Connected to" column) or needs the full connectivity table.
  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  elif not model._is_graph_network:
    # We treat subclassed models as a simple sequence of layers, for logging
    # purposes.
    sequential_like = True
  else:
    sequential_like = True
    nodes_by_depth = model._nodes_by_depth.values()
    nodes = []
    for v in nodes_by_depth:
      if (len(v) > 1) or (len(v) == 1 and
                          len(tf.nest.flatten(v[0].keras_inputs)) > 1):
        # if the model has multiple nodes
        # or if the nodes have multiple inbound_layers
        # the model is no longer sequential
        sequential_like = False
        break
      nodes += v
    if sequential_like:
      # search for shared layers
      for layer in model.layers:
        flag = False
        for node in layer._inbound_nodes:
          if node in nodes:
            if flag:
              sequential_like = False
              break
            else:
              flag = True
        if not sequential_like:
          break
  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    # Relative positions (<= 1) are converted to absolute column indices.
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 98
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
    relevant_nodes = []
    for v in model._nodes_by_depth.values():
      relevant_nodes += v
  # Helper: renders one table row, wrapping over-long fields onto
  # continuation lines until every column has been fully printed.
  def print_row(fields, positions):
    left_to_print = [str(x) for x in fields]
    while any(left_to_print):
      line = ''
      for col in range(len(left_to_print)):
        if col > 0:
          start_pos = positions[col - 1]
        else:
          start_pos = 0
        end_pos = positions[col]
        # Leave room for 2 spaces to delineate columns
        # we don't need any if we are printing the last column
        space = 2 if col != len(positions) - 1 else 0
        cutoff = end_pos - start_pos - space
        fit_into_line = left_to_print[col][:cutoff]
        # For nicer formatting we line-break on seeing end of
        # tuple/dict etc.
        line_break_conditions = ('),', '},', '],', "',")
        candidate_cutoffs = [
            fit_into_line.find(x) + len(x)
            for x in line_break_conditions
            if fit_into_line.find(x) >= 0
        ]
        if candidate_cutoffs:
          cutoff = min(candidate_cutoffs)
          fit_into_line = fit_into_line[:cutoff]
        line += fit_into_line
        line += ' ' * space if space else ''
        left_to_print[col] = left_to_print[col][cutoff:]
        # Pad out to the next position
        line += ' ' * (positions[col] - len(line))
      print_fn(line)
  print_fn('Model: "{}"'.format(model.name))
  print_fn('_' * line_length)
  print_row(to_display, positions)
  print_fn('=' * line_length)
  def print_layer_summary(layer):
    """Prints a summary for a single layer.
    Args:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    except RuntimeError:  # output_shape unknown in Eager mode.
      output_shape = '?'
    name = layer.name
    cls_name = layer.__class__.__name__
    if not layer.built and not getattr(layer, '_is_graph_network', False):
      # If a subclassed model has a layer that is not called in Model.call, the
      # layer will not be built and we cannot call layer.count_params().
      params = '0 (unused)'
    else:
      params = layer.count_params()
    fields = [name + ' (' + cls_name + ')', output_shape, params]
    print_row(fields, positions)
  def print_layer_summary_with_connections(layer):
    """Prints a summary for a single layer (including topological connections).
    Args:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node in layer._inbound_nodes:
      if relevant_nodes and node not in relevant_nodes:
        # node is not part of the current network
        continue
      for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():
        connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index,
                                               tensor_index))
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [
        name + ' (' + cls_name + ')', output_shape,
        layer.count_params(), connections
    ]
    print_row(fields, positions)
  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    if i == len(layers) - 1:
      print_fn('=' * line_length)
    else:
      print_fn('_' * line_length)
  if hasattr(model, '_collected_trainable_weights'):
    trainable_count = count_params(model._collected_trainable_weights)
  else:
    trainable_count = count_params(model.trainable_weights)
  non_trainable_count = count_params(model.non_trainable_weights)
  print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print_fn('Trainable params: {:,}'.format(trainable_count))
  print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
  print_fn('_' * line_length)
def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Utility useful when changing a convnet's `data_format`.

  When porting the weights of a convnet from one data format to the other,
  if the convnet includes a `Flatten` layer
  (applied to the last convolutional feature map)
  followed by a `Dense` layer, the weights of that `Dense` layer
  should be updated to reflect the new dimension ordering.

  Args:
    dense: The target `Dense` layer.
    previous_feature_map_shape: A shape tuple of 3 integers,
      e.g. `(512, 7, 7)`. The shape of the convolutional
      feature map right before the `Flatten` layer that
      came before the target `Dense` layer.
    target_data_format: One of "channels_last", "channels_first".
      Set it "channels_last"
      if converting a "channels_first" model to "channels_last",
      or reciprocally.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  flat_size = (np.prod(previous_feature_map_shape),)
  for unit in range(kernel.shape[1]):
    if target_data_format == 'channels_first':
      channels, rows, cols = previous_feature_map_shape
      # Rows of the kernel are currently in channels_last flatten order.
      source_shape = (rows, cols, channels)
      permutation = (2, 0, 1)  # last -> first
    else:
      rows, cols, channels = previous_feature_map_shape
      # Rows of the kernel are currently in channels_first flatten order.
      source_shape = (channels, rows, cols)
      permutation = (1, 2, 0)  # first -> last
    column = kernel[:, unit].reshape(source_shape)
    column = np.transpose(column, permutation)
    kernel[:, unit] = np.reshape(column, flat_size)
  dense.set_weights([kernel, bias])
def is_builtin_layer(layer):
  """Returns True if `layer` is a Keras-exported (built-in) layer.

  A layer counts as built-in when it carries its own export names rather
  than merely inheriting the base `keras.layers.Layer` export names.
  """
  api_names = getattr(layer, '_keras_api_names', None)
  if not api_names:
    return False
  # Subclasses of `Layer` that are not exported inherit the export name
  # of the base layer class.
  base_names = ('keras.layers.Layer',)
  return (layer._keras_api_names != base_names and
          layer._keras_api_names_v1 != base_names)
def cached_per_instance(f):
  """Lightweight decorator for caching lazily constructed properties.

  Caches the result of `f` per argument object in a `WeakKeyDictionary`, so
  that (a) the value is computed at most once per instance, and (b) holding
  a cache entry never keeps the instance alive. A result of `None` is never
  cached and will be recomputed on every call.

  This is designed for properties that are expensive to compute and static
  over the life of an instance. It is particularly useful on classes with
  custom `__getattr__`/`__setattr__` behavior (such as trackable objects),
  where storing the cached value as a regular instance attribute would incur
  the expensive attribute-setting path; this cache sidesteps `__setattr__`
  entirely while still allowing lazy computation:

  ```
  @property
  @tracking.cached_per_instance
  def thing(self):
    # `thing` is expensive to compute (and may not even be requested), so we
    # want to lazily compute it and then cache it.
    return compute_thing(self)
  ```

  The overhead is roughly 0.4 us per call. A plain-dict implementation would
  be faster still, but would hold strong references to instances and block
  garbage collection, hence the `WeakKeyDictionary`. The cache is exposed on
  the returned function as its `cache` attribute.

  Args:
    f: The function to cache. It must accept a single weakref-able argument.

  Returns:
    f decorated with simple caching behavior.
  """
  storage = weakref.WeakKeyDictionary()

  @functools.wraps(f)
  def wrapped(item):
    result = storage.get(item)
    if result is None:
      result = f(item)
      storage[item] = result
    return result

  wrapped.cache = storage
  return wrapped
def filter_empty_layer_containers(layer_list):
  """Filter out empty Layer-like containers and uniquify.

  Yields each Layer-like object (anything with an `_is_layer` attribute that
  is not a class) at most once, recursing depth-first into any non-layer
  object's `layers` attribute.
  """
  # TODO(b/130381733): Make this an attribute in base_layer.Layer.
  seen_ids = set()
  pending = list(layer_list[::-1])
  while pending:
    item = pending.pop()
    item_id = id(item)
    if item_id in seen_ids:
      continue
    seen_ids.add(item_id)
    if hasattr(item, '_is_layer') and not isinstance(item, type):
      yield item
    else:
      nested = getattr(item, 'layers', None) or []
      # Trackable data structures will not show up in ".layers" lists, but
      # the layers they contain will.
      pending.extend(nested[::-1])
| 15,591 | 33.343612 | 80 | py |
keras | keras-master/keras/utils/np_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for np_utils."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.utils import np_utils
class TestNPUtils(tf.test.TestCase):
  """Tests for `np_utils.to_categorical`."""

  def test_to_categorical(self):
    num_classes = 5
    # Pairs of (label shape, expected one-hot shape).
    cases = [
        ((1,), (1, num_classes)),
        ((3,), (3, num_classes)),
        ((4, 3), (4, 3, num_classes)),
        ((5, 4, 3), (5, 4, 3, num_classes)),
        ((3, 1), (3, num_classes)),
        ((3, 2, 1), (3, 2, num_classes)),
    ]
    for label_shape, expected_shape in cases:
      label = np.random.randint(0, num_classes, label_shape)
      one_hot = np_utils.to_categorical(label, num_classes)
      # Check shape
      self.assertEqual(one_hot.shape, expected_shape)
      # Make sure there is only one 1 in a row
      self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
      # Get original labels back from one hots
      self.assertTrue(np.all(
          np.argmax(one_hot, -1).reshape(label.shape) == label))
# Standard TensorFlow test-runner entry point.
if __name__ == '__main__':
  tf.test.main()
| 1,856 | 36.897959 | 80 | py |
keras | keras-master/keras/utils/version_utils_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras utilities to split v1 and v2 classes."""
import tensorflow.compat.v2 as tf
import abc
import numpy as np
import keras
from keras import keras_parameterized
from keras.engine import base_layer
from keras.engine import base_layer_v1
from keras.engine import training
from keras.engine import training_v1
@keras_parameterized.run_all_keras_modes
class SplitUtilsTest(keras_parameterized.TestCase):
  # Each test constructs a model/layer and uses the two helpers below to
  # assert that the v1 or v2 base class was selected to match the current
  # execution mode (graph vs. eager-outside-functions).
  def _check_model_class(self, model_class):
    # Verifies the resolved Model base class matches the execution mode.
    if tf.compat.v1.executing_eagerly_outside_functions():
      self.assertEqual(model_class, training.Model)
    else:
      self.assertEqual(model_class, training_v1.Model)
  def _check_layer_class(self, layer):
    # Verifies the instance is built on the v1 or v2 Layer base as expected.
    if tf.compat.v1.executing_eagerly_outside_functions():
      self.assertIsInstance(layer, base_layer.Layer)
      self.assertNotIsInstance(layer, base_layer_v1.Layer)
    else:
      self.assertIsInstance(layer, base_layer_v1.Layer)
  def test_functional_model(self):
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    self._check_model_class(model.__class__.__bases__[0])
    self._check_layer_class(model)
  def test_subclass_model_with_functional_init(self):
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    class MyModel(keras.Model):
      pass
    model = MyModel(inputs, outputs)
    # One extra level of bases: MyModel -> functional subclass -> Model.
    model_class = model.__class__.__bases__[0].__bases__[0]
    self._check_model_class(model_class)
    self._check_layer_class(model)
  def test_subclass_model_with_functional_init_interleaved_v1_functional(self):
    # Building a v1 (graph-mode) functional model first must not affect the
    # class chosen for the subsequent model in the current mode.
    with tf.Graph().as_default():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      _ = keras.Model(inputs, outputs)
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    class MyModel(keras.Model):
      pass
    model = MyModel(inputs, outputs)
    model_class = model.__class__.__bases__[0].__bases__[0]
    self._check_model_class(model_class)
    self._check_layer_class(model)
  def test_sequential_model(self):
    model = keras.Sequential([keras.layers.Dense(1)])
    model_class = model.__class__.__bases__[0].__bases__[0]
    self._check_model_class(model_class)
    self._check_layer_class(model)
  def test_subclass_model(self):
    class MyModel(keras.Model):
      def call(self, x):
        return 2 * x
    model = MyModel()
    model_class = model.__class__.__bases__[0]
    self._check_model_class(model_class)
    self._check_layer_class(model)
  def test_layer(self):
    class IdentityLayer(base_layer.Layer):
      """A layer that returns its input.
      Useful for testing a layer without a variable.
      """
      def call(self, inputs):
        return inputs
    layer = IdentityLayer()
    self._check_layer_class(layer)
  def test_multiple_subclass_model(self):
    class Model1(keras.Model):
      pass
    class Model2(Model1):
      def call(self, x):
        return 2 * x
    model = Model2()
    model_class = model.__class__.__bases__[0].__bases__[0]
    self._check_model_class(model_class)
    self._check_layer_class(model)
  def test_user_provided_metaclass(self):
    # A user-supplied metaclass (here ABCMeta) must survive the v1/v2 split.
    class AbstractModel(keras.Model, metaclass=abc.ABCMeta):
      @abc.abstractmethod
      def call(self, inputs):
        """Calls the model."""
    class MyModel(AbstractModel):
      def call(self, inputs):
        return 2 * inputs
    with self.assertRaisesRegex(TypeError, 'instantiate abstract class'):
      AbstractModel()  # pylint: disable=abstract-class-instantiated
    model = MyModel()
    model_class = model.__class__.__bases__[0].__bases__[0]
    self._check_model_class(model_class)
    self._check_layer_class(model)
  def test_multiple_inheritance(self):
    # Non-Keras bases must be preserved, in order, after the split.
    class Return2:
      def return_2(self):
        return 2
    class MyModel(keras.Model, Return2):
      def call(self, x):
        return self.return_2() * x
    model = MyModel()
    bases = model.__class__.__bases__
    self._check_model_class(bases[0])
    self.assertEqual(bases[1], Return2)
    self.assertEqual(model.return_2(), 2)
    self._check_layer_class(model)
  def test_fit_error(self):
    if not tf.compat.v1.executing_eagerly_outside_functions():
      # Error only appears on the v2 class.
      return
    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')
    x, y = np.ones((10, 10)), np.ones((10, 1))
    # Calling fit on an eager-constructed model from inside a graph context
    # must raise a helpful error rather than silently misbehaving.
    with tf.compat.v1.get_default_graph().as_default():
      with self.assertRaisesRegex(
          ValueError, 'instance was constructed with eager mode enabled'):
        model.fit(x, y, batch_size=2)
# Standard TensorFlow test-runner entry point.
if __name__ == '__main__':
  tf.test.main()
| 5,359 | 27.972973 | 80 | py |
keras | keras-master/keras/utils/data_utils.py | # Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
import tensorflow.compat.v2 as tf
from abc import abstractmethod
from contextlib import closing
import functools
import hashlib
import multiprocessing.dummy
import os
import pathlib
import queue
import random
import shutil
import tarfile
import threading
import time
import typing
import urllib
import weakref
import zipfile
from six.moves.urllib.parse import urlsplit
import numpy as np
from six.moves.urllib.request import urlopen
from keras.utils import tf_inspect
from keras.utils.generic_utils import Progbar
from keras.utils.io_utils import path_to_string
from tensorflow.python.util.tf_export import keras_export
# Required to support google internal urlretrieve
if True:  # This gets transformed to `if sys.version_info[0] == 2:` in OSS.  # pylint: disable=using-constant-test
  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.
    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.
    Args:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once on establishment of
          the network connection and once after each block read thereafter. The
          hook will be passed three arguments; a count of blocks transferred so
          far, a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Total size is only known when the server sent a Content-Length header.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          # An empty read signals end of stream.
          break
    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  from urllib.request import urlretrieve  # pylint: disable=g-importing-member
def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  # Tensors, arrays and the common builtin containers are never treated as
  # generator-style inputs, even though some of them are iterable.
  non_generator_types = (tf.Tensor, np.ndarray, str, list, tuple, dict, set,
                         frozenset)
  if isinstance(x, non_generator_types):
    return False
  return (tf_inspect.isgenerator(x) or
          isinstance(x, Sequence) or
          isinstance(x, typing.Iterator))
def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

  Args:
    file_path: path to the archive file
    path: path to extract the archive file
    archive_format: Archive format to try for extracting the file.
      Options are 'auto', 'tar', 'zip', and None.
      'tar' includes tar, tar.gz, and tar.bz files.
      The default 'auto' is ['tar', 'zip'].
      None or an empty list will return no matches found.

  Returns:
    True if a match was found and an archive extraction was completed,
    False otherwise.
  """
  if archive_format is None:
    return False
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, str):
    archive_format = [archive_format]

  file_path = path_to_string(file_path)
  path = path_to_string(path)

  for archive_type in archive_format:
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    elif archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile
    else:
      # Previously an unrecognized archive type fell through with an unbound
      # `is_match_fn` (NameError) or silently reused the previous format's
      # matcher. Skip unknown formats explicitly instead.
      continue
    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          # Remove a partially-extracted result before re-raising.
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False
@keras_export('keras.utils.get_file')
def get_file(fname=None,
             origin=None,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Example:

  ```python
  path_to_downloaded_file = tf.keras.utils.get_file(
      "flower_photos",
      "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
      untar=True)
  ```

  Args:
    fname: Name of the file. If an absolute path `/path/to/file.txt` is
      specified the file will be saved at that location. If `None`, the
      name of the file at `origin` will be used.
    origin: Original URL of the file.
    untar: Deprecated in favor of `extract` argument.
      boolean, whether the file should be decompressed
    md5_hash: Deprecated in favor of `file_hash` argument.
      md5 hash of the file for verification
    file_hash: The expected hash string of the file after download.
      The sha256 and md5 hash algorithms are both supported.
    cache_subdir: Subdirectory under the Keras cache dir where the file is
      saved. If an absolute path `/path/to/folder` is
      specified the file will be saved at that location.
    hash_algorithm: Select the hash algorithm to verify the file.
      options are `'md5'`, `'sha256'`, and `'auto'`.
      The default 'auto' detects the hash algorithm in use.
    extract: True tries extracting the file as an Archive, like tar or zip.
    archive_format: Archive format to try for extracting the file.
      Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
      `'tar'` includes tar, tar.gz, and tar.bz files.
      The default `'auto'` corresponds to `['tar', 'zip']`.
      None or an empty list will return no matches found.
    cache_dir: Location to store cached files, when None it
      defaults to the default directory `~/.keras/`.

  Raises:
    ValueError: If `origin` is missing, or no filename can be derived
      from `origin` and `fname` was not given.

  Returns:
    Path to the downloaded file
  """
  if origin is None:
    raise ValueError('Please specify the "origin" argument (URL of the file '
                     'to download).')

  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  # `md5_hash` is the deprecated spelling; fold it into the
  # `file_hash`/`hash_algorithm` pair when `file_hash` was not also given.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  # Fall back to /tmp/.keras when the requested cache dir is not writable.
  if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  _makedirs_exist_ok(datadir)

  fname = path_to_string(fname)
  # Derive the file name from the URL path when the caller did not supply one.
  if not fname:
    fname = os.path.basename(urlsplit(origin).path)
    if not fname:
      raise ValueError(
          f"Can't parse the file name from the origin provided: '{origin}'."
          "Please specify the `fname` as the input param.")

  if untar:
    if fname.endswith('.tar.gz'):
      fname = pathlib.Path(fname)
      # The 2 `.with_suffix()` are because of `.tar.gz` as pathlib
      # considers it as 2 suffixes.
      fname = fname.with_suffix('').with_suffix('')
      fname = str(fname)
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)

  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True

  if download:
    print('Downloading data from', origin)

    class ProgressTracker:
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except urllib.error.HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except urllib.error.URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt) as e:
      # Remove any partially-downloaded file so a later call retries cleanly.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None

  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath

  if extract:
    _extract_archive(fpath, datadir, archive_format)

  return fpath
def _makedirs_exist_ok(datadir):
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == 'sha256':
return hashlib.sha256()
if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if isinstance(algorithm, str):
hasher = _resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Args:
    fpath: path to the file being validated
    file_hash: The expected hash string of the file.
      The sha256 and md5 hash algorithms are both supported.
    algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
      The default 'auto' detects the hash algorithm in use.
    chunk_size: Bytes to read at a time, important for large files.

  Returns:
    Whether the file is valid
  """
  hasher = _resolve_hasher(algorithm, file_hash)
  # Return the comparison directly instead of an `if/else` returning
  # True/False. Casting both sides to str tolerates non-str hash inputs.
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class ThreadsafeIter:
  """Wrap an iterator with a lock and propagate exceptions to all threads."""

  def __init__(self, it):
    self.it = it
    self.lock = threading.Lock()

    # Once the wrapped iterator raises, a plain generator would turn every
    # later next() into StopIteration. With multiple consumer threads the
    # original error could then surface in a different thread than the one
    # observing the "clean" end of iteration. To stay coherent we remember
    # the first exception and re-raise it on every subsequent call.
    self._exception = None

  def __iter__(self):
    return self

  def next(self):
    # Python-2-style alias kept for backward compatibility.
    return self.__next__()

  def __next__(self):
    with self.lock:
      if self._exception is not None:
        raise self._exception  # pylint: disable=raising-bad-type
      try:
        return next(self.it)
      except Exception as e:
        self._exception = e
        raise
def threadsafe_generator(f):
  """Decorator wrapping a generator function's output in a `ThreadsafeIter`."""

  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    return ThreadsafeIter(f(*args, **kwargs))

  return wrapped
@keras_export('keras.utils.Sequence')
class Sequence:
  """Base object for fitting to a sequence of data, such as a dataset.

  Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
  If you want to modify your dataset between epochs you may implement
  `on_epoch_end`. The method `__getitem__` should return a complete batch.

  Notes:

  `Sequence` are a safer way to do multiprocessing. This structure guarantees
  that the network will only train once on each sample per epoch which is not
  the case with generators.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
          batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
          return np.array([
              resize(imread(file_name), (200, 200))
              for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Args:
      index: position of the batch in the Sequence.

    Returns:
      A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batch in the Sequence.

    Returns:
      The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Method called at the end of every epoch."""
    pass

  def __iter__(self):
    """Yields the batches of the Sequence in index order."""
    for i in range(len(self)):
      yield self[i]
def iter_sequence_infinite(seq):
  """Iterates indefinitely over a Sequence, restarting from the beginning.

  Args:
    seq: `Sequence` instance.

  Yields:
    Batches of data from the `Sequence`.
  """
  while True:
    yield from seq
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None

# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None  # Only created if needed.
# Set of worker process ids spawned by Keras-owned pools.
_WORKER_IDS = set()
# When True, `get_pool_class` always returns a thread pool even if the caller
# asked for multiprocessing; toggled by `dont_use_multiprocessing_pool`.
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
  """Decorator forcing thread pools (never process pools) while `f` runs.

  While the wrapped call is executing, `get_pool_class` returns a thread pool
  even when multiprocessing is requested. The previous flag value is restored
  afterwards — including when `f` raises, which the original code did not do
  (the flag leaked as True on exception).
  """

  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    with _FORCE_THREADPOOL_LOCK:
      global _FORCE_THREADPOOL
      old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
      try:
        return f(*args, **kwargs)
      finally:
        _FORCE_THREADPOOL = old_force_threadpool

  return wrapped
def get_pool_class(use_multiprocessing):
  """Returns the pool class honoring the module-level threadpool override.

  Args:
    use_multiprocessing: whether the caller requested process-based workers.

  Returns:
    `multiprocessing.Pool` when multiprocessing is requested and not globally
    disabled, otherwise `multiprocessing.dummy.Pool` (a thread pool).
  """
  # The flag is only read here, so the original `global _FORCE_THREADPOOL`
  # declaration was unnecessary and has been removed.
  if not use_multiprocessing or _FORCE_THREADPOOL:
    return multiprocessing.dummy.Pool  # ThreadPool
  return multiprocessing.Pool
def get_worker_id_queue():
  """Returns the process-shared worker-id queue, creating it on first use."""
  global _WORKER_ID_QUEUE
  if _WORKER_ID_QUEUE is not None:
    return _WORKER_ID_QUEUE
  _WORKER_ID_QUEUE = multiprocessing.Queue()
  return _WORKER_ID_QUEUE
def init_pool(seqs):
  """Pool initializer: publishes `seqs` as this worker's shared sequences."""
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs
def get_index(uid, i):
  """Get the value from the Sequence `uid` at index `i`.

  To allow multiple Sequences to be used at the same time, we use `uid` to
  get a specific one. A single Sequence would cause the validation to
  overwrite the training Sequence.

  Args:
    uid: int, Sequence identifier
    i: index

  Returns:
    The value at index `i`.
  """
  sequence = _SHARED_SEQUENCES[uid]
  return sequence[i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer:
  """Base class to enqueue inputs.

  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.

  Example:

  ```python
      enqueuer = SequenceEnqueuer(...)
      enqueuer.start()
      datas = enqueuer.get()
      for data in datas:
          # Use the inputs; training, evaluating, predicting.
          # ... stop sometime.
      enqueuer.stop()
  ```

  The `enqueuer.get()` should be an infinite stream of datas.
  """

  def __init__(self, sequence,
               use_multiprocessing=False):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing

    # Allocate a unique id for this enqueuer so several enqueuers can share
    # the process-global `_SHARED_SEQUENCES` dict without clobbering each
    # other (e.g. one for training and one for validation).
    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      try:
        _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
      except OSError:
        # In this case the OS does not allow us to use
        # multiprocessing. We resort to an int
        # for enqueuer indexing.
        _SEQUENCE_COUNTER = 0

    if isinstance(_SEQUENCE_COUNTER, int):
      self.uid = _SEQUENCE_COUNTER
      _SEQUENCE_COUNTER += 1
    else:
      # Doing Multiprocessing.Value += x is not process-safe.
      with _SEQUENCE_COUNTER.get_lock():
        self.uid = _SEQUENCE_COUNTER.value
        _SEQUENCE_COUNTER.value += 1

    # Populated by `start()`; `stop_signal is None` means "never started".
    self.workers = 0
    self.executor_fn = None
    self.queue = None
    self.run_thread = None
    self.stop_signal = None

  def is_running(self):
    # Running means `start()` was called and `stop()` has not yet fired.
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.

    Args:
      workers: Number of workers.
      max_queue_size: queue size
        (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor_fn = self._get_executor_init(workers)
    else:
      # We do not need the init since it's threads.
      self.executor_fn = lambda _: get_pool_class(False)(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    # Daemon thread so a hung producer never blocks interpreter shutdown.
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _send_sequence(self):
    """Sends current Iterable to all workers."""
    # For new processes that may spawn
    _SHARED_SEQUENCES[self.uid] = self.sequence

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Args:
      timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    # Drain the queue under its own mutex so a producer blocked on a full
    # queue is released and observes the stop signal.
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None

  def __del__(self):
    if self.is_running():
      self.stop()

  @abstractmethod
  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    raise NotImplementedError

  @abstractmethod
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Args:
      workers: Number of workers.

    Returns:
      Function, a Function to initialize the pool
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    # Returns
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.

  Args:
    sequence: A `tf.keras.utils.data_utils.Sequence` object.
    use_multiprocessing: use multiprocessing if True, otherwise threading
    shuffle: whether to shuffle the data at the beginning of each epoch
  """

  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.shuffle = shuffle

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Args:
      workers: Number of workers.

    Returns:
      Function, a Function to initialize the pool
    """

    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, None, get_worker_id_queue()))
      # Track Keras-created pools so orphaned ones can be found and cleaned.
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _wait_queue(self):
    """Wait for the queue to be empty."""
    # Polling loop; also wakes up early if `stop()` was requested.
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      # A fresh in-place shuffle per epoch; index order is preserved when
      # `shuffle` is False, which is what keeps batches "ordered".
      if self.shuffle:
        random.shuffle(sequence)

      with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
        for i in sequence:
          if self.stop_signal.is_set():
            return

          self.queue.put(
              executor.apply_async(get_index, (self.uid, i)), block=True)

        # Done with the current epoch, waiting for the final batches
        self._wait_queue()

        if self.stop_signal.is_set():
          # We're done
          return

      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
      The next element in the queue, i.e. a tuple
      `(inputs, targets)` or
      `(inputs, targets, sample_weights)`.
    """
    while self.is_running():
      try:
        # Outer get() pulls the Future; inner .get() waits for its result.
        inputs = self.queue.get(block=True, timeout=5).get()
        if self.is_running():
          self.queue.task_done()
        if inputs is not None:
          yield inputs
      except queue.Empty:
        # Timed out waiting for a batch; loop again so `stop()` is noticed.
        pass
      except Exception as e:  # pylint: disable=broad-except
        self.stop()
        raise e
def init_pool_generator(gens, random_seed=None, id_queue=None):
  """Initializer function for pool workers.

  Args:
    gens: State which should be made available to worker processes.
    random_seed: An optional value with which to seed child processes.
    id_queue: A multiprocessing Queue of worker ids. This is used to indicate
      that a worker process was created by Keras and can be terminated using
      the cleanup_all_keras_forkpools utility.
  """
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = gens

  proc = multiprocessing.current_process()
  # The name is cosmetic only, but a recognizable prefix makes orphaned
  # worker processes much easier to spot while debugging.
  proc.name = 'Keras_worker_{}'.format(proc.name)

  if random_seed is not None:
    # Offset the seed by the process ident so every worker draws a
    # distinct random stream.
    np.random.seed(random_seed + proc.ident)

  if id_queue is not None:
    # If a worker dies during init, the pool will just create a replacement.
    id_queue.put(proc.ident, block=True, timeout=0.1)
def next_sample(uid):
  """Gets the next value from the generator `uid`.

  To allow multiple generators to be used at the same time, we use `uid` to
  get a specific one. A single generator would cause the validation to
  overwrite the training generator.

  Args:
    uid: int, generator identifier

  Returns:
    The next value of generator `uid`.
  """
  generator = _SHARED_SEQUENCES[uid]
  return next(generator)
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.

  The provided generator can be finite in which case the class will throw
  a `StopIteration` exception.

  Args:
    generator: a generator function which yields data
    use_multiprocessing: use multiprocessing if True, otherwise threading
    random_seed: Initial seed for workers,
      will be incremented by one for each worker.
  """

  def __init__(self, generator,
               use_multiprocessing=False,
               random_seed=None):
    super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
    self.random_seed = random_seed

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Args:
      workers: Number of works.

    Returns:
      A Function to initialize the pool
    """

    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, self.random_seed, get_worker_id_queue()))
      # Track Keras-created pools so orphaned ones can be found and cleaned.
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    self._send_sequence()  # Share the initial generator
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
      # Unlike OrderedEnqueuer, the generator is unbounded/opaque, so we
      # submit next_sample calls until stopped (or StopIteration in get()).
      while True:
        if self.stop_signal.is_set():
          return

        self.queue.put(
            executor.apply_async(next_sample, (self.uid,)), block=True)

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
      The next element in the queue, i.e. a tuple
      `(inputs, targets)` or
      `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except StopIteration:
      # Special case for finite generators
      last_ones = []
      while self.queue.qsize() > 0:
        last_ones.append(self.queue.get(block=True))
      # Wait for them to complete
      for f in last_ones:
        f.wait()
      # Keep the good ones
      last_ones = [future.get() for future in last_ones if future.successful()]
      for inputs in last_ones:
        if inputs is not None:
          yield inputs
    except Exception as e:  # pylint: disable=broad-except
      self.stop()
      if 'generator already executing' in str(e):
        raise RuntimeError(
            'Your generator is NOT thread-safe. '
            'Keras requires a thread-safe generator when '
            '`use_multiprocessing=False, workers > 1`. ')
      raise e
| 28,193 | 29.813115 | 114 | py |
keras | keras-master/keras/utils/generic_utils_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras generic Python utils."""
import tensorflow.compat.v2 as tf
from functools import partial
import numpy as np
import keras
from keras.utils import generic_utils
class SnakeCaseTest(tf.test.TestCase):
  """Tests for `generic_utils.to_snake_case`."""

  def test_snake_case(self):
    cases = [
        ('SomeClass', 'some_class'),
        ('Conv2D', 'conv2d'),
        ('ConvLSTM2D', 'conv_lstm2d'),
    ]
    for camel_name, expected in cases:
      self.assertEqual(generic_utils.to_snake_case(camel_name), expected)
class HasArgTest(tf.test.TestCase):
  """Tests for `generic_utils.has_arg`."""

  def test_has_arg(self):

    def f_x(x):
      return x

    def f_x_args(x, *args):
      _ = args
      return x

    def f_x_kwargs(x, **kwargs):
      _ = kwargs
      return x

    def f(a, b, c):
      return a + b + c

    partial_f = partial(f, b=1)

    # (callable, argument name, accept_all flag, expected result)
    cases = [
        (f_x, 'x', False, True),
        (f_x, 'y', False, False),
        (f_x_args, 'x', False, True),
        (f_x_args, 'y', False, False),
        (f_x_kwargs, 'x', False, True),
        (f_x_kwargs, 'y', False, False),
        (f_x_kwargs, 'y', True, True),
        (partial_f, 'c', True, True),
    ]
    for fn, arg_name, accept_all, expected in cases:
      result = keras.utils.generic_utils.has_arg(
          fn, arg_name, accept_all=accept_all)
      if expected:
        self.assertTrue(result)
      else:
        self.assertFalse(result)
class TestCustomObjectScope(tf.test.TestCase):
  """Tests that `custom_object_scope` exposes registered custom objects."""

  def test_custom_object_scope(self):

    def custom_fn():
      pass

    class CustomClass:
      pass

    custom_objects = {'CustomClass': CustomClass, 'custom_fn': custom_fn}
    with keras.utils.generic_utils.custom_object_scope(custom_objects):
      # Inside the scope, lookups by name resolve to the custom objects.
      act = keras.activations.get('custom_fn')
      self.assertEqual(act, custom_fn)
      cl = keras.regularizers.get('CustomClass')
      self.assertEqual(cl.__class__, CustomClass)
class SerializeKerasObjectTest(tf.test.TestCase):
  """Round-trip tests for serialize/deserialize_keras_object."""

  def test_serialize_none(self):
    serialized = keras.utils.generic_utils.serialize_keras_object(None)
    self.assertEqual(serialized, None)
    deserialized = keras.utils.generic_utils.deserialize_keras_object(
        serialized)
    self.assertEqual(deserialized, None)

  def test_serialize_custom_class_with_default_name(self):

    @keras.utils.generic_utils.register_keras_serializable()
    class TestClass:

      def __init__(self, value):
        self._value = value

      def get_config(self):
        return {'value': self._value}

    serialized_name = 'Custom>TestClass'
    inst = TestClass(value=10)
    class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[TestClass]
    self.assertEqual(serialized_name, class_name)
    config = keras.utils.generic_utils.serialize_keras_object(inst)
    self.assertEqual(class_name, config['class_name'])
    new_inst = keras.utils.generic_utils.deserialize_keras_object(config)
    self.assertIsNot(inst, new_inst)
    self.assertIsInstance(new_inst, TestClass)
    self.assertEqual(10, new_inst._value)

    # Make sure registering a new class with same name will fail.
    with self.assertRaisesRegex(ValueError, '.*has already been registered.*'):

      @keras.utils.generic_utils.register_keras_serializable()  # pylint: disable=function-redefined
      class TestClass:  # pylint: disable=function-redefined

        def __init__(self, value):
          self._value = value

        def get_config(self):
          return {'value': self._value}

  def test_serialize_custom_class_with_custom_name(self):

    @keras.utils.generic_utils.register_keras_serializable(
        'TestPackage', 'CustomName')
    class OtherTestClass:

      def __init__(self, val):
        self._val = val

      def get_config(self):
        return {'val': self._val}

    serialized_name = 'TestPackage>CustomName'
    inst = OtherTestClass(val=5)
    class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[OtherTestClass]
    self.assertEqual(serialized_name, class_name)
    fn_class_name = keras.utils.generic_utils.get_registered_name(
        OtherTestClass)
    self.assertEqual(fn_class_name, class_name)

    cls = keras.utils.generic_utils.get_registered_object(fn_class_name)
    self.assertEqual(OtherTestClass, cls)

    config = keras.utils.generic_utils.serialize_keras_object(inst)
    self.assertEqual(class_name, config['class_name'])
    new_inst = keras.utils.generic_utils.deserialize_keras_object(config)
    self.assertIsNot(inst, new_inst)
    self.assertIsInstance(new_inst, OtherTestClass)
    self.assertEqual(5, new_inst._val)

  def test_serialize_custom_function(self):

    @keras.utils.generic_utils.register_keras_serializable()
    def my_fn():
      return 42

    serialized_name = 'Custom>my_fn'
    class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[my_fn]
    self.assertEqual(serialized_name, class_name)
    fn_class_name = keras.utils.generic_utils.get_registered_name(my_fn)
    self.assertEqual(fn_class_name, class_name)

    config = keras.utils.generic_utils.serialize_keras_object(my_fn)
    self.assertEqual(class_name, config)
    fn = keras.utils.generic_utils.deserialize_keras_object(config)
    self.assertEqual(42, fn())

    fn_2 = keras.utils.generic_utils.get_registered_object(fn_class_name)
    self.assertEqual(42, fn_2())

  def test_serialize_custom_class_without_get_config_fails(self):

    with self.assertRaisesRegex(
        ValueError, 'Cannot register a class that does '
        'not have a get_config.*'):

      @keras.utils.generic_utils.register_keras_serializable(  # pylint: disable=unused-variable
          'TestPackage', 'TestClass')
      class TestClass:

        def __init__(self, value):
          self._value = value

  def test_serializable_object(self):

    class SerializableInt(int):
      """A serializable object to pass out of a test layer's config."""

      def __new__(cls, value):
        return int.__new__(cls, value)

      def get_config(self):
        return {'value': int(self)}

      @classmethod
      def from_config(cls, config):
        return cls(**config)

    layer = keras.layers.Dense(
        SerializableInt(3),
        activation='relu',
        kernel_initializer='ones',
        bias_regularizer='l2')
    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(
        config, custom_objects={'SerializableInt': SerializableInt})
    self.assertEqual(new_layer.activation, keras.activations.relu)
    self.assertEqual(new_layer.bias_regularizer.__class__,
                     keras.regularizers.L2)
    self.assertEqual(new_layer.units.__class__, SerializableInt)
    self.assertEqual(new_layer.units, 3)

  def test_nested_serializable_object(self):

    class SerializableInt(int):
      """A serializable object to pass out of a test layer's config."""

      def __new__(cls, value):
        return int.__new__(cls, value)

      def get_config(self):
        return {'value': int(self)}

      @classmethod
      def from_config(cls, config):
        return cls(**config)

    class SerializableNestedInt(int):
      """A serializable object containing another serializable object."""

      def __new__(cls, value, int_obj):
        obj = int.__new__(cls, value)
        obj.int_obj = int_obj
        return obj

      def get_config(self):
        return {'value': int(self), 'int_obj': self.int_obj}

      @classmethod
      def from_config(cls, config):
        return cls(**config)

    nested_int = SerializableInt(4)
    layer = keras.layers.Dense(
        SerializableNestedInt(3, nested_int),
        name='SerializableNestedInt',
        activation='relu',
        kernel_initializer='ones',
        bias_regularizer='l2')
    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(
        config,
        custom_objects={
            'SerializableInt': SerializableInt,
            'SerializableNestedInt': SerializableNestedInt
        })
    # Make sure the string field doesn't get convert to custom object, even
    # they have same value.
    self.assertEqual(new_layer.name, 'SerializableNestedInt')
    self.assertEqual(new_layer.activation, keras.activations.relu)
    self.assertEqual(new_layer.bias_regularizer.__class__,
                     keras.regularizers.L2)
    self.assertEqual(new_layer.units.__class__, SerializableNestedInt)
    self.assertEqual(new_layer.units, 3)
    self.assertEqual(new_layer.units.int_obj.__class__, SerializableInt)
    self.assertEqual(new_layer.units.int_obj, 4)

  def test_nested_serializable_fn(self):

    def serializable_fn(x):
      """A serializable function to pass out of a test layer's config."""
      return x

    class SerializableNestedInt(int):
      """A serializable object containing a serializable function."""

      def __new__(cls, value, fn):
        obj = int.__new__(cls, value)
        obj.fn = fn
        return obj

      def get_config(self):
        return {'value': int(self), 'fn': self.fn}

      @classmethod
      def from_config(cls, config):
        return cls(**config)

    layer = keras.layers.Dense(
        SerializableNestedInt(3, serializable_fn),
        activation='relu',
        kernel_initializer='ones',
        bias_regularizer='l2')
    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(
        config,
        custom_objects={
            'serializable_fn': serializable_fn,
            'SerializableNestedInt': SerializableNestedInt
        })
    self.assertEqual(new_layer.activation, keras.activations.relu)
    self.assertIsInstance(new_layer.bias_regularizer, keras.regularizers.L2)
    self.assertIsInstance(new_layer.units, SerializableNestedInt)
    self.assertEqual(new_layer.units, 3)
    self.assertIs(new_layer.units.fn, serializable_fn)

  def test_serialize_type_object_initializer(self):
    layer = keras.layers.Dense(
        1,
        kernel_initializer=keras.initializers.ones,
        bias_initializer=keras.initializers.zeros)
    config = keras.layers.serialize(layer)
    self.assertEqual(config['config']['bias_initializer']['class_name'],
                     'Zeros')
    self.assertEqual(config['config']['kernel_initializer']['class_name'],
                     'Ones')

  def test_serializable_with_old_config(self):
    # model config generated by tf-1.2.1
    old_model_config = {
        'class_name':
            'Sequential',
        'config': [{
            'class_name': 'Dense',
            'config': {
                'name': 'dense_1',
                'trainable': True,
                'batch_input_shape': [None, 784],
                'dtype': 'float32',
                'units': 32,
                'activation': 'linear',
                'use_bias': True,
                'kernel_initializer': {
                    'class_name': 'Ones',
                    'config': {
                        'dtype': 'float32'
                    }
                },
                'bias_initializer': {
                    'class_name': 'Zeros',
                    'config': {
                        'dtype': 'float32'
                    }
                },
                'kernel_regularizer': None,
                'bias_regularizer': None,
                'activity_regularizer': None,
                'kernel_constraint': None,
                'bias_constraint': None
            }
        }]
    }
    old_model = keras.utils.generic_utils.deserialize_keras_object(
        old_model_config, module_objects={'Sequential': keras.Sequential})
    new_model = keras.Sequential([
        keras.layers.Dense(32, input_dim=784, kernel_initializer='Ones'),
    ])
    input_data = np.random.normal(2, 1, (5, 784))
    output = old_model.predict(input_data)
    expected_output = new_model.predict(input_data)
    self.assertAllEqual(output, expected_output)

  def test_deserialize_unknown_object(self):

    class CustomLayer(keras.layers.Layer):
      pass

    layer = CustomLayer()
    config = keras.utils.generic_utils.serialize_keras_object(layer)
    # `assertRaisesRegexp` is a deprecated alias (removed in Python 3.12);
    # use `assertRaisesRegex`, consistent with the rest of this file.
    with self.assertRaisesRegex(ValueError,
                                'passed to the `custom_objects` arg'):
      keras.utils.generic_utils.deserialize_keras_object(config)
    restored = keras.utils.generic_utils.deserialize_keras_object(
        config, custom_objects={'CustomLayer': CustomLayer})
    self.assertIsInstance(restored, CustomLayer)
class SliceArraysTest(tf.test.TestCase):
  """Tests for `generic_utils.slice_arrays` on non-sliceable elements."""

  def test_slice_arrays(self):
    input_a = list([1, 2, 3])
    expected = [None, None, None]
    self.assertEqual(
        keras.utils.generic_utils.slice_arrays(input_a, start=0), expected)
    self.assertEqual(
        keras.utils.generic_utils.slice_arrays(input_a, stop=3), expected)
    self.assertEqual(
        keras.utils.generic_utils.slice_arrays(input_a, start=0, stop=1),
        expected)
# object() alone isn't compatible with WeakKeyDictionary, which we use to
# track shared configs.
class MaybeSharedObject:
  """Minimal weakref-compatible stand-in for a potentially shared object."""
  pass
class SharedObjectScopeTest(tf.test.TestCase):
  """Tests for the shared-object saving/loading scope context managers."""

  def test_shared_object_saving_scope_single_object_doesnt_export_id(self):
    with generic_utils.SharedObjectSavingScope() as scope:
      single_object = MaybeSharedObject()
      # Object not yet seen: no cached config.
      self.assertIsNone(scope.get_config(single_object))
      single_object_config = scope.create_config({}, single_object)
      self.assertIsNotNone(single_object_config)
      # Seen only once, so no shared-object id should be exported.
      self.assertNotIn(generic_utils.SHARED_OBJECT_KEY,
                       single_object_config)

  def test_shared_object_saving_scope_shared_object_exports_id(self):
    with generic_utils.SharedObjectSavingScope() as scope:
      shared_object = MaybeSharedObject()
      self.assertIsNone(scope.get_config(shared_object))
      scope.create_config({}, shared_object)
      first_object_config = scope.get_config(shared_object)
      second_object_config = scope.get_config(shared_object)
      # Retrieved more than once: the config must carry a shared-object id
      # and both retrievals must be the same cached dict.
      self.assertIn(generic_utils.SHARED_OBJECT_KEY,
                    first_object_config)
      self.assertIn(generic_utils.SHARED_OBJECT_KEY,
                    second_object_config)
      self.assertIs(first_object_config, second_object_config)

  def test_shared_object_loading_scope_noop(self):
    # Test that, without a context manager scope, adding configs will do
    # nothing.
    obj_id = 1
    obj = MaybeSharedObject()
    generic_utils._shared_object_loading_scope().set(obj_id, obj)
    self.assertIsNone(generic_utils._shared_object_loading_scope().get(obj_id))

  def test_shared_object_loading_scope_returns_shared_obj(self):
    obj_id = 1
    obj = MaybeSharedObject()
    with generic_utils.SharedObjectLoadingScope() as scope:
      scope.set(obj_id, obj)
      self.assertIs(scope.get(obj_id), obj)

  def test_nested_shared_object_saving_scopes(self):
    my_obj = MaybeSharedObject()
    with generic_utils.SharedObjectSavingScope() as scope_1:
      scope_1.create_config({}, my_obj)
      with generic_utils.SharedObjectSavingScope() as scope_2:
        # Nesting saving scopes should return the original scope and should
        # not clear any objects we're tracking.
        self.assertIs(scope_1, scope_2)
        self.assertIsNotNone(scope_2.get_config(my_obj))
      self.assertIsNotNone(scope_1.get_config(my_obj))
    # Exiting the outermost scope tears the scope down entirely.
    self.assertIsNone(generic_utils._shared_object_saving_scope())
if __name__ == '__main__':
  # Discovers and runs the test cases above via the TensorFlow test runner.
  tf.test.main()
| 16,148 | 33.803879 | 100 | py |
keras | keras-master/keras/utils/metrics_utils.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utils related to keras metrics."""
import tensorflow.compat.v2 as tf
import functools
import weakref
from enum import Enum
import numpy as np
from keras import backend
from keras.utils import losses_utils
from keras.utils import tf_utils
from keras.utils.generic_utils import to_list
# Large negative sentinel used by `_filter_top_k` to mask out non-top-k
# prediction values; acts as an effective -inf for comparisons.
NEG_INF = -1e10
class Reduction(Enum):
  """Enumerates the supported ways of reducing weighted metric values.

  Contains the following values:

  * `SUM`: Scalar sum of weighted values.
  * `SUM_OVER_BATCH_SIZE`: Scalar sum of weighted values divided by
        number of elements.
  * `WEIGHTED_MEAN`: Scalar sum of weighted values divided by sum of weights.
  """
  SUM = 'sum'
  SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'
  WEIGHTED_MEAN = 'weighted_mean'
def update_state_wrapper(update_state_fn):
  """Decorator to wrap metric `update_state()` with `add_update()`.

  Args:
    update_state_fn: function that accumulates metric statistics.

  Returns:
    Decorated function that wraps `update_state_fn()` with `add_update()`.
  """
  def decorated(metric_obj, *args, **kwargs):
    """Decorated function with `add_update()`."""
    strategy = tf.distribute.get_strategy()
    # Metrics used under TPUStrategy must have been created inside its scope;
    # otherwise updating them from replica context is an error.
    if (backend.is_tpu_strategy(strategy) and
        not tf.distribute.in_cross_replica_context()):
      for weight in metric_obj.weights:
        if not strategy.extended.variable_created_in_scope(weight):
          raise ValueError(
              'Trying to run metric.update_state in replica context when '
              'the metric was not created in TPUStrategy scope. '
              'Make sure the keras Metric is created in TPUstrategy scope. ')
    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
      update_op = update_state_fn(*args, **kwargs)
    # In eager execution `update_state_fn` returns None, so there is nothing
    # to register as a graph update in that case.
    if update_op is not None:
      metric_obj.add_update(update_op)
    return update_op
  return tf.__internal__.decorator.make_decorator(update_state_fn, decorated)
def result_wrapper(result_fn):
  """Decorator to wrap metric `result()` function in `merge_call()`.
  Result computation is an idempotent operation that simply calculates the
  metric value using the state variables.
  If metric state variables are distributed across replicas/devices and
  `result()` is requested from the context of one device - This function wraps
  `result()` in a distribution strategy `merge_call()`. With this,
  the metric state variables will be aggregated across devices.
  Args:
    result_fn: function that computes the metric result.
  Returns:
    Decorated function that wraps `result_fn()` in distribution strategy
    `merge_call()`.
  """
  def decorated(metric_obj, *args):
    """Decorated function with merge_call."""
    has_strategy = tf.distribute.has_strategy()
    replica_context = tf.distribute.get_replica_context()
    # The purpose of using `merge_call` to call `result()` is to trigger cross
    # replica aggregation of metric state variables (SyncOnReadVariable). After
    # we introduced `variable_sync_on_read_context`, in principle there is no
    # need to use `merge_call` here. However the branch still exists because:
    #
    # 1. Keras V1 training code sometimes assumes `result_t` is the same tensor
    # across replicas (achieved by `merge_call`). With
    # `variable_sync_on_read_context` each replica gets their own tensors
    # residing on replica's device, thus breaking the assumption.
    # 2. Keras c/fit creates a tf.function (a.k.a, train_function) that returns
    # the metric values of the first replica. With
    # `variable_sync_on_read_context` since each replica gets their own
    # tensors, the metric result tensors on the non-first replicas are not in
    # the return value of train_function, making TF graph optimizer prune the
    # branch that computes and aggregates those metric results. As a result,
    # if NCCL is used to do the aggregation, the program will hang because
    # NCCL ops are only launched on the non-pruned first replica.
    #
    # We condition on strategy.extended._use_merge_call() since we know if it is
    # false, the program uses `jit_compile` to compile replica fn, meaning it is
    # not V1 training (hence #1 is okay), and no pruning will happen as
    # compiled functions are not inlined (hence #2 is okay).
    if (not has_strategy or replica_context is None or
        not tf.distribute.get_strategy(
        ).extended._use_merge_call()):
      with tf.__internal__.distribute.variable_sync_on_read_context():
        raw_result = result_fn(*args)
        # Results need to be wrapped in a `tf.identity` op to ensure
        # correct execution order.
        if isinstance(raw_result,
                      (tf.Tensor, tf.Variable, float, int)):
          result_t = tf.identity(raw_result)
        elif isinstance(raw_result, dict):
          # Dict results are wrapped value-by-value for the same reason.
          result_t = {
              key: tf.identity(value)
              for key, value in raw_result.items()
          }
        else:
          try:
            result_t = tf.identity(raw_result)
          except (ValueError, TypeError):
            raise RuntimeError(
                'The output of `metric.result()` can only be a single '
                'Tensor/Variable, or a dict of Tensors/Variables. '
                f'For metric {metric_obj.name}, got result {raw_result}.')
    else:
      # TODO(psv): Test distribution of metrics using different distribution
      # strategies.
      # Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
      # with distribution object as the first parameter. We create a wrapper
      # here so that the result function need not have that parameter.
      def merge_fn_wrapper(distribution, merge_fn, *args):
        # We will get `PerReplica` merge function. Taking the first one as all
        # are identical copies of the function that we had passed below.
        result = distribution.experimental_local_results(merge_fn)[0](*args)
        # Wrapping result in identity so that control dependency between
        # update_op from `update_state` and result works in case result returns
        # a tensor.
        return tf.identity(result)
      # Wrapping result in merge_call. merge_call is used when we want to leave
      # replica mode and compute a value in cross replica mode.
      result_t = replica_context.merge_call(
          merge_fn_wrapper, args=(result_fn,) + args)
    # We are saving the result op here to be used in train/test execution
    # functions. This basically gives the result op that was generated with a
    # control dep to the updates for these workflows.
    metric_obj._call_result = result_t
    return result_t
  return tf.__internal__.decorator.make_decorator(result_fn, decorated)
def weakmethod(method):
  """Creates a weak reference to the bound method.

  The returned callable keeps the underlying function and class alive but
  holds only a weak reference to the instance, so it does not by itself keep
  the instance from being garbage collected.

  Args:
    method: A bound method (e.g. `instance.fn`).

  Returns:
    A callable forwarding to `method` that holds only a weak reference to
    the method's instance.
  """
  # Use the Python 3 bound-method attributes. The previous `im_class`,
  # `im_func` and `im_self` spellings are Python 2 only and raise
  # AttributeError on Python 3.
  cls = method.__self__.__class__
  func = method.__func__
  instance_ref = weakref.ref(method.__self__)
  @functools.wraps(method)
  def inner(*args, **kwargs):
    # Re-bind the function to the (weakly held) instance on every call.
    return func.__get__(instance_ref(), cls)(*args, **kwargs)
  del method  # Drop the strong reference to the bound method itself.
  return inner
def assert_thresholds_range(thresholds):
  """Raises ValueError if any entry of `thresholds` lies outside [0, 1]."""
  if thresholds is None:
    return
  # `None` entries are treated as invalid alongside out-of-range numbers.
  invalid_thresholds = [t for t in thresholds if t is None or t < 0 or t > 1]
  if invalid_thresholds:
    raise ValueError(
        f'Threshold values must be in [0, 1]. Received: {invalid_thresholds}')
def parse_init_thresholds(thresholds, default_threshold=0.5):
  """Validates constructor thresholds and normalizes them into a list.

  Args:
    thresholds: A scalar, list, or tuple of thresholds in `[0, 1]`, or None.
    default_threshold: Value substituted when `thresholds` is None.

  Returns:
    The thresholds as a list (containing `default_threshold` when `thresholds`
    is None).

  Raises:
    ValueError: If any provided threshold falls outside of `[0, 1]`.
  """
  if thresholds is None:
    return to_list(default_threshold)
  assert_thresholds_range(to_list(thresholds))
  return to_list(thresholds)
class ConfusionMatrix(Enum):
  """Keys identifying confusion-matrix statistic variables to update."""
  TRUE_POSITIVES = 'tp'
  FALSE_POSITIVES = 'fp'
  TRUE_NEGATIVES = 'tn'
  FALSE_NEGATIVES = 'fn'
class AUCCurve(Enum):
  """Type of AUC Curve (ROC or PR)."""
  ROC = 'ROC'
  PR = 'PR'

  @staticmethod
  def from_str(key):
    """Converts a string such as 'pr' or 'ROC' into an `AUCCurve` member."""
    if key in ('roc', 'ROC'):
      return AUCCurve.ROC
    if key in ('pr', 'PR'):
      return AUCCurve.PR
    raise ValueError(
        f'Invalid AUC curve value: "{key}". '
        'Expected values are ["PR", "ROC"]')
class AUCSummationMethod(Enum):
  """Type of AUC summation method.

  See https://en.wikipedia.org/wiki/Riemann_sum for background.

  Contains the following values:

  * 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
    `PR` curve, interpolates (true/false) positives but not the ratio that is
    precision (see Davis & Goadrich 2006 for details).
  * 'minoring': Applies left summation for increasing intervals and right
    summation for decreasing intervals.
  * 'majoring': Applies right summation for increasing intervals and left
    summation for decreasing intervals.
  """
  INTERPOLATION = 'interpolation'
  MAJORING = 'majoring'
  MINORING = 'minoring'

  @staticmethod
  def from_str(key):
    """Converts a (capitalized or lowercase) name into an enum member."""
    if key in ('interpolation', 'Interpolation'):
      return AUCSummationMethod.INTERPOLATION
    if key in ('majoring', 'Majoring'):
      return AUCSummationMethod.MAJORING
    if key in ('minoring', 'Minoring'):
      return AUCSummationMethod.MINORING
    raise ValueError(
        f'Invalid AUC summation method value: "{key}". '
        'Expected values are ["interpolation", "majoring", "minoring"]')
def _update_confusion_matrix_variables_optimized(
    variables_to_update,
    y_true,
    y_pred,
    thresholds,
    multi_label=False,
    sample_weights=None,
    label_weights=None,
    thresholds_with_epsilon=False):
  """Update confusion matrix variables with memory efficient alternative.
  Note that the thresholds need to be evenly distributed within the list, eg,
  the diff between consecutive elements are the same.
  To compute TP/FP/TN/FN, we are measuring a binary classifier
    C(t) = (predictions >= t)
  at each threshold 't'. So we have
    TP(t) = sum( C(t) * true_labels )
    FP(t) = sum( C(t) * false_labels )
  But, computing C(t) requires computation for each t. To make it fast,
  observe that C(t) is a cumulative integral, and so if we have
    thresholds = [t_0, ..., t_{n-1}];  t_0 < ... < t_{n-1}
  where n = num_thresholds, and if we can compute the bucket function
    B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
  then we get
    C(t_i) = sum( B(j), j >= i )
  which is the reversed cumulative sum in tf.cumsum().
  We can compute B(i) efficiently by taking advantage of the fact that
  our thresholds are evenly distributed, in that
    width = 1.0 / (num_thresholds - 1)
    thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
  Given a prediction value p, we can map it to its bucket by
    bucket_index(p) = floor( p * (num_thresholds - 1) )
  so we can use tf.math.unsorted_segment_sum() to update the buckets in one
  pass.
  Consider following example:
  y_true = [0, 0, 1, 1]
  y_pred = [0.1, 0.5, 0.3, 0.9]
  thresholds = [0.0, 0.5, 1.0]
  num_buckets = 2   # [0.0, 1.0], (1.0, 2.0]
  bucket_index(y_pred) = tf.math.floor(y_pred * num_buckets)
                       = tf.math.floor([0.2, 1.0, 0.6, 1.8])
                       = [0, 0, 0, 1]
  # The meaning of this bucket is that if any of the label is true,
  # then 1 will be added to the corresponding bucket with the index.
  # Eg, if the label for 0.2 is true, then 1 will be added to bucket 0. If the
  # label for 1.8 is true, then 1 will be added to bucket 1.
  #
  # Note the second item "1.0" is floored to 0, since the value need to be
  # strictly larger than the bucket lower bound.
  # In the implementation, we use tf.math.ceil() - 1 to achieve this.
  tp_bucket_value = tf.math.unsorted_segment_sum(true_labels, bucket_indices,
                                                 num_segments=num_thresholds)
                  = [1, 1, 0]
  # For [1, 1, 0] here, it means there is 1 true value contributed by bucket 0,
  # and 1 value contributed by bucket 1. When we aggregate them to together,
  # the result become [a + b + c, b + c, c], since large thresholds will always
  # contribute to the value for smaller thresholds.
  true_positive = tf.math.cumsum(tp_bucket_value, reverse=True)
                = [2, 1, 0]
  This implementation exhibits a run time and space complexity of O(T + N),
  where T is the number of thresholds and N is the size of predictions.
  Metrics that rely on standard implementation instead exhibit a complexity of
  O(T * N).
  Args:
    variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
      and corresponding variables to update as values.
    y_true: A floating point `Tensor` whose shape matches `y_pred`. Will be cast
      to `bool`.
    y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
      the range `[0, 1]`.
    thresholds: A sorted floating point `Tensor` with value in `[0, 1]`.
      It need to be evenly distributed (the diff between each element need to be
      the same).
    multi_label: Optional boolean indicating whether multidimensional
      prediction/labels should be treated as multilabel responses, or flattened
      into a single label. When True, the values of `variables_to_update` must
      have a second dimension equal to the number of labels in y_true and
      y_pred, and those tensors must not be RaggedTensors.
    sample_weights: Optional `Tensor` whose rank is either 0, or the same rank
      as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `y_true` dimension).
    label_weights: Optional tensor of non-negative weights for multilabel
      data. The weights are applied when calculating TP, FP, FN, and TN without
      explicit multilabel handling (i.e. when the data is to be flattened).
    thresholds_with_epsilon: Optional boolean indicating whether the leading and
      tailing thresholds has any epsilon added for floating point imprecisions.
      It will change how we handle the leading and tailing bucket.
  Returns:
    Update op.
  """
  num_thresholds = thresholds.shape.as_list()[0]
  # A scalar 1.0 weight broadcasts implicitly in the multiply below; only
  # tensor weights need explicit broadcasting/reshaping.
  if sample_weights is None:
    sample_weights = 1.0
  else:
    sample_weights = tf.__internal__.ops.broadcast_weights(
        tf.cast(sample_weights, dtype=y_pred.dtype), y_pred)
    if not multi_label:
      sample_weights = tf.reshape(sample_weights, [-1])
  if label_weights is None:
    label_weights = 1.0
  else:
    label_weights = tf.expand_dims(label_weights, 0)
    label_weights = tf.__internal__.ops.broadcast_weights(label_weights,
                                                          y_pred)
    if not multi_label:
      label_weights = tf.reshape(label_weights, [-1])
  weights = tf.multiply(sample_weights, label_weights)
  # We shouldn't need this, but in case there are predict value that is out of
  # the range of [0.0, 1.0]
  y_pred = tf.clip_by_value(y_pred,
                            clip_value_min=0.0, clip_value_max=1.0)
  # Normalize labels to exact 0.0/1.0 in the working dtype.
  y_true = tf.cast(tf.cast(y_true, tf.bool), y_true.dtype)
  if not multi_label:
    y_true = tf.reshape(y_true, [-1])
    y_pred = tf.reshape(y_pred, [-1])
  true_labels = tf.multiply(y_true, weights)
  false_labels = tf.multiply((1.0 - y_true), weights)
  # Compute the bucket indices for each prediction value.
  # Since the predict value has to be strictly greater than the thresholds,
  # eg, buckets like [0, 0.5], (0.5, 1], and 0.5 belongs to first bucket.
  # We have to use math.ceil(val) - 1 for the bucket.
  bucket_indices = tf.math.ceil(y_pred * (num_thresholds - 1)) - 1
  if thresholds_with_epsilon:
    # In this case, the first bucket should actually take into account since
    # the any prediction between [0.0, 1.0] should be larger than the first
    # threshold. We change the bucket value from -1 to 0.
    bucket_indices = tf.nn.relu(bucket_indices)
  bucket_indices = tf.cast(bucket_indices, tf.int32)
  if multi_label:
    # We need to run bucket segment sum for each of the label class. In the
    # multi_label case, the rank of the label is 2. We first transpose it so
    # that the label dim becomes the first and we can parallel run though them.
    true_labels = tf.transpose(true_labels)
    false_labels = tf.transpose(false_labels)
    bucket_indices = tf.transpose(bucket_indices)
    def gather_bucket(label_and_bucket_index):
      label, bucket_index = label_and_bucket_index[0], label_and_bucket_index[1]
      return tf.math.unsorted_segment_sum(
          data=label, segment_ids=bucket_index, num_segments=num_thresholds)
    tp_bucket_v = tf.vectorized_map(
        gather_bucket, (true_labels, bucket_indices))
    fp_bucket_v = tf.vectorized_map(
        gather_bucket, (false_labels, bucket_indices))
    tp = tf.transpose(
        tf.cumsum(tp_bucket_v, reverse=True, axis=1))
    fp = tf.transpose(
        tf.cumsum(fp_bucket_v, reverse=True, axis=1))
  else:
    tp_bucket_v = tf.math.unsorted_segment_sum(
        data=true_labels, segment_ids=bucket_indices,
        num_segments=num_thresholds)
    fp_bucket_v = tf.math.unsorted_segment_sum(
        data=false_labels, segment_ids=bucket_indices,
        num_segments=num_thresholds)
    tp = tf.cumsum(tp_bucket_v, reverse=True)
    fp = tf.cumsum(fp_bucket_v, reverse=True)
  # fn = sum(true_labels) - tp
  # tn = sum(false_labels) - fp
  if (ConfusionMatrix.TRUE_NEGATIVES in variables_to_update or
      ConfusionMatrix.FALSE_NEGATIVES in variables_to_update):
    if multi_label:
      total_true_labels = tf.reduce_sum(true_labels, axis=1)
      total_false_labels = tf.reduce_sum(false_labels, axis=1)
    else:
      total_true_labels = tf.reduce_sum(true_labels)
      total_false_labels = tf.reduce_sum(false_labels)
  update_ops = []
  if ConfusionMatrix.TRUE_POSITIVES in variables_to_update:
    variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES]
    update_ops.append(variable.assign_add(tp))
  if ConfusionMatrix.FALSE_POSITIVES in variables_to_update:
    variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES]
    update_ops.append(variable.assign_add(fp))
  if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update:
    variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES]
    tn = total_false_labels - fp
    update_ops.append(variable.assign_add(tn))
  if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update:
    variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES]
    fn = total_true_labels - tp
    update_ops.append(variable.assign_add(fn))
  return tf.group(update_ops)
def is_evenly_distributed_thresholds(thresholds):
  """Checks if the thresholds list is evenly distributed.

  We could leverage evenly distributed thresholds to use less memory when
  calculating metrics like AUC where each individual threshold needs to be
  evaluated.

  Args:
    thresholds: A python list or tuple, or 1D numpy array whose value is ranged
      in [0, 1].

  Returns:
    boolean, whether the values in the inputs are evenly distributed.
  """
  count = len(thresholds)
  # Fewer than 3 points cannot be treated as an even grid over [0, 1].
  if count < 3:
    return False
  reference_grid = np.arange(count, dtype=np.float32) / (count - 1)
  return np.allclose(thresholds, reference_grid, atol=backend.epsilon())
def update_confusion_matrix_variables(variables_to_update,
                                      y_true,
                                      y_pred,
                                      thresholds,
                                      top_k=None,
                                      class_id=None,
                                      sample_weight=None,
                                      multi_label=False,
                                      label_weights=None,
                                      thresholds_distributed_evenly=False):
  """Returns op to update the given confusion matrix variables.
  For every pair of values in y_true and y_pred:
  true_positive: y_true == True and y_pred > thresholds
  false_negatives: y_true == True and y_pred <= thresholds
  true_negatives: y_true == False and y_pred <= thresholds
  false_positive: y_true == False and y_pred > thresholds
  The results will be weighted and added together. When multiple thresholds are
  provided, we will repeat the same for every threshold.
  For estimation of these metrics over a stream of data, the function creates an
  `update_op` operation that updates the given variables.
  If `sample_weight` is `None`, weights default to 1.
  Use weights of 0 to mask values.
  Args:
    variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
      and corresponding variables to update as values.
    y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
    y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
      the range `[0, 1]`.
    thresholds: A float value, float tensor, python list, or tuple of float
      thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
    top_k: Optional int, indicates that the positive labels should be limited to
      the top k predictions.
    class_id: Optional int, limits the prediction and labels to the class
      specified by this argument.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
      `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `y_true` dimension).
    multi_label: Optional boolean indicating whether multidimensional
      prediction/labels should be treated as multilabel responses, or flattened
      into a single label. When True, the values of `variables_to_update` must
      have a second dimension equal to the number of labels in y_true and
      y_pred, and those tensors must not be RaggedTensors.
    label_weights: (optional) tensor of non-negative weights for multilabel
      data. The weights are applied when calculating TP, FP, FN, and TN without
      explicit multilabel handling (i.e. when the data is to be flattened).
    thresholds_distributed_evenly: Boolean, whether the thresholds are evenly
      distributed within the list. An optimized method will be used if this is
      the case. See _update_confusion_matrix_variables_optimized() for more
      details.
  Returns:
    Update op.
  Raises:
    ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
      `sample_weight` is not `None` and its shape doesn't match `y_pred`, or if
      `variables_to_update` contains invalid keys.
  """
  if multi_label and label_weights is not None:
    raise ValueError('`label_weights` for multilabel data should be handled '
                     'outside of `update_confusion_matrix_variables` when '
                     '`multi_label` is True.')
  if variables_to_update is None:
    return
  if not any(
      key for key in variables_to_update if key in list(ConfusionMatrix)):
    raise ValueError(
        'Please provide at least one valid confusion matrix '
        'variable to update. Valid variable key options are: '
        f'"{list(ConfusionMatrix)}". Received: "{variables_to_update.keys()}"')
  # All computation below happens in the dtype of the variables being updated.
  variable_dtype = list(variables_to_update.values())[0].dtype
  y_true = tf.cast(y_true, dtype=variable_dtype)
  y_pred = tf.cast(y_pred, dtype=variable_dtype)
  if thresholds_distributed_evenly:
    # Check whether the thresholds has any leading or tailing epsilon added
    # for floating point imprecision. The leading and tailing threshold will be
    # handled bit differently as the corner case.
    # At this point, thresholds should be a list/array with more than 2 items,
    # and ranged between [0, 1]. See is_evenly_distributed_thresholds() for more
    # details.
    thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0
  thresholds = tf.convert_to_tensor(
      thresholds, dtype=variable_dtype)
  num_thresholds = thresholds.shape.as_list()[0]
  if multi_label:
    one_thresh = tf.equal(
        tf.cast(1, dtype=tf.int32),
        tf.rank(thresholds),
        name='one_set_of_thresholds_cond')
  else:
    [y_pred,
     y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true],
                                                               sample_weight)
    one_thresh = tf.cast(True, dtype=tf.bool)
  invalid_keys = [
      key for key in variables_to_update if key not in list(ConfusionMatrix)
  ]
  if invalid_keys:
    raise ValueError(
        f'Invalid keys: "{invalid_keys}". '
        f'Valid variable key options are: "{list(ConfusionMatrix)}"')
  with tf.control_dependencies([
      tf.compat.v1.assert_greater_equal(
          y_pred,
          tf.cast(0.0, dtype=y_pred.dtype),
          message='predictions must be >= 0'),
      tf.compat.v1.assert_less_equal(
          y_pred,
          tf.cast(1.0, dtype=y_pred.dtype),
          message='predictions must be <= 1')
  ]):
    if sample_weight is None:
      y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
          y_pred, y_true)
    else:
      sample_weight = tf.cast(sample_weight, dtype=variable_dtype)
      y_pred, y_true, sample_weight = (
          losses_utils.squeeze_or_expand_dimensions(
              y_pred, y_true, sample_weight=sample_weight))
  y_pred.shape.assert_is_compatible_with(y_true.shape)
  if top_k is not None:
    y_pred = _filter_top_k(y_pred, top_k)
  if class_id is not None:
    y_true = y_true[..., class_id]
    y_pred = y_pred[..., class_id]
  if thresholds_distributed_evenly:
    # Delegate to the O(T + N) bucketed implementation.
    return _update_confusion_matrix_variables_optimized(
        variables_to_update, y_true, y_pred, thresholds,
        multi_label=multi_label, sample_weights=sample_weight,
        label_weights=label_weights,
        thresholds_with_epsilon=thresholds_with_epsilon)
  pred_shape = tf.shape(y_pred)
  num_predictions = pred_shape[0]
  if y_pred.shape.ndims == 1:
    num_labels = 1
  else:
    num_labels = tf.raw_ops.Prod(input=pred_shape[1:], axis=0)
  thresh_label_tile = tf.where(one_thresh, num_labels,
                               tf.ones([], dtype=tf.int32))
  # Reshape predictions and labels, adding a dim for thresholding.
  if multi_label:
    predictions_extra_dim = tf.expand_dims(y_pred, 0)
    labels_extra_dim = tf.expand_dims(
        tf.cast(y_true, dtype=tf.bool), 0)
  else:
    # Flatten predictions and labels when not multilabel.
    predictions_extra_dim = tf.reshape(y_pred, [1, -1])
    labels_extra_dim = tf.reshape(
        tf.cast(y_true, dtype=tf.bool), [1, -1])
  # Tile the thresholds for every prediction.
  if multi_label:
    thresh_pretile_shape = [num_thresholds, 1, -1]
    thresh_tiles = [1, num_predictions, thresh_label_tile]
    data_tiles = [num_thresholds, 1, 1]
  else:
    thresh_pretile_shape = [num_thresholds, -1]
    thresh_tiles = [1, num_predictions * num_labels]
    data_tiles = [num_thresholds, 1]
  thresh_tiled = tf.tile(
      tf.reshape(thresholds, thresh_pretile_shape),
      tf.stack(thresh_tiles))
  # Tile the predictions for every threshold.
  preds_tiled = tf.tile(predictions_extra_dim, data_tiles)
  # Compare predictions and threshold.
  pred_is_pos = tf.greater(preds_tiled, thresh_tiled)
  # Tile labels by number of thresholds
  label_is_pos = tf.tile(labels_extra_dim, data_tiles)
  if sample_weight is not None:
    sample_weight = tf.__internal__.ops.broadcast_weights(
        tf.cast(sample_weight, dtype=variable_dtype), y_pred)
    weights_tiled = tf.tile(
        tf.reshape(sample_weight, thresh_tiles), data_tiles)
  else:
    weights_tiled = None
  if label_weights is not None and not multi_label:
    label_weights = tf.expand_dims(label_weights, 0)
    label_weights = tf.__internal__.ops.broadcast_weights(label_weights,
                                                          y_pred)
    label_weights_tiled = tf.tile(
        tf.reshape(label_weights, thresh_tiles), data_tiles)
    if weights_tiled is None:
      weights_tiled = label_weights_tiled
    else:
      weights_tiled = tf.multiply(weights_tiled, label_weights_tiled)
  update_ops = []
  def weighted_assign_add(label, pred, weights, var):
    # Count (optionally weighted) co-occurrences of `label` and `pred`,
    # reduced over the data axis, and accumulate into `var`.
    label_and_pred = tf.cast(
        tf.logical_and(label, pred), dtype=var.dtype)
    if weights is not None:
      label_and_pred *= tf.cast(weights, dtype=var.dtype)
    return var.assign_add(tf.reduce_sum(label_and_pred, 1))
  loop_vars = {
      ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
  }
  update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
  update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
  update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
  if update_fn or update_tn:
    pred_is_neg = tf.logical_not(pred_is_pos)
    loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
  if update_fp or update_tn:
    label_is_neg = tf.logical_not(label_is_pos)
    loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
    if update_tn:
      loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg)
  for matrix_cond, (label, pred) in loop_vars.items():
    if matrix_cond in variables_to_update:
      update_ops.append(
          weighted_assign_add(label, pred, weights_tiled,
                              variables_to_update[matrix_cond]))
  return tf.group(update_ops)
def _filter_top_k(x, k):
  """Filters top-k values in the last dim of x and set the rest to NEG_INF.

  Used for computing top-k prediction values in dense labels (which has the
  same shape as predictions) for recall and precision top-k metrics.

  Args:
    x: tensor with any dimensions.
    k: the number of values to keep.

  Returns:
    tensor with same shape and dtype as x.
  """
  _, kept_indices = tf.math.top_k(x, k, sorted=False)
  # Build a {0, 1} mask marking the k largest entries along the last axis.
  keep_mask = tf.reduce_sum(
      tf.one_hot(kept_indices, tf.shape(x)[-1], axis=-1), axis=-2)
  # Keep the selected entries and push every other entry down to NEG_INF.
  return x * keep_mask + NEG_INF * (1 - keep_mask)
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
  """If ragged, it checks the compatibility and then returns the flat_values.
  Note: If two tensors are dense, it does not check their compatibility.
  Note: Although two ragged tensors with different ragged ranks could have
        identical overall rank and dimension sizes and hence be compatible,
        we do not support those cases.
  Args:
    values: A list of potentially ragged tensor of the same ragged_rank.
    mask: A potentially ragged tensor of the same ragged_rank as elements in
      Values.
  Returns:
    A tuple in which the first element is the list of tensors and the second
    is the mask tensor. ([Values], mask). Mask and the element in Values
    are equal to the flat_values of the input arguments (if they were ragged).
  """
  if isinstance(values, list):
    is_all_ragged = \
        all(isinstance(rt, tf.RaggedTensor) for rt in values)
    is_any_ragged = \
        any(isinstance(rt, tf.RaggedTensor) for rt in values)
  else:
    # A single (non-list) input: "all ragged" and "any ragged" coincide.
    is_all_ragged = isinstance(values, tf.RaggedTensor)
    is_any_ragged = is_all_ragged
  if (is_all_ragged and
      ((mask is None) or isinstance(mask, tf.RaggedTensor))):
    to_be_stripped = False
    if not isinstance(values, list):
      values = [values]
      to_be_stripped = True
    # NOTE: we leave the flat_values compatibility to
    # tf.TensorShape `assert_is_compatible_with`
    # check if both dynamic dimensions are equal and then use the flat_values.
    nested_row_split_list = [rt.nested_row_splits for rt in values]
    assertion_list = _assert_splits_match(nested_row_split_list)
    # if both are ragged sample_weights also should be ragged with same dims.
    if isinstance(mask, tf.RaggedTensor):
      assertion_list_for_mask = _assert_splits_match(
          [nested_row_split_list[0], mask.nested_row_splits])
      with tf.control_dependencies(assertion_list_for_mask):
        mask = tf.expand_dims(mask.flat_values, -1)
    # values has at least 1 element.
    flat_values = []
    for value in values:
      with tf.control_dependencies(assertion_list):
        flat_values.append(tf.expand_dims(value.flat_values, -1))
    values = flat_values[0] if to_be_stripped else flat_values
  elif is_any_ragged:
    raise TypeError('Some of the inputs are not tf.RaggedTensor. '
                    f'Input received: {values}')
  # values are empty or value are not ragged and mask is ragged.
  elif isinstance(mask, tf.RaggedTensor):
    raise TypeError('Ragged mask is not allowed with non-ragged inputs. '
                    f'Input received: {values}, mask received: {mask}')
  return values, mask
def _assert_splits_match(nested_splits_lists):
  """Checks that the given splits lists are identical.

  A static check verifies that every splits list describes the same number of
  ragged dimensions as the first one; runtime assertions are then built that
  compare each splits tensor against the corresponding tensor of the first
  list.

  Args:
    nested_splits_lists: A list of nested_splits_lists, where each split_list
      is a list of `splits` tensors from a `RaggedTensor`, ordered from
      outermost ragged dimension to innermost ragged dimension.

  Returns:
    A list of control dependency op tensors.

  Raises:
    ValueError: If the splits lists have differing lengths.
  """
  error_msg = ('Inputs must have identical ragged splits. '
               f'Input received: {nested_splits_lists}')
  reference = nested_splits_lists[0]
  # Static shape check: every list must have the same ragged rank.
  if any(len(splits) != len(reference) for splits in nested_splits_lists):
    raise ValueError(error_msg)
  assertions = []
  for other in nested_splits_lists[1:]:
    for ref_splits, other_splits in zip(reference, other):
      assertions.append(
          tf.compat.v1.assert_equal(ref_splits, other_splits,
                                    message=error_msg))
  return assertions
| 34,384 | 39.548349 | 101 | py |
keras | keras-master/keras/utils/conv_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv_utils."""
import tensorflow.compat.v2 as tf
import itertools
from absl.testing import parameterized
import numpy as np
from keras.utils import conv_utils
def _get_const_output_shape(input_shape, dim):
  """Clamps every entry of `input_shape` to at most `dim`; returns a tuple."""
  clamped = []
  for extent in input_shape:
    clamped.append(min(extent, dim))
  return tuple(clamped)
# Shapes used to parameterize every test in `TestConvUtils` below: they cover
# zero-sized dimensions, ranks 1 through 6, and mixtures of both.
input_shapes = [
    (0,),
    (0, 0),
    (1,),
    (2,),
    (3,),
    (1, 0),
    (0, 3),
    (1, 1),
    (1, 2),
    (3, 1),
    (2, 2),
    (3, 3),
    (1, 0, 1),
    (5, 2, 3),
    (3, 5, 6, 7, 0),
    (3, 2, 2, 4, 4),
    (1, 2, 3, 4, 7, 2),
]
class TestBasicConvUtilsTest(tf.test.TestCase):
  """Unit tests for the format/length helpers in `conv_utils`."""
  def test_convert_data_format(self):
    """`convert_data_format` maps Keras layout names to TF layout strings."""
    self.assertEqual('NCDHW', conv_utils.convert_data_format(
        'channels_first', 5))
    self.assertEqual('NCHW', conv_utils.convert_data_format(
        'channels_first', 4))
    self.assertEqual('NCW', conv_utils.convert_data_format('channels_first', 3))
    self.assertEqual('NHWC', conv_utils.convert_data_format('channels_last', 4))
    self.assertEqual('NWC', conv_utils.convert_data_format('channels_last', 3))
    self.assertEqual('NDHWC', conv_utils.convert_data_format(
        'channels_last', 5))
    with self.assertRaises(ValueError):
      conv_utils.convert_data_format('invalid', 2)
  def test_normalize_tuple(self):
    """Scalars broadcast to n-tuples; wrong lengths and None are rejected."""
    self.assertEqual((2, 2, 2),
                     conv_utils.normalize_tuple(2, n=3, name='strides'))
    self.assertEqual((2, 1, 2),
                     conv_utils.normalize_tuple((2, 1, 2), n=3, name='strides'))
    with self.assertRaises(ValueError):
      conv_utils.normalize_tuple((2, 1), n=3, name='strides')
    with self.assertRaises(ValueError):
      conv_utils.normalize_tuple(None, n=3, name='strides')
  def test_normalize_data_format(self):
    """Data format strings are lower-cased and validated."""
    self.assertEqual('channels_last',
                     conv_utils.normalize_data_format('Channels_Last'))
    self.assertEqual('channels_first',
                     conv_utils.normalize_data_format('CHANNELS_FIRST'))
    with self.assertRaises(ValueError):
      conv_utils.normalize_data_format('invalid')
  def test_normalize_padding(self):
    """Padding strings are lower-cased and validated."""
    self.assertEqual('same', conv_utils.normalize_padding('SAME'))
    self.assertEqual('valid', conv_utils.normalize_padding('VALID'))
    with self.assertRaises(ValueError):
      conv_utils.normalize_padding('invalid')
  def test_conv_output_length(self):
    """Forward conv output lengths for same/valid/full padding and strides."""
    self.assertEqual(4, conv_utils.conv_output_length(4, 2, 'same', 1, 1))
    self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'same', 2, 1))
    self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'valid', 1, 1))
    self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'valid', 2, 1))
    self.assertEqual(5, conv_utils.conv_output_length(4, 2, 'full', 1, 1))
    self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'full', 2, 1))
    self.assertEqual(2, conv_utils.conv_output_length(5, 2, 'valid', 2, 2))
  def test_conv_input_length(self):
    """`conv_input_length` inverts `conv_output_length` per padding mode."""
    self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'same', 1))
    self.assertEqual(2, conv_utils.conv_input_length(2, 2, 'same', 2))
    self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'valid', 1))
    self.assertEqual(4, conv_utils.conv_input_length(2, 2, 'valid', 2))
    self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'full', 1))
    self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'full', 2))
  def test_deconv_output_length(self):
    """Transposed-conv output lengths, incl. output_padding and dilation."""
    self.assertEqual(4, conv_utils.deconv_output_length(4, 2, 'same', stride=1))
    self.assertEqual(8, conv_utils.deconv_output_length(4, 2, 'same', stride=2))
    self.assertEqual(5, conv_utils.deconv_output_length(
        4, 2, 'valid', stride=1))
    self.assertEqual(8, conv_utils.deconv_output_length(
        4, 2, 'valid', stride=2))
    self.assertEqual(3, conv_utils.deconv_output_length(4, 2, 'full', stride=1))
    self.assertEqual(6, conv_utils.deconv_output_length(4, 2, 'full', stride=2))
    self.assertEqual(
        5,
        conv_utils.deconv_output_length(
            4, 2, 'same', output_padding=2, stride=1))
    self.assertEqual(
        7,
        conv_utils.deconv_output_length(
            4, 2, 'same', output_padding=1, stride=2))
    self.assertEqual(
        7,
        conv_utils.deconv_output_length(
            4, 2, 'valid', output_padding=2, stride=1))
    self.assertEqual(
        9,
        conv_utils.deconv_output_length(
            4, 2, 'valid', output_padding=1, stride=2))
    self.assertEqual(
        5,
        conv_utils.deconv_output_length(
            4, 2, 'full', output_padding=2, stride=1))
    self.assertEqual(
        7,
        conv_utils.deconv_output_length(
            4, 2, 'full', output_padding=1, stride=2))
    self.assertEqual(
        5,
        conv_utils.deconv_output_length(
            4, 2, 'same', output_padding=1, stride=1, dilation=2))
    self.assertEqual(
        12,
        conv_utils.deconv_output_length(
            4, 2, 'valid', output_padding=2, stride=2, dilation=3))
    self.assertEqual(
        6,
        conv_utils.deconv_output_length(
            4, 2, 'full', output_padding=2, stride=2, dilation=3))
@parameterized.parameters(input_shapes)
class TestConvUtils(tf.test.TestCase, parameterized.TestCase):
  """Exercises `conv_utils.conv_kernel_mask` over the shapes defined above.

  Fixes relative to the previous version: the deprecated `np.bool` alias
  (removed in NumPy 1.24) is replaced by the builtin `bool`, and list-based
  mixed int/slice indexing (rejected by modern NumPy) is replaced by tuple
  indexing.
  """

  def test_conv_kernel_mask_fc(self, *input_shape):
    """A kernel covering the whole input acts like a fully-connected mask."""
    padding = 'valid'
    kernel_shape = input_shape
    ndims = len(input_shape)
    strides = (1,) * ndims
    output_shape = _get_const_output_shape(input_shape, dim=1)
    # `bool` is the supported spelling; `np.bool` was removed in NumPy 1.24.
    mask = np.ones(input_shape + output_shape, bool)
    self.assertAllEqual(
        mask,
        conv_utils.conv_kernel_mask(
            input_shape,
            kernel_shape,
            strides,
            padding
        )
    )

  def test_conv_kernel_mask_diag(self, *input_shape):
    """A 1x...x1 kernel with unit stride yields identity connectivity."""
    ndims = len(input_shape)
    kernel_shape = (1,) * ndims
    strides = (1,) * ndims
    for padding in ['valid', 'same']:
      mask = np.identity(int(np.prod(input_shape)), bool)
      mask = np.reshape(mask, input_shape * 2)
      self.assertAllEqual(
          mask,
          conv_utils.conv_kernel_mask(
              input_shape,
              kernel_shape,
              strides,
              padding
          )
      )

  def test_conv_kernel_mask_full_stride(self, *input_shape):
    """Striding by the full input extent keeps only the corner connection."""
    padding = 'valid'
    ndims = len(input_shape)
    kernel_shape = (1,) * ndims
    strides = tuple([max(d, 1) for d in input_shape])
    output_shape = _get_const_output_shape(input_shape, dim=1)
    mask = np.zeros(input_shape + output_shape, bool)
    if all(d > 0 for d in mask.shape):  # pylint: disable=not-an-iterable
      mask[(0,) * len(output_shape)] = True
    self.assertAllEqual(
        mask,
        conv_utils.conv_kernel_mask(
            input_shape,
            kernel_shape,
            strides,
            padding
        )
    )

  def test_conv_kernel_mask_almost_full_stride(self, *input_shape):
    """Stride (d - 1) connects only the corners of input and output."""
    padding = 'valid'
    ndims = len(input_shape)
    kernel_shape = (1,) * ndims
    strides = tuple([max(d - 1, 1) for d in input_shape])
    output_shape = _get_const_output_shape(input_shape, dim=2)
    mask = np.zeros(input_shape + output_shape, bool)
    if all(d > 0 for d in mask.shape):  # pylint: disable=not-an-iterable
      for in_position in itertools.product(*[[0, d - 1] for d in input_shape]):
        out_position = tuple([min(p, 1) for p in in_position])
        mask[in_position + out_position] = True
    self.assertAllEqual(
        mask,
        conv_utils.conv_kernel_mask(
            input_shape,
            kernel_shape,
            strides,
            padding
        )
    )

  def test_conv_kernel_mask_rect_kernel(self, *input_shape):
    """A kernel spanning one full axis connects every position on that axis."""
    padding = 'valid'
    ndims = len(input_shape)
    strides = (1,) * ndims
    for d in range(ndims):
      kernel_shape = [1] * ndims
      kernel_shape[d] = input_shape[d]
      output_shape = list(input_shape)
      output_shape[d] = min(1, input_shape[d])
      mask = np.identity(int(np.prod(input_shape)), bool)
      mask = np.reshape(mask, input_shape * 2)
      for p in itertools.product(*[range(input_shape[dim])
                                   for dim in range(ndims)]):
        p = list(p)
        p[d] = slice(None)
        # NumPy requires a tuple (not a list) for a multidimensional index
        # that mixes ints and slices; list indices of this form now raise.
        mask[tuple(p * 2)] = True
      mask = np.take(mask, range(0, min(1, input_shape[d])), ndims + d)
      self.assertAllEqual(
          mask,
          conv_utils.conv_kernel_mask(
              input_shape,
              kernel_shape,
              strides,
              padding
          )
      )

  def test_conv_kernel_mask_wrong_padding(self, *input_shape):
    """'valid' and 'same' are accepted; 'full' raises NotImplementedError."""
    ndims = len(input_shape)
    kernel_shape = (1,) * ndims
    strides = (1,) * ndims
    conv_utils.conv_kernel_mask(
        input_shape,
        kernel_shape,
        strides,
        'valid'
    )
    conv_utils.conv_kernel_mask(
        input_shape,
        kernel_shape,
        strides,
        'same'
    )
    self.assertRaises(NotImplementedError,
                      conv_utils.conv_kernel_mask,
                      input_shape, kernel_shape, strides, 'full')

  def test_conv_kernel_mask_wrong_dims(self, *input_shape):
    """Kernel/stride ranks that disagree with the input rank raise."""
    kernel_shape = 1
    strides = 1
    conv_utils.conv_kernel_mask(
        input_shape,
        kernel_shape,
        strides,
        'valid'
    )
    ndims = len(input_shape)
    kernel_shape = (2,) * (ndims + 1)
    self.assertRaises(ValueError,
                      conv_utils.conv_kernel_mask,
                      input_shape, kernel_shape, strides, 'same')
    strides = (1,) * ndims
    self.assertRaises(ValueError,
                      conv_utils.conv_kernel_mask,
                      input_shape, kernel_shape, strides, 'valid')
    kernel_shape = (1,) * ndims
    strides = (2,) * (ndims - 1)
    self.assertRaises(ValueError,
                      conv_utils.conv_kernel_mask,
                      input_shape, kernel_shape, strides, 'valid')
    strides = (2,) * ndims
    conv_utils.conv_kernel_mask(
        input_shape,
        kernel_shape,
        strides,
        'valid'
    )
# Standard test entry point.
if __name__ == '__main__':
  tf.test.main()
| 10,703 | 30.668639 | 80 | py |
keras | keras-master/keras/utils/composite_tensor_support_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras composite tensor support."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import scipy.sparse
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import input_layer
from keras.layers import core
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import Layer
# Define test-only Layer classes to validate passing Sparse and Ragged tensors
# between layers.
class ToDense(Layer):
  """Converts ragged/sparse/dense input into a dense float32 tensor."""

  def __init__(self, default_value, **kwargs):
    super(ToDense, self).__init__(**kwargs)
    self._default_value = default_value

  def call(self, inputs):
    tensor = inputs
    if isinstance(tensor, dict):  # Dicts are no longer flattened.
      # These tests only ever pass a single-entry dict.
      tensor = tf.nest.flatten(tensor)[0]
    if isinstance(tensor, tf.RaggedTensor):
      dense = tensor.to_tensor(default_value=self._default_value)
    elif isinstance(tensor, tf.SparseTensor):
      dense = tf.sparse.to_dense(tensor, default_value=self._default_value)
    elif isinstance(tensor, tf.Tensor):
      dense = tensor
    else:
      raise TypeError("Unexpected tensor type %s" % type(tensor).__name__)
    # Cast to float so models can compile with this as the final layer.
    return tf.cast(dense, tf.float32)
class ToRagged(Layer):
  """Converts a dense tensor into a `tf.RaggedTensor` by stripping padding."""

  def __init__(self, padding, ragged_rank=1, **kwargs):
    super(ToRagged, self).__init__(**kwargs)
    # Value treated as padding when converting to ragged, and target rank.
    self._padding = padding
    self._ragged_rank = ragged_rank

  def call(self, inputs):
    return tf.RaggedTensor.from_tensor(
        inputs, padding=self._padding, ragged_rank=self._ragged_rank)
class ToSparse(Layer):
  """Converts a dense tensor into a `tf.SparseTensor` of its nonzeros."""

  def call(self, inputs):
    nonzero_indices = tf.where(tf.not_equal(inputs, 0))
    nonzero_values = tf.gather_nd(inputs, nonzero_indices)
    dense_shape = tf.shape(inputs, out_type=tf.int64)
    return tf.SparseTensor(
        nonzero_indices, nonzero_values, dense_shape=dense_shape)
class _SubclassModel(keras.Model):
  """A Keras subclass model that chains a flat list of layers."""

  def __init__(self, layers, i_layer=None):
    super(_SubclassModel, self).__init__()
    # clone-and-build doesn't support lists of layers in subclassed models,
    # so each layer is attached to the model as its own attribute.
    for idx, layer in enumerate(layers):
      setattr(self, self._layer_name_for_i(idx), layer)
    self.num_layers = len(layers)
    if i_layer is not None:
      self._set_inputs(i_layer)

  def _layer_name_for_i(self, i):
    return "layer{}".format(i)

  def call(self, inputs, **kwargs):
    outputs = inputs
    for idx in range(self.num_layers):
      outputs = getattr(self, self._layer_name_for_i(idx))(outputs)
    return outputs
def get_model_from_layers_with_input(layers,
                                     input_shape=None,
                                     input_dtype=None,
                                     model_input=None):
  """Builds a model from a sequence of layers.

  At most one of `input_shape` and `model_input` may be provided; the model
  flavor (subclass / sequential / functional) follows the current test type.
  """
  if model_input is not None and input_shape is not None:
    raise ValueError("Cannot specify a model_input and an input shape.")
  model_type = testing_utils.get_model_type()
  if model_type == "subclass":
    return _SubclassModel(layers, model_input)
  if model_type == "sequential":
    sequential = keras.models.Sequential()
    if model_input is not None:
      sequential.add(model_input)
    elif input_shape is not None:
      sequential.add(keras.Input(shape=input_shape, dtype=input_dtype))
    for layer in layers:
      sequential.add(layer)
    return sequential
  if model_type == "functional":
    if model_input is not None:
      inputs = model_input
    else:
      if not input_shape:
        raise ValueError("Cannot create a functional model from layers with no "
                         "input shape.")
      inputs = keras.Input(shape=input_shape, dtype=input_dtype)
    # Thread the symbolic tensor through every layer in order.
    outputs = inputs
    for layer in layers:
      outputs = layer(outputs)
    return keras.Model(inputs, outputs)
  raise ValueError("Unknown model type {}".format(model_type))
def get_test_mode_kwargs():
  """Returns `compile()` kwargs reflecting the current eager/graph mode."""
  return {"run_eagerly": testing_utils.should_run_eagerly()}
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CompositeTensorInternalTest(keras_parameterized.TestCase):
  """Tests composite tensors flowing between layers inside a model."""
  def test_internal_ragged_tensors(self):
    # Create a model that accepts an input, converts it to Ragged, and
    # converts the ragged tensor back to a dense tensor.
    layers = [ToRagged(padding=0), ToDense(default_value=-1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    expected_output = np.array([[1, -1], [2, 3]])
    output = model.predict(input_data)
    self.assertAllEqual(expected_output, output)
  def test_internal_sparse_tensors(self):
    # Create a model that accepts an input, converts it to Sparse, and
    # converts the sparse tensor back to a dense tensor.
    layers = [ToSparse(), ToDense(default_value=-1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    expected_output = np.array([[1, -1, -1], [2, 3, -1]])
    output = model.predict(input_data)
    self.assertAllEqual(expected_output, output)
  def test_training_internal_ragged_tensors(self):
    # Create a model that implements y=Mx. This is easy to learn and will
    # demonstrate appropriate gradient passing. (We have to use RaggedTensors
    # for this test, as ToSparse() doesn't support gradient propagation through
    # the layer.) TODO(b/124796939): Investigate this.
    layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
    input_data = np.random.rand(1024, 1)
    expected_data = np.concatenate((input_data * 3, input_data * .5), axis=-1)
    model.compile(loss="mse", optimizer="adam", **get_test_mode_kwargs())
    history = model.fit(input_data, expected_data, epochs=10, verbose=0)
    # If the model trained, the loss stored at history[0] should be different
    # than the one stored at history[-1].
    self.assertNotEqual(history.history["loss"][-1], history.history["loss"][0])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CompositeTensorOutputTest(keras_parameterized.TestCase):
  """Tests models whose final output is a ragged or sparse tensor."""
  def test_ragged_tensor_outputs(self):
    # Create a model that accepts a dense input and produces a ragged output.
    layers = [ToRagged(padding=0)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    model._run_eagerly = testing_utils.should_run_eagerly()
    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data)
    expected_values = [[1], [2, 3]]
    self.assertAllEqual(expected_values, output)
  def test_ragged_tensor_rebatched_outputs(self):
    # Same as above, but predict with a batch size smaller than the data so
    # the per-batch ragged outputs must be re-assembled.
    layers = [ToRagged(padding=0)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    model._run_eagerly = testing_utils.should_run_eagerly()
    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]])
    output = model.predict(input_data, batch_size=2)
    expected_values = [[1], [2, 3], [4], [5, 6]]
    self.assertAllEqual(expected_values, output)
  def test_sparse_tensor_outputs(self):
    # Create a model that accepts a dense input and produces a sparse output.
    layers = [ToSparse()]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    model._run_eagerly = testing_utils.should_run_eagerly()
    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data)
    expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
    expected_values = np.array([1, 2, 3])
    expected_dense_shape = np.array([2, 3])
    self.assertAllEqual(output.indices, expected_indices)
    self.assertAllEqual(output.values, expected_values)
    self.assertAllEqual(output.dense_shape, expected_dense_shape)
  def test_sparse_tensor_rebatched_outputs(self):
    # Same as above, but predict with a batch size smaller than the data so
    # the per-batch sparse outputs must be re-assembled.
    layers = [ToSparse()]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    model._run_eagerly = testing_utils.should_run_eagerly()
    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]])
    output = model.predict(input_data, batch_size=2)
    expected_indices = np.array([[0, 0], [1, 0], [1, 1], [2, 0], [3, 0], [3,
                                                                          1]])
    expected_values = np.array([1, 2, 3, 4, 5, 6])
    expected_dense_shape = np.array([4, 3])
    self.assertAllEqual(output.indices, expected_indices)
    self.assertAllEqual(output.values, expected_values)
    self.assertAllEqual(output.dense_shape, expected_dense_shape)
def get_input_name(use_dict):
  """Picks the Input-layer name used by the dict-input test variants."""
  if not use_dict:
    # Same as not setting 'name' at all.
    return None
  if testing_utils.get_model_type() == "subclass":
    # Subclass models don't support custom input names.
    return "input_1"
  return "test_input_name"
def get_kwargs(use_dataset, action="predict"):
  """Returns the steps/batch kwargs matching the data style and action."""
  if use_dataset or not tf.executing_eagerly():
    # Dataset and graph-mode paths are driven by step counts.
    steps_key = "steps_per_epoch" if action == "fit" else "steps"
    return {steps_key: 1}
  return {"batch_size": 2}
def prepare_inputs(data, use_dict, use_dataset, action, input_name):
  """Packages an (input, expected_output) pair for the requested test mode."""
  inputs, expected = data
  batch_size = inputs.shape[0]
  if use_dict:
    # Key the input by the model's input name.
    inputs = {input_name: inputs}
  if use_dataset:
    if action == "predict":
      inputs = tf.data.Dataset.from_tensor_slices(inputs).batch(batch_size)
    else:
      inputs = tf.data.Dataset.from_tensor_slices(
          (inputs, expected)).batch(batch_size)
    # Targets travel inside the dataset, so none are returned separately.
    expected = None
  return (inputs, expected)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
    *testing_utils.generate_combinations_with_testcase_name(
        use_dict=[True, False],
        use_dataset=[True, False],
        action=["predict", "evaluate", "fit"]))
class SparseTensorInputTest(keras_parameterized.TestCase):
  """Feeds SparseTensor model inputs through predict/evaluate/fit."""
  def test_sparse_tensors(self, use_dict, use_dataset, action):
    data = [(tf.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]],
                             [1, 2, 3], [2, 1, 3]),
             np.array([[[1, -1, -1]], [[2, 3, -1]]])),
            (tf.SparseTensor(
                [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8],
                [3, 1, 4]),
             np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1,
                                                              -1]]]))]
    # Prepare the model to test.
    input_name = get_input_name(use_dict)
    model_input = input_layer.Input(
        shape=(1, None), sparse=True, name=input_name, dtype=tf.int32)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    model.compile(
        optimizer="sgd",
        loss="mse",
        metrics=["accuracy"],
        **get_test_mode_kwargs())
    kwargs = get_kwargs(use_dataset, action)
    # Prepare the input data
    for data_element in data:
      input_data, expected_output = prepare_inputs(data_element, use_dict,
                                                   use_dataset, action,
                                                   input_name)
      # Perform the action.
      if action == "predict":
        result = model.predict(input_data, **kwargs)
        self.assertAllEqual(expected_output, result)
      if action == "evaluate":
        result = model.evaluate(input_data, expected_output, **kwargs)
        self.assertAllEqual(1.0, result[-1])
      if action == "fit":
        # TODO(momernick): What's the best way of validating that fit happened?
        _ = model.fit(input_data, expected_output, shuffle=False, **kwargs)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ScipySparseTensorInputTest(keras_parameterized.TestCase,
                                 tf.test.TestCase):
  """Feeds `scipy.sparse` matrices as model inputs."""
  def test_sparse_scipy_predict_inputs_via_input_layer_args(self):
    # Create a model that accepts a sparse input and converts the sparse tensor
    # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use
    # a one-dimensional shape; note also that scipy's default dtype is int64.
    model_input = input_layer.Input(shape=(3,), sparse=True, dtype=tf.int64)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])),
                                         shape=[2, 3])
    expected_output = np.array([[1, -1, -1], [2, 3, -1]])
    output = model.predict(input_data, steps=1)
    self.assertAllEqual(expected_output, output)
    input_data_2 = scipy.sparse.coo_matrix(
        ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3])
    expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]])
    output_2 = model.predict(input_data_2, steps=1)
    self.assertAllEqual(expected_output_2, output_2)
  def test_sparse_scipy_eval_inputs(self):
    # Create a model that accepts a sparse input and converts the sparse tensor
    # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use
    # a one-dimensional shape; note also that scipy's default dtype is int64.
    model_input = input_layer.Input(shape=(3,), sparse=True, dtype=tf.int64)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    model.compile(
        optimizer="sgd",
        loss="mse",
        metrics=["accuracy"])
    input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])),
                                         shape=[2, 3])
    expected_output = np.array([[1, -1, -1], [2, 3, -1]])
    output = model.evaluate(input_data, expected_output, steps=1)
    self.assertAllEqual(1.0, output[-1])
    input_data_2 = scipy.sparse.coo_matrix(
        ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3])
    expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]])
    output_2 = model.evaluate(input_data_2, expected_output_2, steps=1)
    self.assertAllEqual(1.0, output_2[-1])
  def test_sparse_scipy_predict_input_dicts_via_input_layer_args(self):
    # Create a model that accepts a sparse input and converts the sparse tensor
    # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use
    # a one-dimensional shape; note also that scipy's default dtype is int64.
    if testing_utils.get_model_type() == "subclass":
      input_name = "input_1"  # Subclass models don't support input names.
    else:
      input_name = "test_input_name"
    model_input = input_layer.Input(
        shape=(3,), sparse=True, name=input_name, dtype=tf.int64)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    input_data = {
        input_name:
            scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])),
                                    shape=[2, 3])
    }
    expected_output = np.array([[1, -1, -1], [2, 3, -1]])
    output = model.predict(input_data, steps=1)
    self.assertAllEqual(expected_output, output)
    input_data_2 = {
        input_name:
            scipy.sparse.coo_matrix(
                ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3])
    }
    expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]])
    output_2 = model.predict(input_data_2, steps=1)
    self.assertAllEqual(expected_output_2, output_2)
  def test_sparse_scipy_eval_input_dicts(self):
    # Create a model that accepts a sparse input and converts the sparse tensor
    # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use
    # a one-dimensional shape; note also that scipy's default dtype is int64.
    if testing_utils.get_model_type() == "subclass":
      input_name = "input_1"  # Subclass models don't support input names.
    else:
      input_name = "test_input_name"
    model_input = input_layer.Input(
        shape=(3,), sparse=True, name=input_name, dtype=tf.int64)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    model.compile(
        optimizer="sgd",
        loss="mse",
        metrics=["accuracy"])
    input_data = {
        input_name:
            scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])),
                                    shape=[2, 3])
    }
    expected_output = np.array([[1, -1, -1], [2, 3, -1]])
    output = model.evaluate(input_data, expected_output, steps=1)
    self.assertAllEqual(1.0, output[-1])
    input_data_2 = {
        input_name:
            scipy.sparse.coo_matrix(
                ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3])
    }
    expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]])
    output_2 = model.evaluate(input_data_2, expected_output_2, steps=1)
    self.assertAllEqual(1.0, output_2[-1])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
    *testing_utils.generate_combinations_with_testcase_name(
        use_dict=[True, False],
        use_dataset=[True, False],
        action=["predict", "evaluate", "fit"]))
class RaggedTensorInputTest(keras_parameterized.TestCase,
                            tf.test.TestCase):
  """Feeds RaggedTensor model inputs through predict/evaluate/fit."""
  def test_ragged_input(self, use_dict, use_dataset, action):
    data = [(tf.ragged.constant([[[1]], [[2, 3]]]),
             np.array([[[1, -1]], [[2, 3]]]))]
    # Prepare the model to test.
    input_name = get_input_name(use_dict)
    model_input = input_layer.Input(
        shape=(None, None), ragged=True, name=input_name, dtype=tf.int32,
        batch_size=2)
    self.assertIsInstance(model_input._type_spec,
                          tf.RaggedTensorSpec)
    self.assertEqual(model_input.shape.as_list(), [2, None, None])
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    model.compile(
        optimizer="sgd",
        loss="mse",
        metrics=["accuracy"],
        **get_test_mode_kwargs())
    # Prepare the input data
    for data_element in data:
      input_data, expected_output = prepare_inputs(data_element, use_dict,
                                                   use_dataset, action,
                                                   input_name)
      # Perform the action.
      if action == "predict":
        result = model.predict(input_data)
        self.assertAllEqual(expected_output, result)
      if action == "evaluate":
        result = model.evaluate(input_data, expected_output)
        self.assertAllEqual(1.0, result[-1])
      if action == "fit":
        # TODO(momernick): What's the best way of validating that fit happened?
        _ = model.fit(input_data, expected_output, shuffle=False)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
    *testing_utils.generate_combinations_with_testcase_name(
        use_dict=[True, False], use_dataset=[True, False]))
class RaggedTensorInputValidationTest(keras_parameterized.TestCase,
                                      tf.test.TestCase):
  """Validates ragged inputs against partially/fully known input shapes."""
  def test_ragged_tensor_input_with_one_none_dimension(self, use_dict,
                                                       use_dataset):
    # Define some input data.
    data = [(tf.ragged.constant([[[1, 0]], [[2, 3]]], ragged_rank=1),
             np.array([[[1, 0]], [[2, 3]]]))]
    # Prepare the model to test.
    input_shape = (None, 2)  # RaggedTensorInputTest uses (None, None).
    input_name = get_input_name(use_dict)
    model_input = input_layer.Input(
        shape=input_shape, ragged=True, name=input_name, dtype=tf.int32)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    model.compile(
        optimizer="sgd",
        loss="mse",
        metrics=["accuracy"],
        **get_test_mode_kwargs())
    for data_element in data:
      input_data, expected_output = prepare_inputs(
          data_element,
          use_dict,
          use_dataset,
          action="predict",
          input_name=input_name)
      result = model.predict(input_data)
      self.assertAllEqual(expected_output, result)
  def test_ragged_tensor_input_with_no_none_dimension(self, use_dict,
                                                      use_dataset):
    # Define some input data.
    data = [(tf.ragged.constant([[[1, 0]], [[2, 3]]], ragged_rank=0),
             np.array([[[1, 0]], [[2, 3]]]))]
    # Prepare the model to test.
    input_shape = (1, 2)  # RaggedTensorInputTest uses (None, None).
    input_name = get_input_name(use_dict)
    model_input = input_layer.Input(
        shape=input_shape, ragged=True, name=input_name, dtype=tf.int32)
    layers = [ToDense(default_value=-1)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    model.compile(
        optimizer="sgd",
        loss="mse",
        metrics=["accuracy"],
        **get_test_mode_kwargs())
    kwargs = get_kwargs(use_dataset)
    for data_element in data:
      input_data, expected_output = prepare_inputs(
          data_element,
          use_dict,
          use_dataset,
          action="predict",
          input_name=input_name)
      result = model.predict(input_data, **kwargs)
      self.assertAllEqual(expected_output, result)
@keras_parameterized.run_with_all_model_types()
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CompositeTensorModelPredictTest(keras_parameterized.TestCase):
  """Checks output shapes when calling/predicting on composite inputs."""
  def _normalize_shape(self, shape):
    """Coerces a TensorShape (or tuple) into a plain tuple for comparison."""
    if not isinstance(shape, tuple):
      shape = tuple(shape.as_list())
    return shape
  def test_sparse_tensor_model_predict(self):
    # Create a model that accepts a sparse input and runs a "Dense" layer on it.
    model_input = input_layer.Input(
        shape=(3,), sparse=True, dtype=tf.float32)
    self.assertEqual([None, 3], model_input.shape.as_list())
    layers = [Dense(2)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    sparse_input = tf.SparseTensor(
        # A two-row matrix
        indices=[(0, 0), (0, 1), (0, 2), (5, 0), (5, 1), (5, 2)],
        values=[1., 1., 1., 1., 1., 1.],
        dense_shape=(6, 3))
    shape = model(sparse_input).shape
    self.assertEqual((6, 2), self._normalize_shape(shape))
    shape = model.predict(sparse_input, steps=1).shape
    self.assertEqual((6, 2), self._normalize_shape(shape))
  def test_ragged_tensor_model_predict(self):
    # Create a model that accepts a ragged input and runs an Embedding layer.
    model_input = input_layer.Input(shape=(None,), ragged=True)
    self.assertEqual([None, None], model_input.shape.as_list())
    layers = [Embedding(input_dim=7, output_dim=5)]
    model = get_model_from_layers_with_input(layers, model_input=model_input)
    ragged_input = tf.ragged.constant([
        [1, 2, 3, 4, 5],
        [2, 4],
    ])
    shape = model(ragged_input).shape
    self.assertEqual((2, None, 5), self._normalize_shape(shape))
    shape = model.predict(ragged_input, steps=1).shape
    self.assertEqual((2, None, 5), self._normalize_shape(shape))
# Standard test entry point.
if __name__ == "__main__":
  tf.test.main()
| 25,157 | 38.186916 | 80 | py |
keras | keras-master/keras/utils/np_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
  """Converts a class vector (integers) to binary class matrix.

  E.g. for use with `categorical_crossentropy`.

  Args:
    y: Array-like with class values to be converted into a matrix
      (integers from 0 to `num_classes - 1`).
    num_classes: Total number of classes. If `None`, this would be inferred
      as `max(y) + 1`.
    dtype: The data type expected by the input. Default: `'float32'`.

  Returns:
    A binary matrix representation of the input; the class axis is placed
    last. For example `to_categorical([0, 2], 3)` yields
    `[[1, 0, 0], [0, 0, 1]]`.
  """
  y = np.array(y, dtype='int')
  original_shape = y.shape
  # A trailing unit dimension (e.g. a column vector of labels) is squeezed
  # away so that the new one-hot axis takes its place.
  if original_shape and len(original_shape) > 1 and original_shape[-1] == 1:
    original_shape = tuple(original_shape[:-1])
  flat_labels = y.ravel()
  if not num_classes:
    num_classes = np.max(flat_labels) + 1
  count = flat_labels.shape[0]
  one_hot = np.zeros((count, num_classes), dtype=dtype)
  one_hot[np.arange(count), flat_labels] = 1
  # Restore the original leading dimensions, with classes as the last axis.
  return np.reshape(one_hot, original_shape + (num_classes,))
@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
  """Normalizes a Numpy array.

  Args:
    x: Numpy array to normalize.
    axis: axis along which to normalize.
    order: Normalization order (e.g. `order=2` for L2 norm).

  Returns:
    A normalized copy of the array.
  """
  norms = np.atleast_1d(np.linalg.norm(x, order, axis))
  # All-zero slices would divide by zero; leave them untouched instead.
  norms[norms == 0] = 1
  return x / np.expand_dims(norms, axis)
| 3,012 | 31.75 | 80 | py |
keras | keras-master/keras/utils/metrics_utils_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics_utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras import combinations
from keras.utils import metrics_utils
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RaggedSizeOpTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `metrics_utils.ragged_assert_compatible_and_get_flat_values`.

  Each test feeds dense and/or ragged tensors (optionally with a mask) and
  checks that the returned flat values have mutually compatible shapes, or
  that incompatible rank combinations raise `ValueError`.
  """
  @parameterized.parameters([
      {
          'x_list': [1],
          'y_list': [2]
      },
      {
          'x_list': [1, 2],
          'y_list': [2, 3]
      },
      {
          'x_list': [1, 2, 4],
          'y_list': [2, 3, 5]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'y_list': [[2, 3], [5, 6]]
      },
  ])
  def test_passing_dense_tensors(self, x_list, y_list):
    # Dense inputs should pass through with compatible shapes.
    x = tf.constant(x_list)
    y = tf.constant(y_list)
    [x,
     y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
    x.shape.assert_is_compatible_with(y.shape)
  @parameterized.parameters([
      {
          'x_list': [1],
      },
      {
          'x_list': [1, 2],
      },
      {
          'x_list': [1, 2, 4],
      },
      {
          'x_list': [[1, 2], [3, 4]],
      },
  ])
  def test_passing_one_dense_tensor(self, x_list):
    # A single dense tensor should be accepted without error.
    x = tf.constant(x_list)
    [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])
  @parameterized.parameters([
      {
          'x_list': [1],
          'y_list': [2]
      },
      {
          'x_list': [1, 2],
          'y_list': [2, 3]
      },
      {
          'x_list': [1, 2, 4],
          'y_list': [2, 3, 5]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'y_list': [[2, 3], [5, 6]]
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
          'y_list': [[2, 3], [5, 6], [3]]
      },
      {
          'x_list': [[1, 2], [], [1]],
          'y_list': [[2, 3], [], [3]]
      },
  ])
  def test_passing_both_ragged(self, x_list, y_list):
    # Two ragged tensors with identical row structure should be flattened
    # to compatible dense values.
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    [x,
     y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
    x.shape.assert_is_compatible_with(y.shape)
  @parameterized.parameters([
      {
          'x_list': [1],
      },
      {
          'x_list': [1, 2],
      },
      {
          'x_list': [1, 2, 4],
      },
      {
          'x_list': [[1, 2], [3, 4]],
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
      },
      {
          'x_list': [[1, 2], [], [1]],
      },
  ])
  def test_passing_one_ragged(self, x_list):
    # A single ragged tensor should be accepted without error.
    x = tf.ragged.constant(x_list)
    [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])
  @parameterized.parameters([
      {
          'x_list': [1],
          'y_list': [2],
          'mask_list': [0]
      },
      {
          'x_list': [1, 2],
          'y_list': [2, 3],
          'mask_list': [0, 1]
      },
      {
          'x_list': [1, 2, 4],
          'y_list': [2, 3, 5],
          'mask_list': [1, 1, 1]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'y_list': [[2, 3], [5, 6]],
          'mask_list': [[1, 1], [0, 1]]
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
          'y_list': [[2, 3], [5, 6], [3]],
          'mask_list': [[1, 1], [0, 0], [1]]
      },
      {
          'x_list': [[1, 2], [], [1]],
          'y_list': [[2, 3], [], [3]],
          'mask_list': [[1, 1], [], [0]]
      },
  ])
  def test_passing_both_ragged_with_mask(self, x_list, y_list, mask_list):
    # The mask must come back with a shape compatible with the values.
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    mask = tf.ragged.constant(mask_list)
    [x, y], mask = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y], mask)
    x.shape.assert_is_compatible_with(y.shape)
    y.shape.assert_is_compatible_with(mask.shape)
  @parameterized.parameters([
      {
          'x_list': [1],
          'mask_list': [0]
      },
      {
          'x_list': [1, 2],
          'mask_list': [0, 1]
      },
      {
          'x_list': [1, 2, 4],
          'mask_list': [1, 1, 1]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'mask_list': [[1, 1], [0, 1]]
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
          'mask_list': [[1, 1], [0, 0], [1]]
      },
      {
          'x_list': [[1, 2], [], [1]],
          'mask_list': [[1, 1], [], [0]]
      },
  ])
  def test_passing_one_ragged_with_mask(self, x_list, mask_list):
    x = tf.ragged.constant(x_list)
    mask = tf.ragged.constant(mask_list)
    [x], mask = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values([x], mask)
    x.shape.assert_is_compatible_with(mask.shape)
  @parameterized.parameters([
      {
          'x_list': [[[1, 3]]],
          'y_list': [[2, 3]]
      },
  ])
  def test_failing_different_ragged_and_dense_ranks(self, x_list, y_list):
    # Mixing ranks between inputs is rejected.
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y
      ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
  @parameterized.parameters([
      {
          'x_list': [[[1, 3]]],
          'y_list': [[[2, 3]]],
          'mask_list': [[0, 1]]
      },
  ])
  def test_failing_different_mask_ranks(self, x_list, y_list, mask_list):
    # A mask of a different rank than the values is rejected.
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    mask = tf.ragged.constant(mask_list)
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y
      ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y],
                                                                        mask)
  # we do not support such cases that ragged_ranks are different but overall
  # dimension shapes and sizes are identical due to adding too much performance
  # overheads to the overall use cases.
  def test_failing_different_ragged_ranks(self):
    dt = tf.constant([[[1, 2]]])
    # adding a ragged dimension
    x = tf.RaggedTensor.from_row_splits(dt, row_splits=[0, 1])
    y = tf.ragged.constant([[[[1, 2]]]])
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y], _ = \
          metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class FilterTopKTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `metrics_utils._filter_top_k`.

  `_filter_top_k(x, k)` keeps the k largest entries along the last axis and
  replaces every other entry with `metrics_utils.NEG_INF`.
  """
  def test_one_dimensional(self):
    x = tf.constant([.3, .1, .2, -.5, 42.])
    top_1 = self.evaluate(metrics_utils._filter_top_k(x=x, k=1))
    top_2 = self.evaluate(metrics_utils._filter_top_k(x=x, k=2))
    top_3 = self.evaluate(metrics_utils._filter_top_k(x=x, k=3))
    self.assertAllClose(top_1, [
        metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF,
        metrics_utils.NEG_INF, 42.
    ])
    self.assertAllClose(top_2, [
        .3, metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF,
        42.
    ])
    self.assertAllClose(
        top_3, [.3, metrics_utils.NEG_INF, .2, metrics_utils.NEG_INF, 42.])
  def test_three_dimensional(self):
    # Filtering applies independently to each innermost row.
    x = tf.constant([[[.3, .1, .2], [-.3, -.2, -.1]],
                     [[5., .2, 42.], [-.3, -.6, -.99]]])
    top_2 = self.evaluate(metrics_utils._filter_top_k(x=x, k=2))
    self.assertAllClose(
        top_2,
        [[[.3, metrics_utils.NEG_INF, .2], [metrics_utils.NEG_INF, -.2, -.1]],
         [[5., metrics_utils.NEG_INF, 42.], [-.3, -.6, metrics_utils.NEG_INF]]])
  def test_handles_dynamic_shapes(self):
    # See b/150281686.  # GOOGLE_INTERNAL
    def _identity(x):
      return x
    def _filter_top_k(x):
      # Round-tripping through numpy_function loses the static shape, so the
      # op must work when only the dynamic shape is known.
      x = tf.numpy_function(_identity, (x,), tf.float32)
      return metrics_utils._filter_top_k(x=x, k=2)
    x = tf.constant([.3, .1, .2, -.5, 42.])
    top_2 = self.evaluate(_filter_top_k(x))
    self.assertAllClose(top_2, [
        .3, metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF,
        42.
    ])
# Standard entry point: delegate to the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
| 8,678 | 28.420339 | 86 | py |
keras | keras-master/keras/utils/object_identity.py | """Utilities for collecting objects based on "is" comparison."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import weakref
# LINT.IfChange
class _ObjectIdentityWrapper:
  """Wraps an object so equality and hashing follow object identity.

  `__eq__` on the wrapper means `is` on the wrapped object, and `__hash__`
  is the `id()` of the wrapped object.  This makes it safe to put unhashable
  types, such as trackable `_ListWrapper` objects, into identity-keyed
  collections.
  """

  __slots__ = ["_wrapped", "__weakref__"]

  def __init__(self, wrapped):
    self._wrapped = wrapped

  @property
  def unwrapped(self):
    return self._wrapped

  def _assert_type(self, other):
    # Comparing a wrapper against a raw object is almost certainly a bug,
    # so fail loudly instead of silently returning False.
    if not isinstance(other, _ObjectIdentityWrapper):
      raise TypeError(
          "Cannot compare wrapped object with unwrapped object. "
          f"Expect the object to be `_ObjectIdentityWrapper`. Got: {other}")

  def __lt__(self, other):
    self._assert_type(other)
    return id(self._wrapped) < id(other._wrapped)  # pylint: disable=protected-access

  def __gt__(self, other):
    self._assert_type(other)
    return id(self._wrapped) > id(other._wrapped)  # pylint: disable=protected-access

  def __eq__(self, other):
    if other is None:
      return False
    self._assert_type(other)
    return self._wrapped is other._wrapped  # pylint: disable=protected-access

  def __ne__(self, other):
    return not (self == other)

  def __hash__(self):
    # id() of the wrapped object is also fine for weakrefs: CPython reuses
    # ref objects, so weakref.ref(a) is weakref.ref(a); the weak subclass
    # relies on that.
    return id(self._wrapped)

  def __repr__(self):
    return f"<{type(self).__name__} wrapping {self._wrapped!r}>"
class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
  """Identity wrapper that holds its target through a weak reference."""

  __slots__ = ()

  def __init__(self, wrapped):
    super().__init__(weakref.ref(wrapped))

  @property
  def unwrapped(self):
    # Dereference the weakref; returns None once the target is collected.
    return self._wrapped()
class Reference(_ObjectIdentityWrapper):
  """Reference that refers an object.

  ```python
  x = [1]
  y = [1]
  x_ref1 = Reference(x)
  x_ref2 = Reference(x)
  y_ref2 = Reference(y)
  print(x_ref1 == x_ref2)
  ==> True
  print(x_ref1 == y_ref2)
  ==> False
  ```

  Note: comparing a `Reference` against a raw (unwrapped) object raises
  `TypeError`; always compare references to references.
  """

  __slots__ = ()

  # Disabling super class' unwrapped field.
  # A getter-less `property()` makes any access to `ref.unwrapped` raise
  # AttributeError, forcing callers to use the public `deref()` instead.
  unwrapped = property()

  def deref(self):
    """Returns the referenced object.

    ```python
    x_ref = Reference(x)
    print(x is x_ref.deref())
    ==> True
    ```
    """
    return self._wrapped
class ObjectIdentityDictionary(collections.abc.MutableMapping):
  """A mutable mapping whose keys compare with "is" instead of "==".

  Needed because some trackable objects (`_ListWrapper`) behave exactly like
  built-in lists: they are unhashable and compare by content, so they cannot
  live in a normal dict.  Here every key is wrapped so hashing and equality
  follow object identity.
  """

  __slots__ = ["_storage"]

  def __init__(self):
    self._storage = {}

  def _wrap_key(self, key):
    return _ObjectIdentityWrapper(key)

  def __getitem__(self, key):
    return self._storage[self._wrap_key(key)]

  def __setitem__(self, key, value):
    self._storage[self._wrap_key(key)] = value

  def __delitem__(self, key):
    del self._storage[self._wrap_key(key)]

  def __len__(self):
    return len(self._storage)

  def __iter__(self):
    # Yield the original (unwrapped) keys to callers.
    for wrapper in self._storage:
      yield wrapper.unwrapped

  def __repr__(self):
    return "ObjectIdentityDictionary(%s)" % repr(self._storage)
class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):
  """Like weakref.WeakKeyDictionary, but compares objects with "is"."""

  __slots__ = ["__weakref__"]

  def _wrap_key(self, key):
    # Weakly reference keys so entries do not keep their keys alive.
    return _WeakObjectIdentityWrapper(key)

  def __len__(self):
    # NOTE(review): this counts stored wrappers, including ones whose
    # referent has already been collected — unlike WeakKeyDictionary.__len__.
    return len(list(self._storage))

  def __iter__(self):
    keys = self._storage.keys()
    for key in keys:
      unwrapped = key.unwrapped
      if unwrapped is None:
        # NOTE(review): `key` is already a wrapper, so `del self[key]`
        # re-wraps it before the storage lookup, and the delete mutates
        # `_storage` while its keys view is being iterated — verify this
        # cleanup path actually removes dead entries without raising.
        del self[key]
      else:
        yield unwrapped
class ObjectIdentitySet(collections.abc.MutableSet):
  """Like the built-in set, but membership is decided with "is"."""

  __slots__ = ["_storage", "__weakref__"]

  def __init__(self, *args):
    self._storage = {self._wrap_key(obj) for obj in list(*args)}

  @staticmethod
  def _from_storage(storage):
    # Internal constructor used by difference(); adopts `storage` directly.
    result = ObjectIdentitySet()
    result._storage = storage  # pylint: disable=protected-access
    return result

  def _wrap_key(self, key):
    return _ObjectIdentityWrapper(key)

  def __contains__(self, key):
    return self._wrap_key(key) in self._storage

  def discard(self, key):
    self._storage.discard(self._wrap_key(key))

  def add(self, key):
    self._storage.add(self._wrap_key(key))

  def update(self, items):
    self._storage.update(self._wrap_key(item) for item in items)

  def clear(self):
    self._storage.clear()

  def intersection(self, items):
    return self._storage.intersection(self._wrap_key(item) for item in items)

  def difference(self, items):
    wrapped_items = [self._wrap_key(item) for item in items]
    return ObjectIdentitySet._from_storage(
        self._storage.difference(wrapped_items))

  def __len__(self):
    return len(self._storage)

  def __iter__(self):
    # Snapshot first so concurrent mutation during iteration is tolerated
    # the same way the original implementation tolerated it.
    for wrapper in list(self._storage):
      yield wrapper.unwrapped
class ObjectIdentityWeakSet(ObjectIdentitySet):
  """Like weakref.WeakSet, but compares objects with "is"."""

  __slots__ = ()

  def _wrap_key(self, key):
    return _WeakObjectIdentityWrapper(key)

  def __len__(self):
    # Iterating skips (and attempts to drop) wrappers whose referents died.
    return sum(1 for _ in self)

  def __iter__(self):
    for wrapper in list(self._storage):
      referent = wrapper.unwrapped
      if referent is None:
        # NOTE(review): discard() re-wraps `wrapper` before the lookup, so
        # it is a silent no-op for stale entries — confirm intended.
        self.discard(wrapper)
      else:
        yield referent
# LINT.ThenChange(//tensorflow/python/util/object_identity.py)
| 6,506 | 25.34413 | 85 | py |
keras | keras-master/keras/utils/vis_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-import-not-at-top
"""Utilities related to model visualization."""
import tensorflow.compat.v2 as tf
import os
import sys
import re
from keras.utils.io_utils import path_to_string
from tensorflow.python.util.tf_export import keras_export
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def check_pydot():
  """Returns True if PyDot and Graphviz are available."""
  if pydot is None:
    return False
  try:
    # Rendering an empty graph exercises both pydot and the graphviz
    # binaries, so a success here means plotting will work.
    pydot.Dot.create(pydot.Dot())
  except (OSError, pydot.InvocationException):
    return False
  return True
def is_wrapped_model(layer):
  """Returns True if `layer` is a Wrapper around a functional model."""
  from keras.engine import functional
  from keras.layers import wrappers
  if not isinstance(layer, wrappers.Wrapper):
    return False
  return isinstance(layer.layer, functional.Functional)
def add_edge(dot, src, dst):
  """Adds a `src` -> `dst` edge to `dot`, skipping duplicates."""
  if dot.get_edge(src, dst):
    return
  dot.add_edge(pydot.Edge(src, dst))
def get_layer_index_bound_by_layer_name(model, layer_names):
  """Returns the layer-index range to plot, mainly for sub-graph plots.

  Each entry of `layer_names` is matched with `re.match` against every layer
  name in `model.layers`, so exact names and regex prefixes both work.

  Args:
    model: A `tf.keras.Model` instance.
    layer_names: List of two strings (or regex patterns): the start and end
      layer names of the range, both inclusive.

  Returns:
    A list `[lower, upper]` of layer indices covering every match; the pair
    is reordered so that `lower <= upper` even if the names were given in
    reverse order.

  Raises:
    ValueError: If either name matches no layer in the model.
  """
  lower_index = []
  upper_index = []
  for idx, layer in enumerate(model.layers):
    if re.match(layer_names[0], layer.name):
      lower_index.append(idx)
    if re.match(layer_names[1], layer.name):
      upper_index.append(idx)
  if not lower_index or not upper_index:
    raise ValueError(
        'Passed layer_names does not match to layers in the model. '
        f'Received: {layer_names}')  # fixed typo: was "Recieved"
  # If the names were supplied end-before-start, swap the roles so the
  # returned range is always ascending.
  if min(lower_index) > max(upper_index):
    return [min(upper_index), max(lower_index)]
  return [min(lower_index), max(upper_index)]
@keras_export('keras.utils.model_to_dot')
def model_to_dot(model,
                 show_shapes=False,
                 show_dtype=False,
                 show_layer_names=True,
                 rankdir='TB',
                 expand_nested=False,
                 dpi=96,
                 subgraph=False,
                 layer_range=None):
  """Convert a Keras model to dot format.

  Args:
    model: A Keras model instance.
    show_shapes: whether to display shape information.
    show_dtype: whether to display layer dtypes.
    show_layer_names: whether to display layer names.
    rankdir: `rankdir` argument passed to PyDot,
      a string specifying the format of the plot:
      'TB' creates a vertical plot;
      'LR' creates a horizontal plot.
    expand_nested: whether to expand nested models into clusters.
    dpi: Dots per inch.
    subgraph: whether to return a `pydot.Cluster` instance.
    layer_range: input of `list` containing two `str` items, which is the
      starting layer name and ending layer name (both inclusive) indicating
      the range of layers for which the `pydot.Dot` will be generated. It
      also accepts regex patterns instead of exact name. In such case, start
      predicate will be the first element it matches to `layer_range[0]`
      and the end predicate will be the last element it matches to
      `layer_range[1]`. By default `None` which considers all layers of
      model. Note that you must pass range such that the resultant subgraph
      must be complete.

  Returns:
    A `pydot.Dot` instance representing the Keras model or
    a `pydot.Cluster` instance representing nested model if
    `subgraph=True`.

  Raises:
    ImportError: if graphviz or pydot are not available.
  """
  from keras.layers import wrappers
  from keras.engine import sequential
  from keras.engine import functional

  if not check_pydot():
    # Bug fix: a stray trailing comma used to turn `message` into a 2-tuple,
    # so users saw a tuple printed/raised instead of one readable string.
    message = (
        'You must install pydot (`pip install pydot`) '
        'and install graphviz '
        '(see instructions at https://graphviz.gitlab.io/download/) '
        'for plot_model/model_to_dot to work.')
    if 'IPython.core.magics.namespace' in sys.modules:
      # We don't raise an exception here in order to avoid crashing notebook
      # tests where graphviz is not available.
      print(message)
      return
    else:
      raise ImportError(message)

  if subgraph:
    dot = pydot.Cluster(style='dashed', graph_name=model.name)
    dot.set('label', model.name)
    dot.set('labeljust', 'l')
  else:
    dot = pydot.Dot()
    dot.set('rankdir', rankdir)
    dot.set('concentrate', True)
    dot.set('dpi', dpi)
    dot.set_node_defaults(shape='record')

  if layer_range:
    if len(layer_range) != 2:
      raise ValueError(
          'layer_range must be of shape (2,). Received: '
          f'layer_range = {layer_range} of length {len(layer_range)}')
    if (not isinstance(layer_range[0], str) or
        not isinstance(layer_range[1], str)):
      raise ValueError(
          'layer_range should contain string type only. '
          f'Received: {layer_range}')
    layer_range = get_layer_index_bound_by_layer_name(model, layer_range)
    if layer_range[0] < 0 or layer_range[1] > len(model.layers):
      # Fixed typo ("Recieved") and the unbalanced parenthesis in the range.
      raise ValueError('Both values in layer_range should be in range (0, '
                       f'{len(model.layers)}). Received: {layer_range}')

  # First/last pydot nodes of each nested submodel, keyed by submodel name;
  # used below to connect edges across cluster boundaries.
  sub_n_first_node = {}
  sub_n_last_node = {}
  sub_w_first_node = {}
  sub_w_last_node = {}

  layers = model.layers
  if not model._is_graph_network:
    # Subclassed models have no inspectable graph: emit a single node.
    node = pydot.Node(str(id(model)), label=model.name)
    dot.add_node(node)
    return dot
  elif isinstance(model, sequential.Sequential):
    if not model.built:
      model.build()
    layers = super(sequential.Sequential, model).layers

  # Create graph nodes.
  for i, layer in enumerate(layers):
    if (layer_range) and (i < layer_range[0] or i > layer_range[1]):
      continue

    layer_id = str(id(layer))

    # Append a wrapped layer's label to node's label, if it exists.
    layer_name = layer.name
    class_name = layer.__class__.__name__

    if isinstance(layer, wrappers.Wrapper):
      if expand_nested and isinstance(layer.layer,
                                      functional.Functional):
        submodel_wrapper = model_to_dot(
            layer.layer,
            show_shapes,
            show_dtype,
            show_layer_names,
            rankdir,
            expand_nested,
            subgraph=True)
        # sub_w : submodel_wrapper
        sub_w_nodes = submodel_wrapper.get_nodes()
        sub_w_first_node[layer.layer.name] = sub_w_nodes[0]
        sub_w_last_node[layer.layer.name] = sub_w_nodes[-1]
        dot.add_subgraph(submodel_wrapper)
      else:
        layer_name = '{}({})'.format(layer_name, layer.layer.name)
        child_class_name = layer.layer.__class__.__name__
        class_name = '{}({})'.format(class_name, child_class_name)

    if expand_nested and isinstance(layer, functional.Functional):
      submodel_not_wrapper = model_to_dot(
          layer,
          show_shapes,
          show_dtype,
          show_layer_names,
          rankdir,
          expand_nested,
          subgraph=True)
      # sub_n : submodel_not_wrapper
      sub_n_nodes = submodel_not_wrapper.get_nodes()
      sub_n_first_node[layer.name] = sub_n_nodes[0]
      sub_n_last_node[layer.name] = sub_n_nodes[-1]
      dot.add_subgraph(submodel_not_wrapper)

    # Create node's label.
    if show_layer_names:
      label = '{}: {}'.format(layer_name, class_name)
    else:
      label = class_name

    # Rebuild the label as a table including the layer's dtype.
    if show_dtype:

      def format_dtype(dtype):
        if dtype is None:
          return '?'
        else:
          return str(dtype)

      label = '%s|%s' % (label, format_dtype(layer.dtype))

    # Rebuild the label as a table including input/output shapes.
    if show_shapes:

      def format_shape(shape):
        return str(shape).replace(str(None), 'None')

      try:
        outputlabels = format_shape(layer.output_shape)
      except AttributeError:
        outputlabels = '?'
      if hasattr(layer, 'input_shape'):
        inputlabels = format_shape(layer.input_shape)
      elif hasattr(layer, 'input_shapes'):
        inputlabels = ', '.join(
            [format_shape(ishape) for ishape in layer.input_shapes])
      else:
        inputlabels = '?'
      label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label,
                                                     inputlabels,
                                                     outputlabels)

    # Nested functional models are drawn as clusters above, not as nodes.
    if not expand_nested or not isinstance(
        layer, functional.Functional):
      node = pydot.Node(layer_id, label=label)
      dot.add_node(node)

  # Connect nodes with edges.
  for i, layer in enumerate(layers):
    # NOTE(review): `<=` here (vs `<` in the node-creation loop above) also
    # skips edges *into* the first layer of the range — confirm intended.
    if (layer_range) and (i <= layer_range[0] or i > layer_range[1]):
      continue
    layer_id = str(id(layer))
    for i, node in enumerate(layer._inbound_nodes):
      node_key = layer.name + '_ib-' + str(i)
      if node_key in model._network_nodes:
        for inbound_layer in tf.nest.flatten(node.inbound_layers):
          inbound_layer_id = str(id(inbound_layer))
          if not expand_nested:
            assert dot.get_node(inbound_layer_id)
            assert dot.get_node(layer_id)
            add_edge(dot, inbound_layer_id, layer_id)
          else:
            # if inbound_layer is not Model or wrapped Model
            if (not isinstance(inbound_layer,
                               functional.Functional) and
                not is_wrapped_model(inbound_layer)):
              # if current layer is not Model or wrapped Model
              if (not isinstance(layer, functional.Functional) and
                  not is_wrapped_model(layer)):
                assert dot.get_node(inbound_layer_id)
                assert dot.get_node(layer_id)
                add_edge(dot, inbound_layer_id, layer_id)
              # if current layer is Model
              elif isinstance(layer, functional.Functional):
                add_edge(dot, inbound_layer_id,
                         sub_n_first_node[layer.name].get_name())
              # if current layer is wrapped Model
              elif is_wrapped_model(layer):
                add_edge(dot, inbound_layer_id, layer_id)
                name = sub_w_first_node[layer.layer.name].get_name()
                add_edge(dot, layer_id, name)
            # if inbound_layer is Model
            elif isinstance(inbound_layer, functional.Functional):
              name = sub_n_last_node[inbound_layer.name].get_name()
              if isinstance(layer, functional.Functional):
                output_name = sub_n_first_node[layer.name].get_name()
                add_edge(dot, name, output_name)
              else:
                add_edge(dot, name, layer_id)
            # if inbound_layer is wrapped Model
            elif is_wrapped_model(inbound_layer):
              inbound_layer_name = inbound_layer.layer.name
              add_edge(dot,
                       sub_w_last_node[inbound_layer_name].get_name(),
                       layer_id)
  return dot
@keras_export('keras.utils.plot_model')
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_dtype=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=False,
               dpi=96,
               layer_range=None):
  """Converts a Keras model to dot format and save to a file.

  Example:

  ```python
  input = tf.keras.Input(shape=(100,), dtype='int32', name='input')
  x = tf.keras.layers.Embedding(
      output_dim=512, input_dim=10000, input_length=100)(input)
  x = tf.keras.layers.LSTM(32)(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x)
  model = tf.keras.Model(inputs=[input], outputs=[output])
  dot_img_file = '/tmp/model_1.png'
  tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
  ```

  Args:
    model: A Keras model instance
    to_file: File name of the plot image.
    show_shapes: whether to display shape information.
    show_dtype: whether to display layer dtypes.
    show_layer_names: whether to display layer names.
    rankdir: `rankdir` argument passed to PyDot,
      a string specifying the format of the plot:
      'TB' creates a vertical plot;
      'LR' creates a horizontal plot.
    expand_nested: Whether to expand nested models into clusters.
    dpi: Dots per inch.
    layer_range: input of `list` containing two `str` items, which is the
      starting layer name and ending layer name (both inclusive) indicating
      the range of layers for which the plot will be generated. It also
      accepts regex patterns instead of exact name. In such case, start
      predicate will be the first element it matches to `layer_range[0]`
      and the end predicate will be the last element it matches to
      `layer_range[1]`. By default `None` which considers all layers of
      model. Note that you must pass range such that the resultant subgraph
      must be complete.

  Returns:
    A Jupyter notebook Image object if Jupyter is installed.
    This enables in-line display of the model plots in notebooks.
  """
  dot = model_to_dot(
      model,
      show_shapes=show_shapes,
      show_dtype=show_dtype,
      show_layer_names=show_layer_names,
      rankdir=rankdir,
      expand_nested=expand_nested,
      dpi=dpi,
      layer_range=layer_range)
  # model_to_dot returns None (instead of raising) inside notebooks when
  # pydot/graphviz are missing; mirror that by silently giving up here.
  if dot is None:
    return
  to_file = path_to_string(to_file)
  extension = os.path.splitext(to_file)[1]
  if extension:
    extension = extension[1:]
  else:
    extension = 'png'
  # Save image to disk.
  dot.write(to_file, format=extension)
  # Return the image as a Jupyter Image object, to be displayed in-line.
  # Note that we cannot easily detect whether the code is running in a
  # notebook, and thus we always return the Image if Jupyter is available.
  if extension != 'pdf':
    try:
      from IPython import display
      return display.Image(filename=to_file)
    except ImportError:
      pass
| 15,166 | 35.284689 | 80 | py |
keras | keras-master/keras/utils/conv_utils.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by convolution layers."""
import tensorflow.compat.v2 as tf
import itertools
import numpy as np
from keras import backend
def convert_data_format(data_format, ndim):
  """Maps a Keras `data_format` + rank to the TF op-level string.

  Args:
    data_format: Either 'channels_first' or 'channels_last'.
    ndim: Rank of the full input tensor, including the batch axis (3, 4
      or 5).

  Returns:
    The corresponding TF format string, e.g. 'NHWC' or 'NCDHW'.

  Raises:
    ValueError: On an unknown `data_format` or unsupported rank.
  """
  formats_by_rank = {
      'channels_last': {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'},
      'channels_first': {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'},
  }
  if data_format not in formats_by_rank:
    raise ValueError(
        f'Invalid data_format: {data_format}. '
        'Expected values are ["channels_first", "channels_last"]')
  rank_table = formats_by_rank[data_format]
  if ndim not in rank_table:
    raise ValueError(
        f'Input rank not supported: {ndim}. Expected values are [3, 4, 5]')
  return rank_table[ndim]
def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Args:
    value: The value to validate and convert. Could an int, or any iterable of
      ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  # A bare int is broadcast to all n positions.
  if isinstance(value, int):
    return (value,) * n
  error_msg = (f'The `{name}` argument must be a tuple of {n} '
               f'integers. Received: {value}')
  try:
    as_tuple = tuple(value)
  except TypeError:
    raise ValueError(error_msg)
  if len(as_tuple) != n:
    raise ValueError(error_msg)
  # Every element must be convertible to int.
  for element in as_tuple:
    try:
      int(element)
    except (ValueError, TypeError):
      error_msg += (f'including element {element} of '
                    f'type {type(element)}')
      raise ValueError(error_msg)
  return as_tuple
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Args:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full", "causal"
    stride: integer.
    dilation: dilation rate, integer.

  Returns:
    The output length (integer), or None if `input_length` is None.
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full', 'causal'}
  # Effective receptive-field width of the dilated kernel.
  effective_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
  if padding == 'valid':
    length = input_length - effective_filter_size + 1
  elif padding == 'full':
    length = input_length + effective_filter_size - 1
  else:
    # 'same' and 'causal' preserve the input length before striding.
    length = input_length
  return (length + stride - 1) // stride
def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Args:
    output_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The input length (integer), or None if `output_length` is None.
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Implicit padding applied on each side for the given mode.
  pad = {
      'same': filter_size // 2,
      'valid': 0,
      'full': filter_size - 1,
  }[padding]
  return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(input_length,
                         filter_size,
                         padding,
                         output_padding=None,
                         stride=0,
                         dilation=1):
  """Determines output length of a transposed convolution given input length.

  Args:
    input_length: Integer.
    filter_size: Integer.
    padding: one of `"same"`, `"valid"`, `"full"`.
    output_padding: Integer, amount of padding along the output dimension. Can
      be set to `None` in which case the output length is inferred.
    stride: Integer.
    dilation: Integer.

  Returns:
    The output length (integer), or `None` if `input_length` is `None`.
  """
  assert padding in {'same', 'valid', 'full'}
  if input_length is None:
    return None
  # Effective (dilated) kernel size.
  filter_size = (filter_size - 1) * dilation + 1
  if output_padding is None:
    # Infer the length from the padding mode alone.
    if padding == 'same':
      return input_length * stride
    if padding == 'valid':
      return input_length * stride + max(filter_size - stride, 0)
    # 'full'
    return input_length * stride - (stride + filter_size - 2)
  # Exact length when the caller fixes the output padding.
  pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
  return (input_length - 1) * stride + filter_size - 2 * pad + output_padding
def normalize_data_format(value):
  """Checks and lowercases a `data_format` value.

  `None` falls back to the global Keras image data format setting.
  """
  if value is None:
    value = backend.image_data_format()
  data_format = value.lower()
  if data_format in {'channels_first', 'channels_last'}:
    return data_format
  raise ValueError('The `data_format` argument must be one of '
                   f'"channels_first", "channels_last". Received: {value}')
def normalize_padding(value):
  """Checks and lowercases a `padding` argument.

  Explicit list/tuple paddings are passed through untouched.
  """
  if isinstance(value, (list, tuple)):
    return value
  padding = value.lower()
  if padding in {'valid', 'same', 'causal'}:
    return padding
  raise ValueError('The `padding` argument must be a list/tuple or one of '
                   '"valid", "same" (or "causal", only for `Conv1D). '
                   f'Received: {padding}')
def conv_kernel_mask(input_shape, kernel_shape, strides, padding):
  """Compute a mask representing the connectivity of a convolution operation.
  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an
  output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array
  of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries
  indicating pairs of input and output locations that are connected by a weight.
  Example:
  >>> input_shape = (4,)
  >>> kernel_shape = (2,)
  >>> strides = (1,)
  >>> padding = "valid"
  >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)
  array([[ True, False, False],
         [ True, True, False],
         [False, True, True],
         [False, False, True]])
  where rows and columns correspond to inputs and outputs respectively.
  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input.
  Returns:
    A boolean 2N-D `np.ndarray` of shape
    `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)`
    is the spatial shape of the output. `True` entries in the mask represent
    pairs of input-output locations that are connected by a weight.
  Raises:
    ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the
      same number of dimensions.
    NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}.
  """
  if padding not in {'same', 'valid'}:
    raise NotImplementedError(f'Padding type {padding} not supported. '
                              'Only "valid" and "same" are implemented.')
  in_dims = len(input_shape)
  # Broadcast scalar kernel/stride arguments across all spatial dimensions.
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims
  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     f'match. Received: stride_dims={stride_dims}, '
                     f'in_dims={in_dims}, kernel_dims={kernel_dims}')
  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)
  mask_shape = input_shape + output_shape
  # Use builtin `bool`: the `np.bool` alias was deprecated in NumPy 1.20 and
  # removed in NumPy 1.24, which made this line raise AttributeError.
  mask = np.zeros(mask_shape, bool)
  # Mark every (input position, output position) pair covered by the kernel's
  # receptive field for that output position.
  output_axes_ticks = [range(dim) for dim in output_shape]
  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      mask[input_position + output_position] = True
  return mask
def conv_kernel_idxs(input_shape, kernel_shape, strides, padding, filters_in,
                     filters_out, data_format):
  """Yields output-input tuples of indices in a CNN layer.
  The generator iterates over all `(output_idx, input_idx)` tuples, where
  `output_idx` is an integer index in a flattened tensor representing a single
  output image of a convolutional layer that is connected (via the layer
  weights) to the respective single input image at `input_idx`
  Example:
  >>> input_shape = (2, 2)
  >>> kernel_shape = (2, 1)
  >>> strides = (1, 1)
  >>> padding = "valid"
  >>> filters_in = 1
  >>> filters_out = 1
  >>> data_format = "channels_last"
  >>> list(conv_kernel_idxs(input_shape, kernel_shape, strides, padding,
  ...                       filters_in, filters_out, data_format))
  [(0, 0), (0, 2), (1, 1), (1, 3)]
  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input.
    filters_in: `int`, number of filters in the input to the layer.
    filters_out: `int`, number of filters in the output of the layer.
    data_format: string, "channels_first" or "channels_last".
  Yields:
    The next tuple `(output_idx, input_idx)`, where
    `output_idx` is an integer index in a flattened tensor representing a single
    output image of a convolutional layer that is connected (via the layer
    weights) to the respective single input image at `input_idx`.
  Raises:
    ValueError: if `data_format` is neither
      `"channels_last"` nor `"channels_first"`, or if number of strides, input,
      and kernel number of dimensions do not match.
    NotImplementedError: if `padding` is neither `"same"` nor `"valid"`.
  """
  if padding not in ('same', 'valid'):
    raise NotImplementedError(f'Padding type {padding} not supported. '
                              'Only "valid" and "same" are implemented.')
  in_dims = len(input_shape)
  # Scalar kernel/stride arguments apply uniformly to every spatial dimension.
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims
  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     f'match. Received: stride_dims={stride_dims}, '
                     f'in_dims={in_dims}, kernel_dims={kernel_dims}')
  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)
  output_axes_ticks = [range(dim) for dim in output_shape]
  # How to splice the channel index into a spatial index, per data layout.
  if data_format == 'channels_first':
    concat_idxs = lambda spatial_idx, filter_idx: (filter_idx,) + spatial_idx
  elif data_format == 'channels_last':
    concat_idxs = lambda spatial_idx, filter_idx: spatial_idx + (filter_idx,)
  else:
    raise ValueError(
        f'Data format `{data_format}` not recognized.'
        '`data_format` must be "channels_first" or "channels_last".')
  # Enumerate every output location, the input locations its receptive field
  # covers, and every (in-channel, out-channel) pairing; flatten each
  # multi-index into a scalar position within the respective full tensor.
  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      for f_in in range(filters_in):
        for f_out in range(filters_out):
          out_idx = np.ravel_multi_index(
              multi_index=concat_idxs(output_position, f_out),
              dims=concat_idxs(output_shape, filters_out))
          in_idx = np.ravel_multi_index(
              multi_index=concat_idxs(input_position, f_in),
              dims=concat_idxs(input_shape, filters_in))
          yield (out_idx, in_idx)
def conv_connected_inputs(input_shape, kernel_shape, output_position, strides,
                          padding):
  """Return locations of the input connected to an output position.

  For a convolution over an N-dimensional spatial input, returns N `range`s
  specifying the input region convolved with the kernel to produce the output
  at `output_position = (p_out1, ..., p_outN)`.

  Example:
  >>> conv_connected_inputs((4, 4), (2, 1), (1, 1), (1, 1), "valid")
  [range(1, 3), range(1, 2)]

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single
      position in the output of the convolution.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    A list of N `range` objects, each bounding the connected input region
    along the corresponding spatial dimension, clipped to the input extent.
  """
  ranges = []
  for dim_size, kernel_size, position, stride in zip(input_shape, kernel_shape,
                                                     output_position, strides):
    half_left = kernel_size // 2
    half_right = kernel_size - half_left
    center = position * stride
    if padding == 'valid':
      # 'valid' output positions are anchored at the kernel's left edge.
      center += half_left
    lo = max(0, center - half_left)
    hi = min(dim_size, center + half_right)
    ranges.append(range(lo, hi))
  return ranges
def conv_output_shape(input_shape, kernel_shape, strides, padding):
  """Return the output shape of an N-D convolution.

  Forces dimensions where input is empty (size 0) to remain empty.

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.
  """
  out_dims = []
  for d, kernel_size in enumerate(kernel_shape):
    if input_shape[d] == 0:
      # An empty input dimension always yields an empty output dimension.
      out_dims.append(0)
    else:
      out_dims.append(
          conv_output_length(input_shape[d], kernel_size, padding, strides[d]))
  return tuple(out_dims)
def squeeze_batch_dims(inp, op, inner_rank):
  """Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
  Where `squeeze_batch` reshapes `inp` to shape
  `[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
  and `unsqueeze_batch` does the reverse reshape but on the output.
  Args:
    inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
      is length `inner_rank`.
    op: A callable that takes a single input tensor and returns a single.
      output tensor.
    inner_rank: A python integer.
  Returns:
    `unsqueeze_batch_op(squeeze_batch(inp))`.
  """
  with tf.name_scope('squeeze_batch_dims'):
    shape = inp.shape
    # Prefer the static shape; fall back to a dynamic shape tensor whenever a
    # dimension is unknown at graph-construction time.
    inner_shape = shape[-inner_rank:]
    if not inner_shape.is_fully_defined():
      inner_shape = tf.shape(inp)[-inner_rank:]
    batch_shape = shape[:-inner_rank]
    if not batch_shape.is_fully_defined():
      batch_shape = tf.shape(inp)[:-inner_rank]
    # Collapse all batch dimensions into a single leading -1 dimension.
    if isinstance(inner_shape, tf.TensorShape):
      inp_reshaped = tf.reshape(inp, [-1] + inner_shape.as_list())
    else:
      inp_reshaped = tf.reshape(
          inp, tf.concat(([-1], inner_shape), axis=-1))
    out_reshaped = op(inp_reshaped)
    # `op` may change the inner dimensions; recover them (statically if
    # possible) before restoring the original batch dimensions.
    out_inner_shape = out_reshaped.shape[-inner_rank:]
    if not out_inner_shape.is_fully_defined():
      out_inner_shape = tf.shape(out_reshaped)[-inner_rank:]
    out = tf.reshape(
        out_reshaped, tf.concat((batch_shape, out_inner_shape), axis=-1))
    # Propagate whatever static shape information survives the round trip.
    out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
    return out
| 18,362 | 34.656311 | 80 | py |
keras | keras-master/keras/utils/dataset_creator.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Input dataset creator for `model.fit`."""
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.experimental.DatasetCreator', v1=[])
class DatasetCreator:
  """Object that returns a `tf.data.Dataset` upon invoking.

  `tf.keras.utils.experimental.DatasetCreator` is a supported type for `x`
  (the input) in `tf.keras.Model.fit`. Wrap a callable that accepts a
  `tf.distribute.InputContext` and returns a `tf.data.Dataset`:

  ```python
  def dataset_fn(input_context):
    batch_size = input_context.get_per_replica_batch_size(64)
    dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat()
    dataset = dataset.shard(
        input_context.num_input_pipelines, input_context.input_pipeline_id)
    return dataset.batch(batch_size).prefetch(2)

  model.fit(tf.keras.utils.experimental.DatasetCreator(dataset_fn),
            epochs=10, steps_per_epoch=10)
  ```

  `Model.fit` usage with `DatasetCreator` is intended to work across all
  `tf.distribute.Strategy`s, as long as `Strategy.scope` is used at model
  creation.

  Note: When using `DatasetCreator`, the `steps_per_epoch` argument in
  `Model.fit` must be provided, as the cardinality of such input cannot be
  inferred.

  Args:
    dataset_fn: A callable that takes a single argument of type
      `tf.distribute.InputContext`, which is used for batch size calculation
      and cross-worker input pipeline sharding (if neither is needed, the
      `InputContext` parameter can be ignored in the `dataset_fn`), and
      returns a `tf.data.Dataset`.
    input_options: Optional `tf.distribute.InputOptions`, used for specific
      options when used with distribution, for example, whether to prefetch
      dataset elements to accelerator device memory or host device memory.
      No effect if not used with distributed training.
  """

  def __init__(self, dataset_fn, input_options=None):
    # Validate eagerly so misuse is reported at construction time rather than
    # when `Model.fit` eventually invokes the creator.
    if not callable(dataset_fn):
      raise TypeError(
          '`dataset_fn` for `DatasetCreator` must be a `callable`. '
          f'Received: {dataset_fn}')
    if input_options and (not isinstance(input_options,
                                         tf.distribute.InputOptions)):
      raise TypeError(
          '`input_options` for `DatasetCreator` must be a '
          f'`tf.distribute.InputOptions`. Received: {input_options}')
    self.dataset_fn = dataset_fn
    self.input_options = input_options

  def __call__(self, *args, **kwargs):
    """Forwards the call to `dataset_fn` and checks it produced a `Dataset`."""
    produced = self.dataset_fn(*args, **kwargs)
    if not isinstance(produced, tf.data.Dataset):
      raise TypeError(
          'The `callable` provided to `DatasetCreator` must return '
          f'a Dataset. It returns "{produced}"')
    return produced
| 4,554 | 40.036036 | 80 | py |
keras | keras-master/keras/utils/losses_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to loss functions."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine import keras_tensor
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.losses.Reduction', v1=[])
class ReductionV2:
  """Types of loss reduction.

  Contains the following values:

  * `AUTO`: Reduction is determined by the usage context; in almost all cases
    this defaults to `SUM_OVER_BATCH_SIZE`. With `tf.distribute.Strategy`,
    outside of built-in training loops such as `tf.keras` `compile`/`fit`,
    `AUTO` raises an error — use `SUM` or `NONE` there.
  * `NONE`: No **additional** reduction is applied to the output of the
    wrapped loss function. Caution: the builtin loss functions wrapped by the
    loss classes still reduce one dimension (`axis=-1`, or `axis` if specified
    by the loss function); `NONE` only means no further reduction on top of
    that. For pointwise losses you must include a dummy last axis so that
    `[batch, W, H, 1]` is reduced to `[batch, W, H]`.
  * `SUM`: Scalar sum of weighted losses.
  * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in
    losses. Not supported with `tf.distribute.Strategy` outside of built-in
    training loops; implement it manually via
    `tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size)`
    under the strategy scope. See the custom training guide
    (https://www.tensorflow.org/tutorials/distribute/custom_training) for
    details.
  """

  AUTO = 'auto'
  NONE = 'none'
  SUM = 'sum'
  SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'

  @classmethod
  def all(cls):
    """Returns the tuple of every supported reduction key."""
    return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)

  @classmethod
  def validate(cls, key):
    """Raises `ValueError` when `key` is not a supported reduction key."""
    if key not in cls.all():
      raise ValueError(
          f'Invalid Reduction Key: {key}. Expected keys are "{cls.all()}"')
def remove_squeezable_dimensions(
    labels, predictions, expected_rank_diff=0, name=None):
  """Squeeze last dim if ranks differ from expected by exactly 1.
  In the common case where we expect shapes to match, `expected_rank_diff`
  defaults to 0, and we squeeze the last dimension of the larger rank if they
  differ by 1.
  But, for example, if `labels` contains class IDs and `predictions` contains 1
  probability per class, we expect `predictions` to have 1 more dimension than
  `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
  `labels` if `rank(predictions) - rank(labels) == 0`, and
  `predictions` if `rank(predictions) - rank(labels) == 2`.
  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.
  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
    name: Name of the op.
  Returns:
    Tuple of `labels` and `predictions`, possibly with last dim squeezed.
  """
  with backend.name_scope(name or 'remove_squeezable_dimensions'):
    # Ragged tensors are kept as-is; dense inputs are converted to tensors.
    if not isinstance(predictions, tf.RaggedTensor):
      predictions = tf.convert_to_tensor(predictions)
    if not isinstance(labels, tf.RaggedTensor):
      labels = tf.convert_to_tensor(labels)
    predictions_shape = predictions.shape
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.shape
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      rank_diff = predictions_rank - labels_rank
      # Squeeze whichever tensor is one rank larger than expected, provided
      # its trailing dimension could be 1.
      if (rank_diff == expected_rank_diff + 1 and
          predictions_shape.dims[-1].is_compatible_with(1)):
        predictions = tf.squeeze(predictions, [-1])
      elif (rank_diff == expected_rank_diff - 1 and
            labels_shape.dims[-1].is_compatible_with(1)):
        labels = tf.squeeze(labels, [-1])
      return labels, predictions
    # Use dynamic rank. `tf.cond` defers the squeeze decision to runtime.
    rank_diff = tf.rank(predictions) - tf.rank(labels)
    # Only guard with `tf.cond` when the static shape cannot already rule out
    # a squeezable last dimension.
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = tf.cond(
          tf.equal(expected_rank_diff + 1, rank_diff),
          lambda: tf.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = tf.cond(
          tf.equal(expected_rank_diff - 1, rank_diff),
          lambda: tf.squeeze(labels, [-1]),
          lambda: labels)
    return labels, predictions
def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):
  """Squeeze or expand last dimension if needed.
  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`.
  If `sample_weight` is scalar, it is kept scalar.
  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.
  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.
  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed,
    `sample_weight` could be extended by one dimension.
    If `sample_weight` is None, (y_pred, y_true) is returned.
  """
  y_pred_shape = y_pred.shape
  y_pred_rank = y_pred_shape.ndims
  if y_true is not None:
    # If sparse matrix is provided as `y_true`, the last dimension in `y_pred`
    # may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)),
    # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3))
    # In this case, we should not try to remove squeezable dimension.
    y_true_shape = y_true.shape
    y_true_rank = y_true_shape.ndims
    if (y_true_rank is not None) and (y_pred_rank is not None):
      # Use static rank for `y_true` and `y_pred`.
      if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
        y_true, y_pred = remove_squeezable_dimensions(
            y_true, y_pred)
    else:
      # Use dynamic rank. The squeeze decision must be made at runtime via
      # nested `tf.cond`s, mirroring the static-rank condition above.
      rank_diff = tf.rank(y_pred) - tf.rank(y_true)
      squeeze_dims = lambda: remove_squeezable_dimensions(  # pylint: disable=g-long-lambda
          y_true, y_pred)
      is_last_dim_1 = tf.equal(1, tf.shape(y_pred)[-1])
      maybe_squeeze_dims = lambda: tf.cond(  # pylint: disable=g-long-lambda
          is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
      y_true, y_pred = tf.cond(
          tf.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)
  if sample_weight is None:
    return y_pred, y_true
  weights_shape = sample_weight.shape
  weights_rank = weights_shape.ndims
  if weights_rank == 0:  # If weights is scalar, do nothing.
    return y_pred, y_true, sample_weight
  if (y_pred_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - y_pred_rank == 1:
      sample_weight = tf.squeeze(sample_weight, [-1])
    elif y_pred_rank - weights_rank == 1:
      sample_weight = tf.expand_dims(sample_weight, [-1])
    return y_pred, y_true, sample_weight
  # Use dynamic rank.
  weights_rank_tensor = tf.rank(sample_weight)
  rank_diff = weights_rank_tensor - tf.rank(y_pred)
  maybe_squeeze_weights = lambda: tf.squeeze(sample_weight, [-1])
  def _maybe_expand_weights():
    # Expand only when weights are exactly one rank smaller than `y_pred`.
    expand_weights = lambda: tf.expand_dims(sample_weight, [-1])
    return tf.cond(
        tf.equal(rank_diff, -1), expand_weights, lambda: sample_weight)
  def _maybe_adjust_weights():
    return tf.cond(
        tf.equal(rank_diff, 1), maybe_squeeze_weights,
        _maybe_expand_weights)
  # squeeze or expand last dim of `sample_weight` if its rank differs by 1
  # from the new rank of `y_pred`.
  sample_weight = tf.cond(
      tf.equal(weights_rank_tensor, 0), lambda: sample_weight,
      _maybe_adjust_weights)
  return y_pred, y_true, sample_weight
def _safe_mean(losses, num_present):
  """Computes a safe mean of the losses.

  Args:
    losses: `Tensor` whose elements contain individual loss measurements.
    num_present: The number of measurable elements in `losses`.

  Returns:
    A scalar representing the mean of `losses`. If `num_present` is zero,
      then zero is returned.
  """
  # divide_no_nan yields 0 when `num_present` is 0, avoiding a NaN mean.
  return tf.math.divide_no_nan(
      tf.reduce_sum(losses), num_present, name='value')
def _num_elements(losses):
  """Computes the number of elements in `losses` tensor."""
  with backend.name_scope('num_elements') as scope:
    element_count = tf.size(losses, name=scope)
    # Match the losses' dtype so downstream arithmetic needs no extra casts.
    return tf.cast(element_count, dtype=losses.dtype)
def reduce_weighted_loss(weighted_losses,
                         reduction=ReductionV2.SUM_OVER_BATCH_SIZE):
  """Reduces the individual weighted loss measurements."""
  if reduction == ReductionV2.NONE:
    # Hand back the per-element losses untouched.
    return weighted_losses
  loss = tf.reduce_sum(weighted_losses)
  if reduction == ReductionV2.SUM_OVER_BATCH_SIZE:
    # Mean over all elements; safe even when the tensor is empty.
    loss = _safe_mean(loss, _num_elements(weighted_losses))
  return loss
@keras_export('keras.__internal__.losses.compute_weighted_loss', v1=[])
def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.
  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
      `losses`, or be broadcastable to `losses`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.
  Raises:
    ValueError: If the shape of `sample_weight` is not compatible with `losses`.
  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  ReductionV2.validate(reduction)
  # If this function is called directly, then we just default 'AUTO' to
  # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.
  if reduction == ReductionV2.AUTO:
    reduction = ReductionV2.SUM_OVER_BATCH_SIZE
  if sample_weight is None:
    # A unit weight makes the weighted loss equal to the unweighted loss.
    sample_weight = 1.0
  with backend.name_scope(name or 'weighted_loss'):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas. Used only for estimator + v1 optimizer flow.
    tf.compat.v1.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access
    # KerasTensors (functional-API symbolic tensors) and RaggedTensors must
    # not be forced through convert_to_tensor.
    if not isinstance(losses,
                      (keras_tensor.KerasTensor, tf.RaggedTensor)):
      losses = tf.convert_to_tensor(losses)
    input_dtype = losses.dtype
    if not isinstance(sample_weight,
                      (keras_tensor.KerasTensor, tf.RaggedTensor)):
      sample_weight = tf.convert_to_tensor(sample_weight)
    # TODO(psv): Handle casting here in a better way, eg. if losses is float64
    # we do not want to lose precision.
    losses = tf.cast(losses, 'float32')
    sample_weight = tf.cast(sample_weight, 'float32')
    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(  # pylint: disable=unbalanced-tuple-unpacking
        losses, None, sample_weight)
    weighted_losses = tf.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = tf.cast(loss, input_dtype)
    return loss
def scale_loss_for_distribution(loss_value):
  """Scales and returns the given loss value by the number of replicas."""
  num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
  if num_replicas > 1:
    # Dividing keeps the aggregate loss magnitude equivalent to the
    # single-replica case when per-replica losses are later summed.
    return loss_value * (1. / num_replicas)
  return loss_value
def cast_losses_to_common_dtype(losses):
  """Cast a list of losses to a common dtype.

  If any loss is floating-point, they will all be casted to the most-precise
  floating-point loss. Otherwise the losses are not casted. We also skip casting
  losses if there are any complex losses.

  Args:
    losses: A list of losses.

  Returns:
    `losses`, but they have been casted to a common dtype.
  """
  highest_float = None
  for loss in losses:
    if loss.dtype.is_floating:
      if highest_float is None or loss.dtype.size > highest_float.size:
        highest_float = loss.dtype
      elif {loss.dtype.name, highest_float.name} == {'bfloat16', 'float16'}:
        # bfloat16 and float16 are both 2 bytes but have different exponent /
        # mantissa splits, so a mix of the two is promoted to float32.
        # NOTE: compare dtype *names* here — putting `tf.DType` objects and
        # strings in the same set never matches because their hashes differ,
        # which previously made this branch dead code.
        highest_float = tf.float32
    if loss.dtype.is_complex:
      return losses  # If we find any complex losses, do not cast any losses
  if highest_float:
    losses = [tf.cast(loss, highest_float) for loss in losses]
  return losses
| 14,612 | 38.92623 | 106 | py |
keras | keras-master/keras/utils/multi_gpu_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for multi-gpu training."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine.training import Model
from keras.layers.core.lambda_layer import Lambda
from keras.layers.merge import concatenate
def _get_available_devices():
  """Returns the device names visible to the current backend session."""
  devices = backend.get_session().list_devices()
  return [device.name for device in devices]
def _normalize_device_name(name):
name = '/' + name.lower().split('device:')[1]
return name
def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.
  Specifically, this function implements single-machine
  multi-GPU data parallelism. It works in the following way:
  - Divide the model's input(s) into multiple sub-batches.
  - Apply a model copy on each sub-batch. Every model copy
    is executed on a dedicated GPU.
  - Concatenate the results (on CPU) into one big batch.
  E.g. if your `batch_size` is 64 and you use `gpus=2`,
  then we will divide the input into 2 sub-batches of 32 samples,
  process each sub-batch on one GPU, then return the full
  batch of 64 processed samples.
  This induces quasi-linear speedup on up to 8 GPUs.
  This function is only available with the TensorFlow backend
  for the time being.
  Args:
      model: A Keras model instance. To avoid OOM errors, this model could have
        been built on CPU, for instance (see usage example below).
      gpus: Integer >= 2, number of on GPUs on which to create model replicas.
      cpu_merge: A boolean value to identify whether to force merging model
        weights under the scope of the CPU or not.
      cpu_relocation: A boolean value to identify whether to create the model's
        weights under the scope of the CPU. If the model is not defined under
        any preceding device scope, you can still rescue it by activating this
        option.
  Returns:
    A Keras `Model` instance which can be used just like the initial
    `model` argument, but which distributes its workload on multiple GPUs.
  Example 1: Training models with weights merge on CPU
  ```python
      import tensorflow as tf
      from keras.applications import Xception
      from keras.utils import multi_gpu_model
      import numpy as np
      num_samples = 1000
      height = 224
      width = 224
      num_classes = 1000
      # Instantiate the base model (or "template" model).
      # We recommend doing this with under a CPU device scope,
      # so that the model's weights are hosted on CPU memory.
      # Otherwise they may end up hosted on a GPU, which would
      # complicate weight sharing.
      with tf.device('/cpu:0'):
          model = Xception(weights=None,
                           input_shape=(height, width, 3),
                           classes=num_classes)
      # Replicates the model on 8 GPUs.
      # This assumes that your machine has 8 available GPUs.
      parallel_model = multi_gpu_model(model, gpus=8)
      parallel_model.compile(loss='categorical_crossentropy',
                             optimizer='rmsprop')
      # Generate dummy data.
      x = np.random.random((num_samples, height, width, 3))
      y = np.random.random((num_samples, num_classes))
      # This `fit` call will be distributed on 8 GPUs.
      # Since the batch size is 256, each GPU will process 32 samples.
      parallel_model.fit(x, y, epochs=20, batch_size=256)
      # Save model via the template model (which shares the same weights):
      model.save('my_model.h5')
  ```
  Example 2: Training models with weights merge on CPU using cpu_relocation
  ```python
       ..
       # Not needed to change the device scope for model definition:
       model = Xception(weights=None, ..)
       try:
           model = multi_gpu_model(model, cpu_relocation=True)
           print("Training using multiple GPUs..")
       except:
           print("Training using single GPU or CPU..")
       model.compile(..)
       ..
  ```
  Example 3: Training models with weights merge on GPU (recommended for NV-link)
  ```python
       ..
       # Not needed to change the device scope for model definition:
       model = Xception(weights=None, ..)
       try:
           model = multi_gpu_model(model, cpu_merge=False)
           print("Training using multiple GPUs..")
       except:
           print("Training using single GPU or CPU..")
       model.compile(..)
       ..
  ```
  Raises:
    ValueError: if the `gpus` argument does not match available devices.
  """
  # Normalize `gpus` into (num_gpus, target_gpu_ids): an explicit id list is
  # used as-is; an integer N means GPUs 0..N-1.
  if isinstance(gpus, (list, tuple)):
    if len(gpus) <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `len(gpus) >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = len(gpus)
    target_gpu_ids = gpus
  else:
    if gpus <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `gpus >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = gpus
    target_gpu_ids = range(num_gpus)
  # Fail early if any requested device (CPU for merging + each GPU) is absent.
  target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in target_gpu_ids]
  available_devices = _get_available_devices()
  available_devices = [
      _normalize_device_name(name) for name in available_devices
  ]
  for device in target_devices:
    if device not in available_devices:
      raise ValueError('To call `multi_gpu_model` with `gpus=%s`, '
                       'we expect the following devices to be available: %s. '
                       'However this machine only has: %s. '
                       'Try reducing `gpus`.' %
                       (gpus, target_devices, available_devices))
  def get_slice(data, i, parts):
    """Slice an array into `parts` slices and return slice `i`.
    Args:
      data: array to slice.
      i: index of slice to return.
      parts: number of slices to make.
    Returns:
      Slice `i` of `data`.
    """
    # Shapes are computed symbolically so batch size may be dynamic.
    shape = tf.shape(data)
    batch_size = shape[:1]
    input_shape = shape[1:]
    step = batch_size // parts
    if i == parts - 1:
      # The last slice absorbs the remainder when the batch does not divide
      # evenly by `parts`.
      size = batch_size - step * i
    else:
      size = step
    size = tf.concat([size, input_shape], axis=0)
    stride = tf.concat([step, input_shape * 0], axis=0)
    start = stride * i
    return tf.slice(data, start, size)
  # Relocate the model definition under CPU device scope if needed
  if cpu_relocation:
    from keras.models import clone_model  # pylint: disable=g-import-not-at-top
    with tf.device('/cpu:0'):
      model = clone_model(model)
  # all_outputs[o] collects, across replicas, the o-th output of the model.
  all_outputs = [[] for _ in range(len(model.outputs))]
  # Place a copy of the model on each GPU,
  # each getting a slice of the inputs.
  for i, gpu_id in enumerate(target_gpu_ids):
    with tf.device('/gpu:%d' % gpu_id):
      with backend.name_scope('replica_%d' % gpu_id):
        inputs = []
        # Retrieve a slice of the input.
        for x in model.inputs:
          input_shape = tuple(x.shape.as_list())[1:]
          slice_i = Lambda(
              get_slice,
              output_shape=input_shape,
              arguments={
                  'i': i,
                  'parts': num_gpus
              })(
                  x)
          inputs.append(slice_i)
        # Apply model on slice
        # (creating a model replica on the target device).
        outputs = model(inputs)
        if not isinstance(outputs, list):
          outputs = [outputs]
        # Save the outputs for merging back together later.
        for o, output in enumerate(outputs):
          all_outputs[o].append(output)
  # Deduplicate output names to handle Siamese networks.
  occurrences = {}
  for n in model.output_names:
    if n not in occurrences:
      occurrences[n] = 1
    else:
      occurrences[n] += 1
  conflict_counter = {n: 0 for n, count in occurrences.items() if count > 1}
  output_names = []
  for n in model.output_names:
    if n in conflict_counter:
      conflict_counter[n] += 1
      n += '_%d' % conflict_counter[n]
    output_names.append(n)
  # Merge outputs under expected scope.
  with tf.device('/cpu:0' if cpu_merge else '/gpu:%d' % target_gpu_ids[0]):
    merged = []
    for name, outputs in zip(output_names, all_outputs):
      # Concatenating along axis 0 reassembles the full batch from sub-batches.
      merged.append(concatenate(outputs, axis=0, name=name))
    return Model(model.inputs, merged)
| 8,957 | 33.992188 | 80 | py |
keras | keras-master/keras/utils/mode_keys.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras model mode constants."""
# pylint: disable=unused-import
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys as ModeKeys
# pylint: enable=unused-import
| 877 | 42.9 | 89 | py |
keras | keras-master/keras/utils/losses_utils_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_utils."""
import tensorflow.compat.v2 as tf
from keras import combinations
from keras.utils import losses_utils
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RemoveSqueezableTest(tf.test.TestCase):
  """Tests for `losses_utils.remove_squeezable_dimensions`."""
  def test_ragged_3d_same_shape(self):
    """Ragged input of shape (2, (sequence={1, 2}), 3) keeps its rank."""
    ragged = tf.ragged.constant([[[1, 2, 3]], [[4, 5, 6], [7, 8, 9]]])
    expected_rank = ragged.shape.ndims
    squeezed, _ = losses_utils.remove_squeezable_dimensions(ragged, ragged)
    self.assertEqual(squeezed.shape.ndims, expected_rank)
  def test_ragged_3d_4d_squeezable(self):
    """A trailing singleton axis is squeezed, in either argument position.
    Shapes:
      labels: (2, (sequence={1, 2}), 3)
      preds:  (2, (sequence={1, 2}), 3, 1)
    """
    labels = tf.ragged.constant([[[1, 2, 3]], [[4, 5, 6], [7, 8, 9]]])
    preds = tf.expand_dims(labels, axis=-1)
    self.assertEqual(labels.shape.ndims, 3)
    self.assertEqual(preds.shape.ndims, 4)
    _, squeezed_preds = losses_utils.remove_squeezable_dimensions(labels, preds)
    squeezed_preds.shape.assert_is_compatible_with(labels.shape)
    self.assertEqual(squeezed_preds.shape.ndims, 3)
    squeezed_labels, _ = losses_utils.remove_squeezable_dimensions(
        preds, labels)
    squeezed_labels.shape.assert_is_compatible_with(labels.shape)
    self.assertEqual(squeezed_labels.shape.ndims, 3)
  def test_dense_2d_3d_squeezable(self):
    """Dense tensors: the rank-3 tensor with last dim 1 squeezes to rank 2."""
    rank2 = tf.constant([[1, 2], [3, 4]])
    rank3 = tf.constant([[[1], [2]], [[3], [4]]])
    _, squeezed = losses_utils.remove_squeezable_dimensions(rank2, rank3)
    squeezed.shape.assert_is_compatible_with(rank2.shape)
    self.assertEqual(squeezed.shape.ndims, rank2.shape.ndims)
    swapped, _ = losses_utils.remove_squeezable_dimensions(rank3, rank2)
    swapped.shape.assert_is_compatible_with(rank2.shape)
class RemoveSqueezableTestGraphOnly(tf.test.TestCase):
  """Graph-mode-only tests for `remove_squeezable_dimensions`."""
  def test_placeholder(self):
    """Placeholders with unknown (dynamic) rank are squeezed correctly."""
    with tf.Graph().as_default():
      flat = tf.compat.v1.placeholder_with_default([1., 2., 3.], shape=None)
      column = tf.compat.v1.placeholder_with_default([[1.], [2.], [3.]],
                                                     shape=None)
      _, squeezed = losses_utils.remove_squeezable_dimensions(flat, column)
      squeezed.shape.assert_is_compatible_with(flat.shape)
      self.assertAllEqual(tf.shape(flat), tf.shape(squeezed))
      swapped, _ = losses_utils.remove_squeezable_dimensions(column, flat)
      swapped.shape.assert_is_compatible_with(flat.shape)
# Standard TF test entry point so this file can be run directly.
if __name__ == '__main__':
  tf.test.main()
| 2,989 | 37.333333 | 80 | py |
keras | keras-master/keras/utils/kernelized_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods related to kernelized layers."""
import tensorflow.compat.v2 as tf
def _to_matrix(u):
"""If input tensor is a vector (i.e., has rank 1), converts it to matrix."""
u_rank = len(u.shape)
if u_rank not in [1, 2]:
raise ValueError('The input tensor should have rank 1 or 2. '
f'Received rank: {u_rank}')
if u_rank == 1:
return tf.expand_dims(u, 0)
return u
def _align_matrices(x, y):
  """Tiles x and y so element [i, j] pairs row i of x with row j of y."""
  x_mat = _to_matrix(x)
  y_mat = _to_matrix(y)
  x_rows, x_cols = x_mat.shape[0], x_mat.shape[1]
  y_rows, y_cols = y_mat.shape[0], y_mat.shape[1]
  if y_cols != x_cols:
    # Row dimensions must agree for rows to be comparable.
    raise ValueError(
        'The outermost dimensions of the input tensors should match. '
        f'Received y = {y_cols} vs x = {x_cols}.')
  # Broadcast x's rows along a new middle axis and y's rows along a new
  # leading axis, producing two [x_rows, y_rows, cols] tensors.
  x_tile = tf.tile(tf.expand_dims(x_mat, 1), [1, y_rows, 1])
  y_tile = tf.tile(tf.expand_dims(y_mat, 0), [x_rows, 1, 1])
  return x_tile, y_tile
def inner_product(u, v):
  """Returns the matrix of dot products between rows of `u` and rows of `v`."""
  u_matrix = _to_matrix(u)
  v_matrix = _to_matrix(v)
  # matmul with transpose_b computes all pairwise row dot products at once.
  return tf.matmul(u_matrix, v_matrix, transpose_b=True)
def exact_gaussian_kernel(x, y, stddev):
  r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev.
  For vectors u, v the Gaussian kernel is
      K(u, v) = exp(-||u-v||^2 / (2 * stddev^2))
  with ||.|| the l2-norm. Inputs may be vectors (same dimension required) or
  matrices (same number of columns required); for matrices the result is the
  matrix of K(u, v) over all row pairs (u from x, v from y).
  Args:
    x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].
    stddev: The width of the Gaussian kernel.
  Returns:
    A (1, 1) tensor if x, y are vectors, otherwise an (m, n) matrix of kernel
    values for all row pairs of x and y.
  Raises:
    ValueError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  # Squared l2 distance between every row pair, reduced over the feature axis.
  squared_l2 = tf.reduce_sum(
      tf.math.squared_difference(x_aligned, y_aligned), 2)
  return tf.exp(-squared_l2 / (2 * stddev * stddev))
def exact_laplacian_kernel(x, y, stddev):
  r"""Computes exact Laplacian kernel value(s) for tensors x and y using stddev.
  For vectors u, v the Laplacian kernel is
      K(u, v) = exp(-||u-v|| / stddev)
  with ||.|| the l1-norm. Inputs may be vectors (same dimension required) or
  matrices (same number of columns required); for matrices the result is the
  matrix of K(u, v) over all row pairs (u from x, v from y).
  Args:
    x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].
    stddev: The width of the Laplacian kernel.
  Returns:
    A (1, 1) tensor if x, y are vectors, otherwise an (m, n) matrix of kernel
    values for all row pairs of x and y.
  Raises:
    ValueError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  # l1 distance between every row pair, reduced over the feature axis.
  l1_dist = tf.reduce_sum(
      tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
  return tf.exp(-l1_dist / stddev)
| 4,387 | 37.831858 | 80 | py |
keras | keras-master/keras/utils/__init__.py | 0 | 0 | 0 | py | |
keras | keras-master/keras/utils/multi_gpu_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi-gpu training utilities."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import optimizer_v1
from keras.utils import multi_gpu_utils
from keras.utils import np_utils
def check_if_compatible_devices(gpus=2):
  """Returns True if the machine exposes at least `gpus` GPU devices."""
  raw_names = keras.utils.multi_gpu_utils._get_available_devices()
  available = [
      keras.utils.multi_gpu_utils._normalize_device_name(name)
      for name in raw_names
  ]
  # The highest-indexed GPU required is gpu:(gpus - 1).
  return '/gpu:%d' % (gpus - 1) in available
class TestMultiGPUModel(tf.test.TestCase):
  """End-to-end tests for `multi_gpu_utils.multi_gpu_model`."""
  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    super(TestMultiGPUModel, self).__init__(methodName)
    gpu_devices = tf.config.list_physical_devices('GPU')
    if len(gpu_devices) == 1:
      # A GPU is available, simulate 2 instead.
      tf.config.set_logical_device_configuration(gpu_devices[0], [
          tf.config.LogicalDeviceConfiguration(500),
          tf.config.LogicalDeviceConfiguration(500)
      ])
  def test_multi_gpu_test_simple_model(self):
    # Replicates a small Sequential model with both an integer `gpus` and an
    # explicit list of GPU ids, and trains both parallel models.
    gpus = 2
    num_samples = 1000
    input_dim = 10
    output_dim = 1
    hidden_dim = 10
    epochs = 2
    target_gpu_id = [0, 1]
    if not check_if_compatible_devices(gpus=gpus):
      self.skipTest('multi gpu only')
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(hidden_dim,
                                   input_shape=(input_dim,)))
      model.add(keras.layers.Dense(output_dim))
      x = np.random.random((num_samples, input_dim))
      y = np.random.random((num_samples, output_dim))
      parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=gpus)
      parallel_model.compile(loss='mse', optimizer='rmsprop')
      parallel_model.fit(x, y, epochs=epochs)
      parallel_model = multi_gpu_utils.multi_gpu_model(
          model, gpus=target_gpu_id)
      parallel_model.compile(loss='mse', optimizer='rmsprop')
      parallel_model.fit(x, y, epochs=epochs)
  def test_multi_gpu_test_multi_io_model(self):
    # Same as above but with a functional model that has two inputs and two
    # outputs, exercising multi-input slicing and multi-output merging.
    gpus = 2
    num_samples = 1000
    input_dim_a = 10
    input_dim_b = 5
    output_dim_a = 1
    output_dim_b = 2
    hidden_dim = 10
    epochs = 2
    target_gpu_id = [0, 1]
    if not check_if_compatible_devices(gpus=gpus):
      self.skipTest('multi gpu only')
    with self.cached_session():
      input_a = keras.Input((input_dim_a,))
      input_b = keras.Input((input_dim_b,))
      a = keras.layers.Dense(hidden_dim)(input_a)
      b = keras.layers.Dense(hidden_dim)(input_b)
      c = keras.layers.concatenate([a, b])
      output_a = keras.layers.Dense(output_dim_a)(c)
      output_b = keras.layers.Dense(output_dim_b)(c)
      model = keras.models.Model([input_a, input_b], [output_a, output_b])
      a_x = np.random.random((num_samples, input_dim_a))
      b_x = np.random.random((num_samples, input_dim_b))
      a_y = np.random.random((num_samples, output_dim_a))
      b_y = np.random.random((num_samples, output_dim_b))
      parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=gpus)
      parallel_model.compile(loss='mse', optimizer='rmsprop')
      parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
      parallel_model = multi_gpu_utils.multi_gpu_model(
          model, gpus=target_gpu_id)
      parallel_model.compile(loss='mse', optimizer='rmsprop')
      parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
  def test_multi_gpu_test_invalid_devices(self):
    # Each invalid `gpus` specification (too many, nonexistent ids, < 2)
    # must raise ValueError.
    if not check_if_compatible_devices(gpus=2):
      self.skipTest('multi gpu only')
    with self.cached_session():
      input_shape = (1000, 10)
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(10,
                                   activation='relu',
                                   input_shape=input_shape[1:]))
      model.add(keras.layers.Dense(1, activation='sigmoid'))
      model.compile(loss='mse', optimizer='rmsprop')
      x = np.random.random(input_shape)
      y = np.random.random((input_shape[0], 1))
      with self.assertRaises(ValueError):
        parallel_model = multi_gpu_utils.multi_gpu_model(
            model, gpus=len(keras.backend._get_available_gpus()) + 1)
        parallel_model.fit(x, y, epochs=2)
      with self.assertRaises(ValueError):
        parallel_model = multi_gpu_utils.multi_gpu_model(
            model, gpus=[0, 2, 4, 6, 8])
        parallel_model.fit(x, y, epochs=2)
      with self.assertRaises(ValueError):
        parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=1)
        parallel_model.fit(x, y, epochs=2)
      with self.assertRaises(ValueError):
        parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=[0])
        parallel_model.fit(x, y, epochs=2)
  def test_nested_model_with_tensor_input(self):
    # Wraps a model around a symbolic dataset tensor input and replicates the
    # outer model (graph mode only).
    gpus = 2
    input_dim = 10
    shape = (input_dim,)
    num_samples = 16
    num_classes = 10
    if not check_if_compatible_devices(gpus=gpus):
      self.skipTest('multi gpu only')
    with tf.Graph().as_default(), self.cached_session():
      input_shape = (num_samples,) + shape
      x_train = np.random.randint(0, 255, input_shape)
      y_train = np.random.randint(0, num_classes, (input_shape[0],))
      y_train = np_utils.to_categorical(y_train, num_classes)
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')
      dataset = tf.compat.v1.data.Dataset.from_tensor_slices((x_train, y_train))
      dataset = dataset.repeat()
      dataset = dataset.batch(4)
      iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
      inputs, targets = iterator.get_next()
      input_tensor = keras.layers.Input(tensor=inputs)
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(3,
                                   input_shape=(input_dim,)))
      model.add(keras.layers.Dense(num_classes))
      output = model(input_tensor)
      outer_model = keras.Model(input_tensor, output)
      parallel_model = multi_gpu_utils.multi_gpu_model(outer_model, gpus=gpus)
      parallel_model.compile(
          loss='categorical_crossentropy',
          optimizer=optimizer_v1.RMSprop(lr=0.0001, decay=1e-6),
          metrics=['accuracy'],
          target_tensors=[targets])
      parallel_model.fit(epochs=1, steps_per_epoch=3)
  def test_multi_gpu_with_multi_input_layers(self):
    # An RNN layer with an extra `initial_state` input must survive slicing.
    gpus = 2
    if not check_if_compatible_devices(gpus=gpus):
      self.skipTest('multi gpu only')
    with self.cached_session():
      inputs = keras.Input((4, 3))
      init_state = keras.Input((3,))
      outputs = keras.layers.SimpleRNN(
          3, return_sequences=True)(inputs, initial_state=init_state)
      x = [np.random.randn(2, 4, 3), np.random.randn(2, 3)]
      y = np.random.randn(2, 4, 3)
      model = keras.Model([inputs, init_state], outputs)
      parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=gpus)
      parallel_model.compile(loss='mean_squared_error', optimizer='adam')
      parallel_model.train_on_batch(x, y)
  def test_multi_gpu_with_siamese_network(self):
    # Shared (Siamese) submodels create duplicate output names; the replica
    # merge must deduplicate them ('nested', 'nested_1').
    gpus = 2
    if not check_if_compatible_devices(gpus=gpus):
      self.skipTest('multi gpu only')
    with self.cached_session():
      input_shape = (3,)
      nested_model = keras.models.Sequential([
          keras.layers.Dense(32, input_shape=input_shape),
          keras.layers.Dense(1)
      ], name='nested')
      input1 = keras.Input(input_shape)
      input2 = keras.Input(input_shape)
      score1 = nested_model(input1)
      score2 = nested_model(input2)
      score_sum = keras.layers.Add(name='add')([score1, score2])
      siamese = keras.models.Model(inputs=[input1, input2],
                                   outputs=[score_sum, score1, score2],
                                   name='siamese')
      parallel_siamese = multi_gpu_utils.multi_gpu_model(siamese, gpus)
      self.assertEqual(parallel_siamese.output_names,
                       ['add', 'nested', 'nested_1'])
# Standard TF test entry point so this file can be run directly.
if __name__ == '__main__':
  tf.test.main()
| 8,678 | 35.313808 | 80 | py |
keras | keras-master/keras/utils/layer_utils_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer_utils."""
import tensorflow.compat.v2 as tf
import collections
import contextlib
import multiprocessing.dummy
import pickle
import time
import timeit
import numpy as np
from keras.utils import layer_utils
# Module-level counter of how many times `MyPickleableObject.my_id` is actually
# computed (i.e. cache misses), keyed by instance. Shared state inspected by
# the property-cache serialization test below.
_PICKLEABLE_CALL_COUNT = collections.Counter()
class MyPickleableObject(tf.__internal__.tracking.AutoTrackable):
  """Needed for InterfaceTests.test_property_cache_serialization.
  This class must be at the top level. This is a constraint of pickle,
  unrelated to `cached_per_instance`.
  """
  @property
  @layer_utils.cached_per_instance
  def my_id(self):
    # Record each real computation so tests can verify the cache short-circuits
    # subsequent accesses.
    _PICKLEABLE_CALL_COUNT[self] += 1
    return id(self)
class LayerUtilsTest(tf.test.TestCase):
  """Tests for `layer_utils.cached_per_instance`."""
  def test_property_cache(self):
    test_counter = collections.Counter()
    class MyObject(tf.__internal__.tracking.AutoTrackable):
      def __init__(self):
        super(MyObject, self).__init__()
        self._frozen = True
      def __setattr__(self, key, value):
        """Enforce that cache does not set attribute on MyObject."""
        if getattr(self, "_frozen", False):
          raise ValueError("Cannot mutate when frozen.")
        return super(MyObject, self).__setattr__(key, value)
      @property
      @layer_utils.cached_per_instance
      def test_property(self):
        test_counter[id(self)] += 1
        return id(self)
    first_object = MyObject()
    second_object = MyObject()
    # Make sure the objects return the correct values
    self.assertEqual(first_object.test_property, id(first_object))
    self.assertEqual(second_object.test_property, id(second_object))
    # Make sure the cache does not share across objects
    self.assertNotEqual(first_object.test_property, second_object.test_property)
    # Check again (Now the values should be cached.)
    self.assertEqual(first_object.test_property, id(first_object))
    self.assertEqual(second_object.test_property, id(second_object))
    # Count the function calls to make sure the cache is actually being used.
    self.assertAllEqual(tuple(test_counter.values()), (1, 1))
  def test_property_cache_threaded(self):
    call_count = collections.Counter()
    class MyObject(tf.__internal__.tracking.AutoTrackable):
      @property
      @layer_utils.cached_per_instance
      def test_property(self):
        # Random sleeps to ensure that the execution thread changes
        # mid-computation.
        call_count["test_property"] += 1
        time.sleep(np.random.random() + 1.)
        # Use a RandomState which is seeded off the instance's id (the mod is
        # because numpy limits the range of seeds) to ensure that an instance
        # returns the same value in different threads, but different instances
        # return different values.
        return int(np.random.RandomState(id(self) % (2 ** 31)).randint(2 ** 16))
      def get_test_property(self, _):
        """Function provided to .map for threading test."""
        return self.test_property
    # Test that multiple threads return the same value. This requires that
    # the underlying function is repeatable, as cached_property makes no attempt
    # to prioritize the first call.
    test_obj = MyObject()
    with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:
      # Intentionally make a large pool (even when there are only a small number
      # of cpus) to ensure that the runtime switches threads.
      results = pool.map(test_obj.get_test_property, range(64))
    self.assertEqual(len(set(results)), 1)
    # Make sure we actually are testing threaded behavior.
    self.assertGreater(call_count["test_property"], 1)
    # Make sure new threads still cache hit.
    with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:
      start_time = timeit.default_timer()  # Don't time pool instantiation.
      results = pool.map(test_obj.get_test_property, range(4))
    total_time = timeit.default_timer() - start_time
    # Note(taylorrobie): The reason that it is safe to time a unit test is that
    # a cache hit will be << 1 second, and a cache miss is
    # guaranteed to be >= 1 second. Empirically confirmed by
    # 100,000 runs with no flakes.
    self.assertLess(total_time, 0.95)
  def test_property_cache_serialization(self):
    # Reset call count. .keys() must be wrapped in a list, because otherwise we
    # would mutate the iterator while iterating.
    for k in list(_PICKLEABLE_CALL_COUNT.keys()):
      _PICKLEABLE_CALL_COUNT.pop(k)
    first_instance = MyPickleableObject()
    self.assertEqual(id(first_instance), first_instance.my_id)
    # Test that we can pickle and un-pickle
    second_instance = pickle.loads(pickle.dumps(first_instance))
    self.assertEqual(id(second_instance), second_instance.my_id)
    self.assertNotEqual(first_instance.my_id, second_instance.my_id)
    # Make sure de-serialized object uses the cache.
    self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)
    # Make sure the decorator cache is not being serialized with the object.
    expected_size = len(pickle.dumps(second_instance))
    for _ in range(5):
      # Add some more entries to the cache.
      _ = MyPickleableObject().my_id
    self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)
    size_check_instance = MyPickleableObject()
    _ = size_check_instance.my_id
    self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))
# Standard TF test entry point so this file can be run directly.
if __name__ == "__main__":
  tf.test.main()
| 6,168 | 35.94012 | 80 | py |
keras | keras-master/keras/utils/version_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities for Keras classes with v1 and v2 versions."""
import tensorflow.compat.v2 as tf
from keras.utils.generic_utils import LazyLoader
# TODO(b/134426265): Switch back to single-quotes once the issue
# with copybara is fixed.
# pylint: disable=g-inconsistent-quotes
# These modules are loaded lazily to break the circular dependency between
# this utility module and the Keras engine/callbacks modules, which themselves
# import version_utils. Each name resolves to the real module on first access.
training = LazyLoader(
    "training", globals(),
    "keras.engine.training")
training_v1 = LazyLoader(
    "training_v1", globals(),
    "keras.engine.training_v1")
base_layer = LazyLoader(
    "base_layer", globals(),
    "keras.engine.base_layer")
base_layer_v1 = LazyLoader(
    "base_layer_v1", globals(),
    "keras.engine.base_layer_v1")
callbacks = LazyLoader(
    "callbacks", globals(),
    "keras.callbacks")
callbacks_v1 = LazyLoader(
    "callbacks_v1", globals(),
    "keras.callbacks_v1")
# pylint: enable=g-inconsistent-quotes
class ModelVersionSelector:
  """Chooses between Keras v1 and v2 Model class."""
  def __new__(cls, *args, **kwargs):  # pylint: disable=unused-argument
    # Decide at construction time which implementation lineage to use, then
    # rewrite the class ancestry accordingly before instantiation.
    use_v2 = should_use_v2()
    cls = swap_class(cls, training.Model, training_v1.Model, use_v2)  # pylint: disable=self-cls-assignment
    return super(ModelVersionSelector, cls).__new__(cls)
class LayerVersionSelector:
  """Chooses between Keras v1 and v2 Layer class."""
  def __new__(cls, *args, **kwargs):  # pylint: disable=unused-argument
    # Same pattern as ModelVersionSelector: pick the base Layer implementation
    # matching the current execution mode before the instance is created.
    use_v2 = should_use_v2()
    cls = swap_class(cls, base_layer.Layer, base_layer_v1.Layer, use_v2)  # pylint: disable=self-cls-assignment
    return super(LayerVersionSelector, cls).__new__(cls)
class TensorBoardVersionSelector:
  """Chooses between Keras v1 and v2 TensorBoard callback class."""
  def __new__(cls, *args, **kwargs):  # pylint: disable=unused-argument
    use_v2 = should_use_v2()
    start_cls = cls
    cls = swap_class(start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard,
                     use_v2)
    if start_cls == callbacks_v1.TensorBoard and cls == callbacks.TensorBoard:
      # Since the v2 class is not a subclass of the v1 class, __init__ has to
      # be called manually.
      return cls(*args, **kwargs)
    return super(TensorBoardVersionSelector, cls).__new__(cls)
def should_use_v2():
  """Determine if v1 or v2 version should be used."""
  if tf.executing_eagerly():
    return True
  if not tf.compat.v1.executing_eagerly_outside_functions():
    # Pure graph mode: always v1.
    return False
  # Eager outside functions, but code inside a v1 `wrap_function` FuncGraph is
  # treated like v1 code.
  graph = tf.compat.v1.get_default_graph()
  graph_name = getattr(graph, "name", False)
  return not (graph_name and graph_name.startswith("wrapped_function"))
def swap_class(cls, v2_cls, v1_cls, use_v2):
  """Swaps in v2_cls or v1_cls depending on graph mode."""
  if cls == object:
    return cls
  if cls in (v2_cls, v1_cls):
    return v2_cls if use_v2 else v1_cls
  # Otherwise, recursively rewrite the ancestry so the correct Keras base
  # class ends up in the MRO.
  def _maybe_swap(base):
    # `v1_cls` often extends `v2_cls`, so this check may match even when no
    # swap is strictly needed. Recursing anyway is the safest choice for
    # correctness, especially if the v1/v2 classes don't extend each other
    # or their inheritance order differs.
    needs_swap = (issubclass(base, v1_cls) if use_v2
                  else issubclass(base, v2_cls))
    return swap_class(base, v2_cls, v1_cls, use_v2) if needs_swap else base
  cls.__bases__ = tuple(_maybe_swap(base) for base in cls.__bases__)
  return cls
def disallow_legacy_graph(cls_name, method_name):
  """Raises when `method_name` is invoked in legacy (v1) graph mode."""
  if tf.compat.v1.executing_eagerly_outside_functions():
    return
  raise ValueError(
      f"Calling `{cls_name}.{method_name}` in graph mode is not supported "
      f"when the `{cls_name}` instance was constructed with eager mode "
      f"enabled. Please construct your `{cls_name}` instance in graph mode or"
      f" call `{cls_name}.{method_name}` with eager mode enabled.")
def is_v1_layer_or_model(obj):
  """Returns True if `obj` is a v1 Keras `Layer` or v1 `Model` instance."""
  legacy_types = (base_layer_v1.Layer, training_v1.Model)
  return isinstance(obj, legacy_types)
| 4,883 | 35.721805 | 111 | py |
keras | keras-master/keras/utils/tf_utils_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras TF utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras import combinations
from keras.utils import tf_utils
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestIsSymbolicTensor(tf.test.TestCase, parameterized.TestCase):
  """Tests `tf_utils.is_symbolic_tensor` in both graph and eager modes."""
  def test_default_behavior(self):
    # In eager mode nothing is symbolic; in graph mode variables, tensors and
    # sparse tensors are all reported as symbolic.
    if tf.executing_eagerly():
      self.assertFalse(tf_utils.is_symbolic_tensor(
          tf.Variable(name='blah', initial_value=0.)))
      self.assertFalse(
          tf_utils.is_symbolic_tensor(
              tf.convert_to_tensor(0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          tf.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
    else:
      self.assertTrue(tf_utils.is_symbolic_tensor(
          tf.Variable(name='blah', initial_value=0.)))
      self.assertTrue(
          tf_utils.is_symbolic_tensor(
              tf.convert_to_tensor(0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          tf.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
  def test_works_with_registered(self):
    # A user type registered via `register_symbolic_tensor_type` should be
    # classified the same way as native tensors.
    class CustomClass:
      def value(self):
        return tf.convert_to_tensor(42.)
    tf.register_tensor_conversion_function(
        CustomClass, lambda value, **_: value.value())
    tf_utils.register_symbolic_tensor_type(CustomClass)
    if tf.executing_eagerly():
      self.assertFalse(tf_utils.is_symbolic_tensor(
          tf.Variable(name='blah', initial_value=0.)))
      self.assertFalse(
          tf_utils.is_symbolic_tensor(
              tf.convert_to_tensor(0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          tf.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))
    else:
      self.assertTrue(tf_utils.is_symbolic_tensor(
          tf.Variable(name='blah', initial_value=0.)))
      self.assertTrue(
          tf_utils.is_symbolic_tensor(
              tf.convert_to_tensor(0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          tf.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))
  def test_enables_nontensor_plumbing(self):
    # Verifies that registering a tensor-like user type lets non-Tensor
    # objects flow through a Keras model (graph mode only).
    if tf.executing_eagerly():
      self.skipTest('`compile` functionality changed.')
    # Setup.
    class Foo:
      def __init__(self, input_):
        self._input = input_
        self.value = tf.convert_to_tensor([[42.]])
      @property
      def dtype(self):
        return self.value.dtype
    tf.register_tensor_conversion_function(
        Foo, lambda x, *args, **kwargs: x.value)
    tf_utils.register_symbolic_tensor_type(Foo)
    class PlumbingLayer(keras.layers.Lambda):
      def __init__(self, fn, **kwargs):
        def _fn(*fargs, **fkwargs):
          d = fn(*fargs, **fkwargs)
          x = tf.convert_to_tensor(d)
          d.shape = x.shape
          d.get_shape = x.get_shape
          return d, x
        super(PlumbingLayer, self).__init__(_fn, **kwargs)
        self._enter_dunder_call = False
      def __call__(self, inputs, *args, **kwargs):
        self._enter_dunder_call = True
        d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
        self._enter_dunder_call = False
        return d
      def call(self, inputs, *args, **kwargs):
        d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
        if self._enter_dunder_call:
          return d, v
        return d
    # User-land.
    model = keras.Sequential([
        keras.layers.InputLayer((1,)),
        PlumbingLayer(Foo),  # Makes a `Foo` object.
    ])
    # Let's ensure Keras graph history is preserved by composing the models.
    model = keras.Model(model.inputs, model(model.outputs))
    # Now we instantiate the model and verify we have a `Foo` object, not a
    # `Tensor`.
    y = model(tf.convert_to_tensor([[7.]]))
    self.assertIsInstance(y, Foo)
    # Confirm that (custom) loss sees `Foo` instance, not Tensor.
    obtained_prediction_box = [None]
    def custom_loss(y_obs, y_pred):
      del y_obs
      obtained_prediction_box[0] = y_pred
      return y_pred
    # Apparently `compile` calls the loss function enough to trigger the
    # side-effect.
    model.compile('SGD', loss=custom_loss)
    self.assertIsInstance(obtained_prediction_box[0], Foo)
class ConvertInnerNodeDataTest(tf.test.TestCase):
  """Tests for `tf_utils.convert_inner_node_data`."""

  def test_convert_inner_node_data(self):
    # Unwrapping: `ListWrapper` nodes become plain lists.
    wrapped = (tf_utils.ListWrapper(['l', 2, 3]),
               tf_utils.ListWrapper(['l', 5, 6]))
    unwrapped = tf_utils.convert_inner_node_data(wrapped)
    self.assertEqual(unwrapped, (['l', 2, 3], ['l', 5, 6]))
    # Wrapping: with `wrap=True` plain lists come back as `ListWrapper`s.
    rewrapped = tf_utils.convert_inner_node_data(
        (['l', 2, 3], ['l', 5, 6]), wrap=True)
    self.assertTrue(
        all(isinstance(node, tf_utils.ListWrapper) for node in rewrapped))
class AttrsTest(tf.test.TestCase):
  """Tests `map_structure_with_atomic` support for attrs-decorated classes."""

  def test_map_structure_with_atomic_accept_attr(self):
    if attr is None:
      self.skipTest('attr module is unavailable.')

    @attr.s(frozen=True)
    class Foo:
      bar = attr.ib()

    mapped = tf_utils.map_structure_with_atomic(
        is_atomic_fn=lambda value: isinstance(value, int),
        map_fn=lambda value: value + 1,
        nested=Foo(1))
    self.assertEqual(Foo(2), mapped)
class TestIsRagged(tf.test.TestCase):
  """Tests for `tf_utils.is_ragged`."""

  def test_is_ragged_return_true_for_ragged_tensor(self):
    ragged = tf.RaggedTensor.from_row_splits(
        values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
    self.assertTrue(tf_utils.is_ragged(ragged))

  def test_is_ragged_return_false_for_list(self):
    plain_list = [1., 2., 3.]
    self.assertFalse(tf_utils.is_ragged(plain_list))
class TestIsSparse(tf.test.TestCase):
  """Tests for `tf_utils.is_sparse`."""

  def test_is_sparse_return_true_for_sparse_tensor(self):
    sparse = tf.SparseTensor(
        indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    self.assertTrue(tf_utils.is_sparse(sparse))

  def test_is_sparse_return_true_for_sparse_tensor_value(self):
    # The legacy v1 `SparseTensorValue` container must also be recognized.
    sparse_value = tf.compat.v1.SparseTensorValue(
        indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    self.assertTrue(tf_utils.is_sparse(sparse_value))

  def test_is_sparse_return_false_for_list(self):
    plain_list = [1., 2., 3.]
    self.assertFalse(tf_utils.is_sparse(plain_list))
class TestIsExtensionType(tf.test.TestCase):
  """Tests for `tf_utils.is_extension_type`."""

  def test_is_extension_type_return_true_for_ragged_tensor(self):
    ragged = tf.ragged.constant([[1, 2], [3]])
    self.assertTrue(tf_utils.is_extension_type(ragged))

  def test_is_extension_type_return_true_for_sparse_tensor(self):
    sparse = tf.sparse.from_dense([[1, 2], [3, 4]])
    self.assertTrue(tf_utils.is_extension_type(sparse))

  def test_is_extension_type_return_false_for_dense_tensor(self):
    dense = tf.constant([[1, 2], [3, 4]])
    self.assertFalse(tf_utils.is_extension_type(dense))

  def test_is_extension_type_return_false_for_list(self):
    plain_list = [1., 2., 3.]
    self.assertFalse(tf_utils.is_extension_type(plain_list))
# Standalone entry point so this test file can be executed directly.
if __name__ == '__main__':
  tf.test.main()
| 7,924 | 33.012876 | 80 | py |
keras | keras-master/keras/utils/io_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities related to disk I/O."""
import os
def path_to_string(path):
  """Convert `PathLike` objects to their string representation.

  If given a non-string typed path object, converts it to its string
  representation via `os.fspath`.

  If the object passed to `path` is not among the above, then it is
  returned unchanged. This allows e.g. passthrough of file objects
  through this function.

  Args:
    path: `PathLike` object that represents a path

  Returns:
    A string representation of the path argument, if Python support exists.
  """
  is_pathlike = isinstance(path, os.PathLike)
  return os.fspath(path) if is_pathlike else path
def ask_to_proceed_with_overwrite(filepath):
  """Produces a prompt asking about overwriting a file.

  Args:
    filepath: the path to the file to be overwritten.

  Returns:
    True if we can proceed with overwrite, False otherwise.
  """
  answer = input('[WARNING] %s already exists - overwrite? '
                 '[y/n]' % (filepath)).strip().lower()
  # Keep asking until an unambiguous answer is given.
  while answer not in ('y', 'n'):
    answer = input('Enter "y" (overwrite) or "n" '
                   '(cancel).').strip().lower()
  if answer == 'n':
    return False
  print('[TIP] Next time specify overwrite=True!')
  return True
| 1,964 | 31.75 | 80 | py |
keras | keras-master/keras/feature_column/sequence_feature_column_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import combinations
from keras.feature_column import sequence_feature_column as ksfc
from keras.saving import model_config
def _initialized_session(config=None):
  """Returns a v1 Session with global variables and lookup tables initialized."""
  session = tf.compat.v1.Session(config=config)
  session.run(tf.compat.v1.global_variables_initializer())
  session.run(tf.compat.v1.tables_initializer())
  return session
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SequenceFeaturesTest(tf.test.TestCase, parameterized.TestCase):
  """Tests the `SequenceFeatures` layer with sequence feature columns."""
  @parameterized.named_parameters(
      {'testcase_name': '2D',
       'sparse_input_args_a': {
           # example 0, ids [2]
           # example 1, ids [0, 1]
           'indices': ((0, 0), (1, 0), (1, 1)),
           'values': (2, 0, 1),
           'dense_shape': (2, 2)},
       'sparse_input_args_b': {
           # example 0, ids [1]
           # example 1, ids [2, 0]
           'indices': ((0, 0), (1, 0), (1, 1)),
           'values': (1, 2, 0),
           'dense_shape': (2, 2)},
       'expected_input_layer': [
           # example 0, ids_a [2], ids_b [1]
           [[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
           # example 1, ids_a [0, 1], ids_b [2, 0]
           [[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
       'expected_sequence_length': [1, 2]},
      {'testcase_name': '3D',
       'sparse_input_args_a': {
           # feature 0, ids [[2], [0, 1]]
           # feature 1, ids [[0, 0], [1]]
           'indices': (
               (0, 0, 0), (0, 1, 0), (0, 1, 1),
               (1, 0, 0), (1, 0, 1), (1, 1, 0)),
           'values': (2, 0, 1, 0, 0, 1),
           'dense_shape': (2, 2, 2)},
       'sparse_input_args_b': {
           # feature 0, ids [[1, 1], [1]]
           # feature 1, ids [[2], [0]]
           'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
           'values': (1, 1, 1, 2, 0),
           'dense_shape': (2, 2, 2)},
       'expected_input_layer': [
           # feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
           [[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
           # feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
           [[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
       'expected_sequence_length': [2, 2]},
      )
  def test_embedding_column(
      self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
      expected_sequence_length):
    # Verifies embedded output values, sequence lengths and that embedding
    # weights are created (and ordered) per-column.
    sparse_input_a = tf.compat.v1.SparseTensorValue(**sparse_input_args_a)
    sparse_input_b = tf.compat.v1.SparseTensorValue(**sparse_input_args_b)
    vocabulary_size = 3
    embedding_dimension_a = 2
    embedding_values_a = (
        (1., 2.),  # id 0
        (3., 4.),  # id 1
        (5., 6.)  # id 2
    )
    embedding_dimension_b = 3
    embedding_values_b = (
        (11., 12., 13.),  # id 0
        (14., 15., 16.),  # id 1
        (17., 18., 19.)  # id 2
    )
    def _get_initializer(embedding_dimension, embedding_values):
      def _initializer(shape, dtype, partition_info=None):
        self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
        self.assertEqual(tf.float32, dtype)
        self.assertIsNone(partition_info)
        return embedding_values
      return _initializer
    categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column_a = tf.feature_column.embedding_column(
        categorical_column_a,
        dimension=embedding_dimension_a,
        initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
    categorical_column_b = tf.feature_column.sequence_categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_b = tf.feature_column.embedding_column(
        categorical_column_b,
        dimension=embedding_dimension_b,
        initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
    # Test that columns are reordered alphabetically.
    sequence_input_layer = ksfc.SequenceFeatures(
        [embedding_column_b, embedding_column_a])
    input_layer, sequence_length = sequence_input_layer({
        'aaa': sparse_input_a, 'bbb': sparse_input_b,})
    self.evaluate(tf.compat.v1.global_variables_initializer())
    weights = sequence_input_layer.weights
    self.assertCountEqual(
        ('sequence_features/aaa_embedding/embedding_weights:0',
         'sequence_features/bbb_embedding/embedding_weights:0'),
        tuple([v.name for v in weights]))
    self.assertAllEqual(embedding_values_a, self.evaluate(weights[0]))
    self.assertAllEqual(embedding_values_b, self.evaluate(weights[1]))
    self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
    self.assertAllEqual(
        expected_sequence_length, self.evaluate(sequence_length))
  def test_embedding_column_with_non_sequence_categorical(self):
    """Tests that error is raised for non-sequence embedding column."""
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column_a = tf.feature_column.embedding_column(
        categorical_column_a, dimension=2)
    sequence_input_layer = ksfc.SequenceFeatures([embedding_column_a])
    with self.assertRaisesRegex(
        ValueError,
        r'In embedding_column: aaa_embedding\. categorical_column must be of '
        r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
      _, _ = sequence_input_layer({'aaa': sparse_input})
  def test_shared_embedding_column(self):
    # Two sequence columns sharing one embedding table; checks output values,
    # sequence lengths and that exactly one shared variable is created.
    with tf.Graph().as_default():
      vocabulary_size = 3
      sparse_input_a = tf.compat.v1.SparseTensorValue(
          # example 0, ids [2]
          # example 1, ids [0, 1]
          indices=((0, 0), (1, 0), (1, 1)),
          values=(2, 0, 1),
          dense_shape=(2, 2))
      sparse_input_b = tf.compat.v1.SparseTensorValue(
          # example 0, ids [1]
          # example 1, ids [2, 0]
          indices=((0, 0), (1, 0), (1, 1)),
          values=(1, 2, 0),
          dense_shape=(2, 2))
      embedding_dimension = 2
      embedding_values = (
          (1., 2.),  # id 0
          (3., 4.),  # id 1
          (5., 6.)  # id 2
      )
      def _get_initializer(embedding_dimension, embedding_values):
        def _initializer(shape, dtype, partition_info=None):
          self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
          self.assertEqual(tf.float32, dtype)
          self.assertIsNone(partition_info)
          return embedding_values
        return _initializer
      expected_input_layer = [
          # example 0, ids_a [2], ids_b [1]
          [[5., 6., 3., 4.], [0., 0., 0., 0.]],
          # example 1, ids_a [0, 1], ids_b [2, 0]
          [[1., 2., 5., 6.], [3., 4., 1., 2.]],
      ]
      expected_sequence_length = [1, 2]
      categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
          key='aaa', num_buckets=vocabulary_size)
      categorical_column_b = tf.feature_column.sequence_categorical_column_with_identity(
          key='bbb', num_buckets=vocabulary_size)
      # Test that columns are reordered alphabetically.
      shared_embedding_columns = tf.feature_column.shared_embeddings(
          [categorical_column_b, categorical_column_a],
          dimension=embedding_dimension,
          initializer=_get_initializer(embedding_dimension, embedding_values))
      sequence_input_layer = ksfc.SequenceFeatures(shared_embedding_columns)
      input_layer, sequence_length = sequence_input_layer({
          'aaa': sparse_input_a, 'bbb': sparse_input_b})
      global_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      self.assertCountEqual(
          ('aaa_bbb_shared_embedding:0',),
          tuple([v.name for v in global_vars]))
      with _initialized_session() as sess:
        self.assertAllEqual(embedding_values,
                            global_vars[0].eval(session=sess))
        self.assertAllEqual(expected_input_layer,
                            input_layer.eval(session=sess))
        self.assertAllEqual(
            expected_sequence_length, sequence_length.eval(session=sess))
  def test_shared_embedding_column_with_non_sequence_categorical(self):
    """Tests that error is raised for non-sequence shared embedding column."""
    with tf.Graph().as_default():
      vocabulary_size = 3
      sparse_input_a = tf.compat.v1.SparseTensorValue(
          # example 0, ids [2]
          # example 1, ids [0, 1]
          indices=((0, 0), (1, 0), (1, 1)),
          values=(2, 0, 1),
          dense_shape=(2, 2))
      sparse_input_b = tf.compat.v1.SparseTensorValue(
          # example 0, ids [2]
          # example 1, ids [0, 1]
          indices=((0, 0), (1, 0), (1, 1)),
          values=(2, 0, 1),
          dense_shape=(2, 2))
      categorical_column_a = tf.feature_column.categorical_column_with_identity(
          key='aaa', num_buckets=vocabulary_size)
      categorical_column_b = tf.feature_column.categorical_column_with_identity(
          key='bbb', num_buckets=vocabulary_size)
      shared_embedding_columns = tf.feature_column.shared_embeddings(
          [categorical_column_a, categorical_column_b], dimension=2)
      sequence_input_layer = ksfc.SequenceFeatures(shared_embedding_columns)
      with self.assertRaisesRegex(
          ValueError,
          r'In embedding_column: aaa_shared_embedding\. '
          r'categorical_column must '
          r'be of type SequenceCategoricalColumn to use SequenceFeatures\.'):
        _, _ = sequence_input_layer({'aaa': sparse_input_a,
                                     'bbb': sparse_input_b})
  @parameterized.named_parameters(
      {'testcase_name': '2D',
       'sparse_input_args_a': {
           # example 0, ids [2]
           # example 1, ids [0, 1]
           'indices': ((0, 0), (1, 0), (1, 1)),
           'values': (2, 0, 1),
           'dense_shape': (2, 2)},
       'sparse_input_args_b': {
           # example 0, ids [1]
           # example 1, ids [1, 0]
           'indices': ((0, 0), (1, 0), (1, 1)),
           'values': (1, 1, 0),
           'dense_shape': (2, 2)},
       'expected_input_layer': [
           # example 0, ids_a [2], ids_b [1]
           [[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
           # example 1, ids_a [0, 1], ids_b [1, 0]
           [[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
       'expected_sequence_length': [1, 2]},
      {'testcase_name': '3D',
       'sparse_input_args_a': {
           # feature 0, ids [[2], [0, 1]]
           # feature 1, ids [[0, 0], [1]]
           'indices': (
               (0, 0, 0), (0, 1, 0), (0, 1, 1),
               (1, 0, 0), (1, 0, 1), (1, 1, 0)),
           'values': (2, 0, 1, 0, 0, 1),
           'dense_shape': (2, 2, 2)},
       'sparse_input_args_b': {
           # feature 0, ids [[1, 1], [1]]
           # feature 1, ids [[1], [0]]
           'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
           'values': (1, 1, 1, 1, 0),
           'dense_shape': (2, 2, 2)},
       'expected_input_layer': [
           # feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
           [[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
           # feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -]
           [[2., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
       'expected_sequence_length': [2, 2]},
      )
  def test_indicator_column(
      self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
      expected_sequence_length):
    # Verifies one-hot (multi-hot for 3D) output and sequence lengths for two
    # indicator columns, concatenated in alphabetical column order.
    sparse_input_a = tf.compat.v1.SparseTensorValue(**sparse_input_args_a)
    sparse_input_b = tf.compat.v1.SparseTensorValue(**sparse_input_args_b)
    vocabulary_size_a = 3
    vocabulary_size_b = 2
    categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size_a)
    indicator_column_a = tf.feature_column.indicator_column(categorical_column_a)
    categorical_column_b = tf.feature_column.sequence_categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size_b)
    indicator_column_b = tf.feature_column.indicator_column(categorical_column_b)
    # Test that columns are reordered alphabetically.
    sequence_input_layer = ksfc.SequenceFeatures(
        [indicator_column_b, indicator_column_a])
    input_layer, sequence_length = sequence_input_layer({
        'aaa': sparse_input_a, 'bbb': sparse_input_b})
    self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
    self.assertAllEqual(
        expected_sequence_length, self.evaluate(sequence_length))
  def test_indicator_column_with_non_sequence_categorical(self):
    """Tests that error is raised for non-sequence categorical column."""
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    indicator_column_a = tf.feature_column.indicator_column(categorical_column_a)
    sequence_input_layer = ksfc.SequenceFeatures([indicator_column_a])
    with self.assertRaisesRegex(
        ValueError,
        r'In indicator_column: aaa_indicator\. categorical_column must be of '
        r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
      _, _ = sequence_input_layer({'aaa': sparse_input})
  @parameterized.named_parameters(
      {'testcase_name': '2D',
       'sparse_input_args': {
           # example 0, values [0., 1]
           # example 1, [10.]
           'indices': ((0, 0), (0, 1), (1, 0)),
           'values': (0., 1., 10.),
           'dense_shape': (2, 2)},
       'expected_input_layer': [
           [[0.], [1.]],
           [[10.], [0.]]],
       'expected_sequence_length': [2, 1]},
      {'testcase_name': '3D',
       'sparse_input_args': {
           # feature 0, ids [[20, 3], [5]]
           # feature 1, ids [[3], [8]]
           'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
           'values': (20., 3., 5., 3., 8.),
           'dense_shape': (2, 2, 2)},
       'expected_input_layer': [
           [[20.], [3.], [5.], [0.]],
           [[3.], [0.], [8.], [0.]]],
       'expected_sequence_length': [2, 2]},
      )
  def test_numeric_column(
      self, sparse_input_args, expected_input_layer, expected_sequence_length):
    # Verifies dense output and sequence lengths for a scalar numeric column.
    sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args)
    numeric_column = tf.feature_column.sequence_numeric_column('aaa')
    sequence_input_layer = ksfc.SequenceFeatures([numeric_column])
    input_layer, sequence_length = sequence_input_layer({'aaa': sparse_input})
    self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
    self.assertAllEqual(
        expected_sequence_length, self.evaluate(sequence_length))
  @parameterized.named_parameters(
      {'testcase_name': '2D',
       'sparse_input_args': {
           # example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
           # example 1, [10., 11., 12., 13.]
           'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
                       (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
           'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
           'dense_shape': (2, 8)},
       'expected_input_layer': [
           # The output of numeric_column._get_dense_tensor should be flattened.
           [[0., 1., 2., 3.], [4., 5., 6., 7.]],
           [[10., 11., 12., 13.], [0., 0., 0., 0.]]],
       'expected_sequence_length': [2, 1]},
      {'testcase_name': '3D',
       'sparse_input_args': {
           # example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
           # example 1, [[10., 11., 12., 13.], []]
           'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
                       (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
                       (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
           'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
           'dense_shape': (2, 2, 4)},
       'expected_input_layer': [
           # The output of numeric_column._get_dense_tensor should be flattened.
           [[0., 1., 2., 3.], [4., 5., 6., 7.]],
           [[10., 11., 12., 13.], [0., 0., 0., 0.]]],
       'expected_sequence_length': [2, 1]},
      )
  def test_numeric_column_multi_dim(
      self, sparse_input_args, expected_input_layer, expected_sequence_length):
    """Tests SequenceFeatures for multi-dimensional numeric_column."""
    sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args)
    numeric_column = tf.feature_column.sequence_numeric_column('aaa', shape=(2, 2))
    sequence_input_layer = ksfc.SequenceFeatures([numeric_column])
    input_layer, sequence_length = sequence_input_layer({'aaa': sparse_input})
    self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
    self.assertAllEqual(
        expected_sequence_length, self.evaluate(sequence_length))
  def test_sequence_length_not_equal(self):
    """Tests that an error is raised when sequence lengths are not equal."""
    # Input a with sequence_length = [2, 1]
    sparse_input_a = tf.compat.v1.SparseTensorValue(
        indices=((0, 0), (0, 1), (1, 0)),
        values=(0., 1., 10.),
        dense_shape=(2, 2))
    # Input b with sequence_length = [1, 1]
    sparse_input_b = tf.compat.v1.SparseTensorValue(
        indices=((0, 0), (1, 0)),
        values=(1., 10.),
        dense_shape=(2, 2))
    numeric_column_a = tf.feature_column.sequence_numeric_column('aaa')
    numeric_column_b = tf.feature_column.sequence_numeric_column('bbb')
    sequence_input_layer = ksfc.SequenceFeatures(
        [numeric_column_a, numeric_column_b])
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                r'Condition x == y did not hold.*'):
      _, sequence_length = sequence_input_layer({
          'aaa': sparse_input_a,
          'bbb': sparse_input_b
      })
      self.evaluate(sequence_length)
  @parameterized.named_parameters(
      {'testcase_name': '2D',
       'sparse_input_args': {
           # example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
           # example 1, [[[10., 11.], [12., 13.]]]
           'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
                       (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
           'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
           'dense_shape': (2, 8)},
       'expected_shape': [2, 2, 4]},
      {'testcase_name': '3D',
       'sparse_input_args': {
           # example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
           # example 1, [[10., 11., 12., 13.], []]
           'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
                       (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
                       (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
           'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
           'dense_shape': (2, 2, 4)},
       'expected_shape': [2, 2, 4]},
      )
  def test_static_shape_from_tensors_numeric(
      self, sparse_input_args, expected_shape):
    """Tests that we return a known static shape when we have one."""
    sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args)
    numeric_column = tf.feature_column.sequence_numeric_column('aaa', shape=(2, 2))
    sequence_input_layer = ksfc.SequenceFeatures([numeric_column])
    input_layer, _ = sequence_input_layer({'aaa': sparse_input})
    shape = input_layer.get_shape()
    self.assertEqual(shape, expected_shape)
  @parameterized.named_parameters(
      {'testcase_name': '2D',
       'sparse_input_args': {
           # example 0, ids [2]
           # example 1, ids [0, 1]
           # example 2, ids []
           # example 3, ids [1]
           'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
           'values': (2, 0, 1, 1),
           'dense_shape': (4, 2)},
       'expected_shape': [4, 2, 3]},
      {'testcase_name': '3D',
       'sparse_input_args': {
           # example 0, ids [[2]]
           # example 1, ids [[0, 1], [2]]
           # example 2, ids []
           # example 3, ids [[1], [0, 2]]
           'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
                       (3, 0, 0), (3, 1, 0), (3, 1, 1)),
           'values': (2, 0, 1, 2, 1, 0, 2),
           'dense_shape': (4, 2, 2)},
       'expected_shape': [4, 2, 3]}
      )
  def test_static_shape_from_tensors_indicator(
      self, sparse_input_args, expected_shape):
    """Tests that we return a known static shape when we have one."""
    sparse_input = tf.compat.v1.SparseTensorValue(**sparse_input_args)
    categorical_column = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=3)
    indicator_column = tf.feature_column.indicator_column(categorical_column)
    sequence_input_layer = ksfc.SequenceFeatures([indicator_column])
    input_layer, _ = sequence_input_layer({'aaa': sparse_input})
    shape = input_layer.get_shape()
    self.assertEqual(shape, expected_shape)
  def test_compute_output_shape(self):
    # Verifies `compute_output_shape` reports the concatenated feature width
    # and that the layer's concrete output matches it.
    price1 = tf.feature_column.sequence_numeric_column('price1', shape=2)
    price2 = tf.feature_column.sequence_numeric_column('price2')
    features = {
        'price1': tf.SparseTensor(
            indices=[[0, 0, 0], [0, 0, 1],
                     [0, 1, 0], [0, 1, 1],
                     [1, 0, 0], [1, 0, 1],
                     [2, 0, 0], [2, 0, 1],
                     [3, 0, 0], [3, 0, 1]],
            values=[0., 1., 10., 11., 100., 101., 200., 201., 300., 301.],
            dense_shape=(4, 3, 2)),
        'price2': tf.SparseTensor(
            indices=[[0, 0],
                     [0, 1],
                     [1, 0],
                     [2, 0],
                     [3, 0]],
            values=[10., 11., 20., 30., 40.],
            dense_shape=(4, 3))}
    sequence_features = ksfc.SequenceFeatures([price1, price2])
    seq_input, seq_len = sequence_features(features)
    self.assertEqual(
        sequence_features.compute_output_shape((None, None)),
        (None, None, 3))
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())
    self.assertAllClose([[[0., 1., 10.], [10., 11., 11.], [0., 0., 0.]],
                         [[100., 101., 20.], [0., 0., 0.], [0., 0., 0.]],
                         [[200., 201., 30.], [0., 0., 0.], [0., 0., 0.]],
                         [[300., 301., 40.], [0., 0., 0.], [0., 0., 0.]]],
                        self.evaluate(seq_input))
    self.assertAllClose([2, 1, 1, 1], self.evaluate(seq_len))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SequenceFeaturesSerializationTest(tf.test.TestCase, parameterized.TestCase):
  """Serialization round-trip tests for the `SequenceFeatures` layer."""

  @parameterized.named_parameters(('trainable', True, 'trainable'),
                                  ('not_trainable', False, 'frozen'))
  def test_get_config(self, trainable, name):
    columns = [tf.feature_column.sequence_numeric_column('a')]
    layer = ksfc.SequenceFeatures(columns, trainable=trainable, name=name)
    config = layer.get_config()
    self.assertEqual(config['name'], layer.name)
    self.assertEqual(config['trainable'], trainable)
    column_configs = config['feature_columns']
    self.assertLen(column_configs, 1)
    self.assertEqual(column_configs[0]['class_name'], 'SequenceNumericColumn')
    self.assertEqual(column_configs[0]['config']['shape'], (1,))

  @parameterized.named_parameters(('trainable', True, 'trainable'),
                                  ('not_trainable', False, 'frozen'))
  def test_from_config(self, trainable, name):
    columns = [tf.feature_column.sequence_numeric_column('a')]
    original = ksfc.SequenceFeatures(columns, trainable=trainable, name=name)
    revived = ksfc.SequenceFeatures.from_config(original.get_config())
    self.assertEqual(revived.name, original.name)
    self.assertEqual(revived.trainable, trainable)
    self.assertLen(revived._feature_columns, 1)
    self.assertEqual(revived._feature_columns[0].name, 'a')

  def test_serialization_sequence_features(self):
    rating = tf.feature_column.sequence_numeric_column('rating')
    layer = ksfc.SequenceFeatures([rating])
    round_tripped = keras.layers.deserialize(keras.layers.serialize(layer))
    self.assertIsInstance(round_tripped, ksfc.SequenceFeatures)
class SequenceFeaturesSavingTest(tf.test.TestCase, parameterized.TestCase):
  """Tests JSON round-tripping a model containing a `SequenceFeatures` layer."""

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_saving_with_sequence_features(self):
    cols = [
        tf.feature_column.sequence_numeric_column('a'),
        tf.feature_column.indicator_column(
            tf.feature_column.sequence_categorical_column_with_vocabulary_list(
                'b', ['one', 'two']))
    ]
    input_layers = {
        'a':
            keras.layers.Input(shape=(None, 1), sparse=True, name='a'),
        'b':
            keras.layers.Input(
                shape=(None, 1), sparse=True, name='b', dtype='string')
    }
    fc_layer, _ = ksfc.SequenceFeatures(cols)(input_layers)
    # TODO(tibell): Figure out the right dtype and apply masking.
    # sequence_length_mask = array_ops.sequence_mask(sequence_length)
    # x = keras.layers.GRU(32)(fc_layer, mask=sequence_length_mask)
    x = keras.layers.GRU(32)(fc_layer)
    output = keras.layers.Dense(10)(x)
    model = keras.models.Model(input_layers, output)
    model.compile(
        loss=keras.losses.MSE,
        optimizer='rmsprop',
        metrics=[keras.metrics.categorical_accuracy])
    # Serialize to JSON and revive; predictions below run on the revived model.
    config = model.to_json()
    loaded_model = model_config.model_from_json(config)
    batch_size = 10
    timesteps = 1
    values_a = np.arange(10, dtype=np.float32)
    indices_a = np.zeros((10, 3), dtype=np.int64)
    indices_a[:, 0] = np.arange(10)
    inputs_a = tf.SparseTensor(indices_a, values_a,
                               (batch_size, timesteps, 1))
    # Use the builtin `str` dtype: the `np.str` alias was deprecated in
    # NumPy 1.20 and removed in 1.24; `str` yields the same unicode dtype.
    values_b = np.zeros(10, dtype=str)
    indices_b = np.zeros((10, 3), dtype=np.int64)
    indices_b[:, 0] = np.arange(10)
    inputs_b = tf.SparseTensor(indices_b, values_b,
                               (batch_size, timesteps, 1))
    with self.cached_session():
      # Initialize tables for V1 lookup.
      if not tf.executing_eagerly():
        self.evaluate(tf.compat.v1.tables_initializer())
      self.assertLen(
          loaded_model.predict({
              'a': inputs_a,
              'b': inputs_b
          }, steps=1), batch_size)
# Standard TensorFlow test entry point: runs every test class in this module.
if __name__ == '__main__':
  tf.test.main()
| 27,955 | 40.913043 | 89 | py |
keras | keras-master/keras/feature_column/dense_features_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import test_util
from keras import combinations
from keras import keras_parameterized
from keras.feature_column import dense_features as df
def _initialized_session(config=None):
  """Return a new V1 `Session` with global variables and tables initialized.

  Args:
    config: optional `tf.compat.v1.ConfigProto` forwarded to the session.

  Returns:
    A `tf.compat.v1.Session` ready for `run` calls.
  """
  session = tf.compat.v1.Session(config=config)
  for initializer in (tf.compat.v1.global_variables_initializer(),
                      tf.compat.v1.tables_initializer()):
    session.run(initializer)
  return session
class DenseFeaturesTest(keras_parameterized.TestCase):
  """Unit tests for the `df.DenseFeatures` layer."""

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_retrieving_input(self):
    # A plain numeric column passes the raw feature value straight through.
    features = {'a': [0.]}
    dense_features = df.DenseFeatures(tf.feature_column.numeric_column('a'))
    inputs = self.evaluate(dense_features(features))
    self.assertAllClose([[0.]], inputs)

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_reuses_variables(self):
    # Calling the layer twice on the same features must reuse the embedding
    # variable instead of creating a new one.
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3))

    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=3)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      del shape  # unused
      del dtype  # unused
      del partition_info  # unused
      embedding_values = (
          (1, 0),  # id 0
          (0, 1),  # id 1
          (1, 1))  # id 2
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures([embedding_column])
    features = {'a': sparse_input}

    inputs = dense_features(features)
    variables = dense_features.variables

    # Sanity check: test that the inputs are correct.
    self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

    # Check that only one variable was created.
    self.assertEqual(1, len(variables))

    # Check that invoking dense_features on the same features does not create
    # additional variables
    _ = dense_features(features)
    self.assertEqual(1, len(variables))
    self.assertIs(variables[0], dense_features.variables[0])
  @combinations.generate(combinations.combine(mode=['eager']))
  def test_dense_feature_with_partitioner(self):
    # With a fixed-size partitioner the embedding table is split into two
    # variables, and both must be reused on a second call.
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0), (3, 0)),
        values=(0, 1, 3, 2),
        dense_shape=(4, 4))

    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=4)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      # Initialize each partition with distinct values so lookups reveal
      # which partition served each id.
      offset = partition_info._var_offset[0]
      del shape  # unused
      del dtype  # unused
      if offset == 0:
        embedding_values = (
            (1, 0),  # id 0
            (0, 1))  # id 1
      else:
        embedding_values = (
            (1, 1),  # id 2
            (2, 2))  # id 3
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures(
        [embedding_column], partitioner=tf.compat.v1.fixed_size_partitioner(2))
    features = {'a': sparse_input}

    inputs = dense_features(features)
    variables = dense_features.variables

    # Sanity check: test that the inputs are correct.
    self.assertAllEqual([[1, 0], [0, 1], [2, 2], [1, 1]], inputs)

    # Check that exactly two variables (one per partition) were created.
    self.assertEqual(2, len(variables))

    # Check that invoking dense_features on the same features does not create
    # additional variables
    _ = dense_features(features)
    self.assertEqual(2, len(variables))
    self.assertIs(variables[0], dense_features.variables[0])
    self.assertIs(variables[1], dense_features.variables[1])
  @combinations.generate(combinations.combine(mode=['eager']))
  def test_feature_column_dense_features_gradient(self):
    # Gradients w.r.t. the embedding weights must flow through the layer as
    # IndexedSlices covering exactly the looked-up ids.
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3))

    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=3)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      del shape  # unused
      del dtype  # unused
      del partition_info  # unused
      embedding_values = (
          (1, 0),  # id 0
          (0, 1),  # id 1
          (1, 1))  # id 2
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures([embedding_column])
    features = {'a': sparse_input}

    def scale_matrix():
      matrix = dense_features(features)
      return 2 * matrix

    # Sanity check: Verify that scale_matrix returns the correct output.
    self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

    # Check that the returned gradient is correct.
    grad_function = backprop.implicit_grad(scale_matrix)
    grads_and_vars = grad_function()
    indexed_slice = grads_and_vars[0][0]
    gradient = grads_and_vars[0][0].values

    # d(2*w)/dw == 2 for every looked-up row.
    self.assertAllEqual([0, 1, 2], indexed_slice.indices)
    self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
  def test_raises_if_empty_feature_columns(self):
    # An empty feature_columns list is rejected at construction/call time.
    with self.assertRaisesRegex(ValueError,
                                'feature_columns must not be empty'):
      df.DenseFeatures(feature_columns=[])(features={})

  def test_should_be_dense_column(self):
    # Categorical columns are not DenseColumns and must be rejected.
    with self.assertRaisesRegex(ValueError, 'must be a .*DenseColumn'):
      df.DenseFeatures(feature_columns=[
          tf.feature_column.categorical_column_with_hash_bucket('wire_cast', 4)
      ])(
          features={
              'a': [[0]]
          })

  def test_does_not_support_dict_columns(self):
    # feature_columns must be a sequence, not a dict.
    with self.assertRaisesRegex(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      df.DenseFeatures(
          feature_columns={'a': tf.feature_column.numeric_column('a')})(
              features={
                  'a': [[0]]
              })
def test_bare_column(self):
with tf.Graph().as_default():
features = features = {'a': [0.]}
net = df.DenseFeatures(tf.feature_column.numeric_column('a'))(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with tf.Graph().as_default():
features = features = {'a': [0.], 'b': [1.]}
columns = (tf.feature_column.numeric_column(key) for key in features)
net = df.DenseFeatures(columns)(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0., 1.]], self.evaluate(net))
  def test_raises_if_duplicate_name(self):
    # Two columns with the same key would produce ambiguous output; rejected.
    with self.assertRaisesRegex(
        ValueError, 'Duplicate feature column name found for columns'):
      df.DenseFeatures(feature_columns=[
          tf.feature_column.numeric_column('a'),
          tf.feature_column.numeric_column('a')
      ])(
          features={
              'a': [[0]]
          })
  def test_one_column(self):
    # Single scalar numeric column: output is the input unchanged.
    price = tf.feature_column.numeric_column('price')
    with tf.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      net = df.DenseFeatures([price])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1.], [5.]], self.evaluate(net))

  def test_multi_dimension(self):
    # A shape=2 column produces two output units per example.
    price = tf.feature_column.numeric_column('price', shape=2)
    with tf.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      net = df.DenseFeatures([price])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_compute_output_shape(self):
    # Output width is the sum of the per-column widths (2 + 4 = 6).
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2', shape=4)
    with tf.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]]
      }
      dense_features = df.DenseFeatures([price1, price2])
      self.assertEqual((None, 6), dense_features.compute_output_shape((None,)))
      net = dense_features(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]],
                          self.evaluate(net))

  def test_raises_if_shape_mismatch(self):
    # Feeding 1 value per example into a shape=2 column fails at reshape.
    price = tf.feature_column.numeric_column('price', shape=2)
    with tf.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegex(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        df.DenseFeatures([price])(features)

  def test_reshaping(self):
    # A multi-dim column is flattened to (batch, num_elements) in the output.
    price = tf.feature_column.numeric_column('price', shape=[1, 2])
    with tf.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      net = df.DenseFeatures([price])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
  def test_multi_column(self):
    # Outputs of multiple columns are concatenated along the last axis.
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      net = df.DenseFeatures([price1, price2])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_cols_to_output_tensors(self):
    # The optional dict argument collects each column's individual output.
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      cols_dict = {}
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      dense_features = df.DenseFeatures([price1, price2])
      net = dense_features(features, cols_dict)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]],
                          self.evaluate(cols_dict[price1]))
      self.assertAllClose([[3.], [4.]], self.evaluate(cols_dict[price2]))
      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_column_order(self):
    # Columns are concatenated in sorted-by-name order, not argument order,
    # so both layers produce identical output here.
    price_a = tf.feature_column.numeric_column('price_a')
    price_b = tf.feature_column.numeric_column('price_b')
    with tf.Graph().as_default():
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
      }
      net1 = df.DenseFeatures([price_a, price_b])(features)
      net2 = df.DenseFeatures([price_b, price_a])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 3.]], self.evaluate(net1))
      self.assertAllClose([[1., 3.]], self.evaluate(net2))

  def test_fails_for_categorical_column(self):
    # A bare categorical column (no indicator/embedding wrapper) is rejected.
    animal = tf.feature_column.categorical_column_with_identity(
        'animal', num_buckets=4)
    with tf.Graph().as_default():
      features = {
          'animal':
              tf.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      with self.assertRaisesRegex(Exception, 'must be a .*DenseColumn'):
        df.DenseFeatures([animal])(features)
  def test_static_batch_size_mismatch(self):
    # Mismatched static batch sizes are caught at graph-construction time.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegex(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        df.DenseFeatures([price1, price2])(features)

  def test_subset_of_static_batch_size_mismatch(self):
    # Mismatch is detected even when one feature's batch size is unknown.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    price3 = tf.feature_column.numeric_column('price3')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegex(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        df.DenseFeatures([price1, price2, price3])(features)

  def test_runtime_batch_size_mismatch(self):
    # With placeholders the mismatch surfaces at session run time instead.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      net = df.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        with self.assertRaisesRegex(tf.errors.OpError,
                                    'Dimensions of inputs should match'):
          sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    # Equal runtime batch sizes run cleanly.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 2
          'price2': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 2
      }
      net = df.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        sess.run(
            net,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })
  def test_multiple_layers_with_same_embedding_column(self):
    # A plain (non-shared) embedding column creates a fresh variable per
    # DenseFeatures layer that uses it.
    some_sparse_column = tf.feature_column.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = tf.feature_column.embedding_column(
        some_sparse_column, dimension=10)

    with tf.Graph().as_default():
      features = {
          'sparse_feature': [['a'], ['x']],
      }
      all_cols = [some_embedding_column]
      df.DenseFeatures(all_cols)(features)
      df.DenseFeatures(all_cols)(features)
      # Make sure that 2 variables get created in this case.
      self.assertEqual(
          2,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      expected_var_names = [
          'dense_features/sparse_feature_embedding/embedding_weights:0',
          'dense_features_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertCountEqual(expected_var_names, [
          v.name for v in tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      ])
  @test_util.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column(self):
    # Shared embedding columns share one variable across layers in a graph.
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = tf.feature_column.shared_embeddings(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)

    with tf.Graph().as_default():
      features = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      all_cols = [embedding_column_a, embedding_column_b]
      df.DenseFeatures(all_cols)(features)
      df.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(
          1,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      self.assertCountEqual(['aaa_bbb_shared_embedding:0'], [
          v.name for v in tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      ])

  @test_util.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
    # Each graph gets its own copy of the shared embedding variable; sharing
    # never leaks across graph boundaries.
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = tf.feature_column.shared_embeddings(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    all_cols = [embedding_column_a, embedding_column_b]

    with tf.Graph().as_default():
      features = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      df.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(
          1,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))

    with tf.Graph().as_default():
      features1 = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }

      df.DenseFeatures(all_cols)(features1)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(
          1,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      self.assertCountEqual(['aaa_bbb_shared_embedding:0'], [
          v.name for v in tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      ])
  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    # Mixes a dense scalar feature, a 1-D sparse feature and a dense string
    # feature; verifies the concatenated (sorted-by-name) output row layout.
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = tf.feature_column.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = tf.feature_column.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    country = tf.feature_column.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = tf.feature_column.embedding_column(
        country, dimension=5, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            tf.constant([
                11.,
                12.,
            ]),
        'body-style':
            tf.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country':
            tf.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)

    net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                          sess.run(net))
  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    # Same mix of features as above but fed via shape-less placeholders so
    # the layer must handle fully-unknown input ranks.
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = tf.feature_column.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = tf.feature_column.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    country = tf.feature_column.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = tf.feature_column.embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': tf.compat.v1.placeholder(tf.float32),
        'body-style': tf.compat.v1.sparse_placeholder(tf.string),
        # This is dense tensor for the categorical_column.
        'country': tf.compat.v1.placeholder(tf.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)

    price_data = np.array([11., 12.])
    body_style_data = tf.compat.v1.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])

    net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))
  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    # Scalar (rank-0) features are rejected, both statically and at run time.
    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')
    features = {
        'price': tf.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
      df.DenseFeatures([price])(features)

    # Dynamic rank 0 should fail
    features = {
        'price': tf.compat.v1.placeholder(tf.float32),
    }
    net = df.DenseFeatures([price])(features)
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})
class IndicatorColumnTest(tf.test.TestCase):
  """Tests indicator (multi-hot) columns through DenseFeatures."""

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    # ids [1, 2] out of 4 buckets produce the multi-hot row [0, 1, 1, 0].
    animal = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_identity(
            'animal', num_buckets=4))
    with tf.Graph().as_default():
      features = {
          'animal':
              tf.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = df.DenseFeatures([animal])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
class EmbeddingColumnTest(tf.test.TestCase, parameterized.TestCase):
  """Tests embedding columns through DenseFeatures."""

  @parameterized.named_parameters(
      {
          'testcase_name': 'use_safe_embedding_lookup',
          'use_safe_embedding_lookup': True,
          'partition_variables': False,
      }, {
          'testcase_name': 'dont_use_safe_embedding_lookup',
          'use_safe_embedding_lookup': False,
          'partition_variables': False,
      }, {
          'testcase_name': 'use_safe_embedding_lookup_partitioned',
          'use_safe_embedding_lookup': True,
          'partition_variables': True,
      }, {
          'testcase_name': 'dont_use_safe_embedding_lookup_partitioned',
          'use_safe_embedding_lookup': False,
          'partition_variables': True,
      })
  @test_util.run_deprecated_v1
  def test_dense_features(self, use_safe_embedding_lookup, partition_variables):
    # Covers the cross product of safe-vs-unsafe embedding lookup and
    # partitioned-vs-unpartitioned embedding variables.
    # Inputs.
    vocabulary_size = 4
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.),  # id 2
        (9., 13.)  # id 3
    )

    def _initializer(shape, dtype, partition_info=None):
      # With partitioning each call initializes one of the two partitions.
      if partition_variables:
        self.assertEqual([vocabulary_size, embedding_dimension],
                         partition_info.full_shape)
        self.assertAllEqual((2, embedding_dimension), shape)
      else:
        self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
        self.assertIsNone(partition_info)

      self.assertEqual(tf.float32, dtype)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    partitioner = None
    if partition_variables:
      partitioner = tf.compat.v1.fixed_size_partitioner(2, axis=0)
    with tf.compat.v1.variable_scope('vars', partitioner=partitioner):
      embedding_column = tf.feature_column.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_initializer,
          use_safe_embedding_lookup=use_safe_embedding_lookup)

      # Provide sparse input and get dense result.
      l = df.DenseFeatures((embedding_column,))
      dense_features = l({'aaa': sparse_input})

    # Assert expected embedding variable and lookups.
    global_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    if partition_variables:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights/part_0:0',
           'vars/dense_features/aaa_embedding/embedding_weights/part_1:0'),
          tuple([v.name for v in global_vars]))
    else:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights:0',),
          tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, tf.Variable)
    trainable_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
    if partition_variables:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights/part_0:0',
           'vars/dense_features/aaa_embedding/embedding_weights/part_1:0'),
          tuple([v.name for v in trainable_vars]))
    else:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights:0',),
          tuple([v.name for v in trainable_vars]))

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

    # Safe lookup inserts a SparseFillEmptyRows op to handle empty examples;
    # the unsafe path must not.
    if use_safe_embedding_lookup:
      self.assertIn(
          'SparseFillEmptyRows',
          [x.type for x in tf.compat.v1.get_default_graph().get_operations()])
    else:
      self.assertNotIn(
          'SparseFillEmptyRows',
          [x.type for x in tf.compat.v1.get_default_graph().get_operations()])
  @test_util.run_deprecated_v1
  def test_dense_features_not_trainable(self):
    # With trainable=False the embedding variable is created in GLOBAL but
    # not in TRAINABLE variables; lookups still work.
    # Inputs.
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(tf.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=False)

    # Provide sparse input and get dense result.
    dense_features = df.DenseFeatures((embedding_column,))({
        'aaa': sparse_input
    })

    # Assert expected embedding variable and lookups.
    global_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    self.assertCountEqual(('dense_features/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    self.assertCountEqual([],
                          tf.compat.v1.get_collection(
                              tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES))

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))
class SharedEmbeddingColumnTest(tf.test.TestCase, parameterized.TestCase):
  """Tests DenseFeatures lookups backed by `shared_embeddings` columns."""

  def _test_dense_features(self, trainable=True):
    """Runs two shared-embedding pairs through DenseFeatures and checks output.

    Builds shared embeddings for the column pairs (aaa, bbb) and (ccc, ddd),
    feeds sparse inputs, and verifies the created variables (names, trainable
    collection membership) and the concatenated mean-combined lookups.

    Args:
      trainable: Whether the shared embedding variables should be trainable;
        controls whether they are expected in the TRAINABLE_VARIABLES
        collection.
    """
    # Inputs.
    vocabulary_size = 3
    sparse_input_a = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 4)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_b = tf.compat.v1.SparseTensorValue(
        # example 0, ids [0]
        # example 1, ids []
        indices=((0, 0),),
        values=(0,),
        dense_shape=(2, 5))
    sparse_input_c = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 1), (1, 1), (1, 3)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_d = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids []
        indices=((0, 1),),
        values=(2,),
        dense_shape=(2, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      # The initializer doubles as a check that it is invoked with the
      # expected variable shape/dtype and without partitioning.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(tf.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0:
        # A ids [2], embedding = [7, 11]
        # B ids [0], embedding = [1, 2]
        # C ids [2], embedding = [7, 11]
        # D ids [2], embedding = [7, 11]
        (7., 11., 1., 2., 7., 11., 7., 11.),
        # example 1:
        # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        # B ids [], embedding = [0, 0]
        # C ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        # D ids [], embedding = [0, 0]
        (2., 3.5, 0., 0., 2., 3.5, 0., 0.),
    )

    # Build columns.
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    categorical_column_c = tf.feature_column.categorical_column_with_identity(
        key='ccc', num_buckets=vocabulary_size)
    categorical_column_d = tf.feature_column.categorical_column_with_identity(
        key='ddd', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = tf.feature_column.shared_embeddings(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=trainable)
    embedding_column_c, embedding_column_d = tf.feature_column.shared_embeddings(
        [categorical_column_c, categorical_column_d],
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=trainable)
    features = {
        'aaa': sparse_input_a,
        'bbb': sparse_input_b,
        'ccc': sparse_input_c,
        'ddd': sparse_input_d
    }

    # Provide sparse input and get dense result. Note the columns are passed
    # out of order (b, a, c, d); the expected lookups assume the layer's
    # name-sorted column order (a, b, c, d).
    dense_features = df.DenseFeatures(
        feature_columns=(embedding_column_b, embedding_column_a,
                         embedding_column_c, embedding_column_d))(
                             features)

    # Assert expected embedding variable and lookups.
    global_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    self.assertCountEqual(
        ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
        tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, tf.Variable)
    trainable_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
    if trainable:
      self.assertCountEqual(
          ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
          tuple([v.name for v in trainable_vars]))
    else:
      self.assertCountEqual([], tuple([v.name for v in trainable_vars]))
    shared_embedding_vars = global_vars

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())

    self.assertAllEqual(embedding_values,
                        self.evaluate(shared_embedding_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    self._test_dense_features()

  @test_util.run_deprecated_v1
  def test_dense_features_no_trainable(self):
    self._test_dense_features(trainable=False)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class DenseFeaturesSerializationTest(tf.test.TestCase, parameterized.TestCase):
  """Tests get_config/from_config round-trips of the DenseFeatures layer."""

  @parameterized.named_parameters(('trainable', True, 'trainable'),
                                  ('not_trainable', False, 'frozen'))
  def test_get_config(self, trainable, name):
    """get_config captures name, trainable flag and serialized columns."""
    cols = [
        tf.feature_column.numeric_column('a'),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='b', num_buckets=3),
            dimension=2)
    ]
    orig_layer = df.DenseFeatures(cols, trainable=trainable, name=name)
    config = orig_layer.get_config()

    self.assertEqual(config['name'], orig_layer.name)
    self.assertEqual(config['trainable'], trainable)
    self.assertLen(config['feature_columns'], 2)
    self.assertEqual(config['feature_columns'][0]['class_name'],
                     'NumericColumn')
    self.assertEqual(config['feature_columns'][0]['config']['shape'], (1,))
    self.assertEqual(config['feature_columns'][1]['class_name'],
                     'EmbeddingColumn')

  @parameterized.named_parameters(('trainable', True, 'trainable'),
                                  ('not_trainable', False, 'frozen'))
  def test_from_config(self, trainable, name):
    """from_config rebuilds an equivalent layer with the same column types."""
    cols = [
        tf.feature_column.numeric_column('a'),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_vocabulary_list(
                'b', vocabulary_list=['1', '2', '3']),
            dimension=2),
        tf.feature_column.indicator_column(
            tf.feature_column.categorical_column_with_hash_bucket(
                key='c', hash_bucket_size=3))
    ]
    orig_layer = df.DenseFeatures(cols, trainable=trainable, name=name)
    config = orig_layer.get_config()

    new_layer = df.DenseFeatures.from_config(config)

    self.assertEqual(new_layer.name, orig_layer.name)
    self.assertEqual(new_layer.trainable, trainable)
    self.assertLen(new_layer._feature_columns, 3)
    self.assertEqual(new_layer._feature_columns[0].name, 'a')
    # Default embedding initializer parameters should survive the round-trip.
    self.assertEqual(new_layer._feature_columns[1].initializer.mean, 0.0)
    self.assertEqual(new_layer._feature_columns[1].categorical_column.name, 'b')
    self.assertIsInstance(new_layer._feature_columns[0], cols[0].__class__)
    self.assertIsInstance(new_layer._feature_columns[1], cols[1].__class__)
    self.assertIsInstance(new_layer._feature_columns[2], cols[2].__class__)

  def test_crossed_column(self):
    """Serialization round-trip works for an indicator-wrapped crossed column."""
    a = tf.feature_column.categorical_column_with_vocabulary_list(
        'a', vocabulary_list=['1', '2', '3'])
    b = tf.feature_column.categorical_column_with_vocabulary_list(
        'b', vocabulary_list=['1', '2', '3'])
    ab = tf.feature_column.crossed_column([a, b], hash_bucket_size=2)
    cols = [tf.feature_column.indicator_column(ab)]

    orig_layer = df.DenseFeatures(cols)
    config = orig_layer.get_config()

    new_layer = df.DenseFeatures.from_config(config)

    self.assertLen(new_layer._feature_columns, 1)
    self.assertEqual(new_layer._feature_columns[0].name, 'a_X_b_indicator')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SequenceFeatureColumnsTest(tf.test.TestCase):
  """Tests DenseFeatures with sequence feature columns."""

  def test_embedding_column(self):
    """Tests that error is raised for sequence embedding column."""
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))

    categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column_a = tf.feature_column.embedding_column(
        categorical_column_a, dimension=2)
    input_layer = df.DenseFeatures([embedding_column_a])

    # DenseFeatures rejects embeddings built on sequence categorical columns;
    # the error is raised when the layer is called, not when constructed.
    with self.assertRaisesRegex(
        ValueError,
        r'In embedding_column: aaa_embedding\. categorical_column must not be '
        r'of type SequenceCategoricalColumn\.'):
      _ = input_layer({'aaa': sparse_input})

  def test_indicator_column(self):
    """Tests that error is raised for sequence indicator column."""
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))

    categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    indicator_column_a = tf.feature_column.indicator_column(
        categorical_column_a)
    input_layer = df.DenseFeatures([indicator_column_a])

    # Same contract as above, for indicator columns.
    with self.assertRaisesRegex(
        ValueError,
        r'In indicator_column: aaa_indicator\. categorical_column must not be '
        r'of type SequenceCategoricalColumn\.'):
      _ = input_layer({'aaa': sparse_input})
# Run the tests in this file when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 43,338 | 36.817627 | 124 | py |
keras | keras-master/keras/feature_column/dense_features_v2.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.feature_column import base_feature_layer as kfc
from keras.feature_column import dense_features
from keras.utils import tf_contextlib
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.DenseFeatures', v1=[])
class DenseFeatures(dense_features.DenseFeatures):
  """A layer that produces a dense `Tensor` based on given `feature_columns`.

  Generally a single example in training data is described with FeatureColumns.
  At the first layer of the model, this column oriented data should be converted
  to a single `Tensor`.

  This layer can be called multiple times with different features.

  This is the V2 version of this layer that uses name_scopes to create
  variables instead of variable_scopes. But this approach currently lacks
  support for partitioned variables. In that case, use the V1 version instead.

  Example:

  ```python
  price = tf.feature_column.numeric_column('price')
  keywords_embedded = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_hash_bucket("keywords", 10K),
      dimensions=16)
  columns = [price, keywords_embedded, ...]
  feature_layer = tf.keras.layers.DenseFeatures(columns)

  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = feature_layer(features)
  for units in [128, 64, 32]:
    dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)
  prediction = tf.keras.layers.Dense(1)(dense_tensor)
  ```
  """

  def __init__(self,
               feature_columns,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a DenseFeatures object.

    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `DenseColumn` such as `numeric_column`, `embedding_column`,
        `bucketized_column`, `indicator_column`. If you have categorical
        features, you can wrap them with an `embedding_column` or
        `indicator_column`.
      trainable: Boolean, whether the layer's variables will be updated via
        gradient descent during training.
      name: Name to give to the DenseFeatures.
      **kwargs: Keyword arguments to construct a layer.

    Raises:
      ValueError: if an item in `feature_columns` is not a `DenseColumn`.
    """
    super(DenseFeatures, self).__init__(
        feature_columns=feature_columns,
        trainable=trainable,
        name=name,
        **kwargs)
    # Replace the state manager installed by the base class with the V2
    # variant defined below, which creates variables under name_scopes.
    self._state_manager = _StateManagerImplV2(self, self.trainable)

  def build(self, _):
    # Create each column's state (e.g. embedding tables) under a name_scope
    # rather than a variable_scope (the key difference from the V1 layer).
    for column in self._feature_columns:
      with tf.name_scope(column.name):
        column.create_state(self._state_manager)
    # We would like to call Layer.build and not _DenseFeaturesHelper.build.
    # pylint: disable=protected-access
    super(kfc._BaseFeaturesLayer, self).build(None)  # pylint: disable=bad-super-call
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):  # pylint: disable=protected-access
  """Manages the state of DenseFeatures."""

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a variable owned by `feature_column` and tracks it explicitly.

    The variable is tracked under `<column name>/<name>` rather than the bare
    `name`, because `name` is only unique within a single column.

    Args:
      feature_column: The feature column the variable belongs to.
      name: Variable name (unique only per feature column).
      shape: Shape of the variable.
      dtype: Optional dtype of the variable.
      trainable: Whether the variable is trainable; combined (AND) with the
        layer's own trainable flag.
      use_resource: Whether to create a resource variable.
      initializer: Optional initializer for the variable.

    Returns:
      The created variable.

    Raises:
      ValueError: If a variable with this `name` already exists for
        `feature_column`.
    """
    if name in self._cols_to_vars_map[feature_column]:
      raise ValueError('Variable already exists.')

    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_weight call does.
    with no_manual_dependency_tracking_scope(self._layer):
      var = self._layer.add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          trainable=self._trainable and trainable,
          use_resource=use_resource)
    if isinstance(var, tf.__internal__.tracking.Trackable):
      self._layer._track_trackable(var, feature_column.name + '/' + name)  # pylint: disable=protected-access
    self._cols_to_vars_map[feature_column][name] = var
    return var
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
  """A context that disables manual dependency tracking for the given `obj`.

  Sometimes library methods might track objects on their own and we might want
  to disable that and do the tracking on our own. One can then use this context
  manager to disable the tracking the library method does and do your own
  tracking.

  For example:

  class TestLayer(tf.keras.Layer):
    def build():
      with no_manual_dependency_tracking_scope(self):
        var = self.add_variable("name1")  # Creates a var and doesn't track it
      self._track_trackable("name2", var)  # We track variable with name `name2`

  Args:
    obj: A trackable object.

  Yields:
    a scope in which the object doesn't track dependencies manually.
  """
  # pylint: disable=protected-access
  # Remember the current setting (default True when the attribute is unset)
  # so it can be restored even if the body raises.
  saved_flag = getattr(obj, '_manual_tracking', True)
  obj._manual_tracking = False
  try:
    yield
  finally:
    obj._manual_tracking = saved_flag
| 6,123 | 37.515723 | 109 | py |
keras | keras-master/keras/feature_column/sequence_feature_column_integration_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import test_util
from keras import backend
from keras.feature_column import dense_features
from keras.feature_column import sequence_feature_column as ksfc
from keras.layers import merge
from keras.layers import recurrent
class SequenceFeatureColumnIntegrationTest(tf.test.TestCase):
  """End-to-end tests feeding SequenceExample protos through feature layers."""

  def _make_sequence_example(self):
    """Builds a SequenceExample with two context features and two feature lists.

    NOTE(review): this method appears unused — the test below calls the
    module-level `_make_sequence_example()` helper (defined at the bottom of
    this file) rather than `self._make_sequence_example()`.
    """
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
    for val in range(0, 10, 2):
      feat = feature_pb2.Feature()
      feat.int64_list.value.extend([val] * val)
      example.feature_lists.feature_list['int_list'].feature.extend([feat])
    for val in range(1, 11, 2):
      feat = feature_pb2.Feature()
      feat.bytes_list.value.extend([tf.compat.as_bytes(str(val))] * val)
      example.feature_lists.feature_list['str_list'].feature.extend([feat])

    return example

  def _build_feature_columns(self):
    """Returns the (context columns, sequence columns) used by the tests."""
    col = tf.feature_column.categorical_column_with_identity('int_ctx', num_buckets=100)
    ctx_cols = [
        tf.feature_column.embedding_column(col, dimension=10),
        tf.feature_column.numeric_column('float_ctx')
    ]

    identity_col = tf.feature_column.sequence_categorical_column_with_identity(
        'int_list', num_buckets=10)
    bucket_col = tf.feature_column.sequence_categorical_column_with_hash_bucket(
        'bytes_list', hash_bucket_size=100)
    seq_cols = [
        tf.feature_column.embedding_column(identity_col, dimension=10),
        tf.feature_column.embedding_column(bucket_col, dimension=20)
    ]

    return ctx_cols, seq_cols

  def test_sequence_example_into_input_layer(self):
    """Parses serialized SequenceExamples and runs them through an RNN."""
    # Uses the module-level _make_sequence_example() helper (text proto).
    examples = [_make_sequence_example().SerializeToString()] * 100
    ctx_cols, seq_cols = self._build_feature_columns()

    def _parse_example(example):
      ctx, seq = tf.io.parse_single_sequence_example(
          example,
          context_features=tf.feature_column.make_parse_example_spec(ctx_cols),
          sequence_features=tf.feature_column.make_parse_example_spec(seq_cols))
      ctx.update(seq)
      return ctx

    ds = tf.data.Dataset.from_tensor_slices(examples)
    ds = ds.map(_parse_example)
    ds = ds.batch(20)

    # Test on a single batch
    features = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()

    # Tile the context features across the sequence features
    sequence_input_layer = ksfc.SequenceFeatures(seq_cols)
    seq_input, _ = sequence_input_layer(features)
    dense_input_layer = dense_features.DenseFeatures(ctx_cols)
    ctx_input = dense_input_layer(features)
    ctx_input = backend.repeat(ctx_input, tf.shape(seq_input)[1])
    concatenated_input = merge.concatenate([seq_input, ctx_input])

    rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
    output = rnn_layer(concatenated_input)

    with self.cached_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      features_r = sess.run(features)
      self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])

      output_r = sess.run(output)
      self.assertAllEqual(output_r.shape, [20, 10])

  @test_util.run_deprecated_v1
  def test_shared_sequence_non_sequence_into_input_layer(self):
    """Shared embeddings work across a sequence and a non-sequence column."""
    non_seq = tf.feature_column.categorical_column_with_identity('non_seq',
                                                                 num_buckets=10)
    seq = tf.feature_column.sequence_categorical_column_with_identity('seq',
                                                                      num_buckets=10)
    shared_non_seq, shared_seq = tf.feature_column.shared_embeddings(
        [non_seq, seq],
        dimension=4,
        combiner='sum',
        initializer=tf.ones_initializer(),
        shared_embedding_collection_name='shared')

    seq = tf.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
        values=[0, 1, 2],
        dense_shape=[2, 2])
    non_seq = tf.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
        values=[0, 1, 2],
        dense_shape=[2, 2])
    features = {'seq': seq, 'non_seq': non_seq}

    # Tile the context features across the sequence features
    seq_input, seq_length = ksfc.SequenceFeatures([shared_seq])(features)
    non_seq_input = dense_features.DenseFeatures([shared_non_seq])(features)

    with self.cached_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      output_seq, output_seq_length, output_non_seq = sess.run(
          [seq_input, seq_length, non_seq_input])
      # All embedding rows are ones, so each looked-up value equals the
      # number of ids combined ('sum' combiner).
      self.assertAllEqual(output_seq, [[[1, 1, 1, 1], [1, 1, 1, 1]],
                                       [[1, 1, 1, 1], [0, 0, 0, 0]]])
      self.assertAllEqual(output_seq_length, [2, 1])
      self.assertAllEqual(output_non_seq, [[2, 2, 2, 2], [1, 1, 1, 1]])
# Text-format SequenceExample used by the tests above: two context features
# (float_ctx, int_ctx) and three feature lists (bytes_list, float_list,
# int_list), each with three steps of growing length.
_SEQ_EX_PROTO = """
context {
  feature {
    key: "float_ctx"
    value {
      float_list {
        value: 123.6
      }
    }
  }
  feature {
    key: "int_ctx"
    value {
      int64_list {
        value: 5
      }
    }
  }
}
feature_lists {
  feature_list {
    key: "bytes_list"
    value {
      feature {
        bytes_list {
          value: "a"
        }
      }
      feature {
        bytes_list {
          value: "b"
          value: "c"
        }
      }
      feature {
        bytes_list {
          value: "d"
          value: "e"
          value: "f"
          value: "g"
        }
      }
    }
  }
  feature_list {
    key: "float_list"
    value {
      feature {
        float_list {
          value: 1.0
        }
      }
      feature {
        float_list {
          value: 3.0
          value: 3.0
          value: 3.0
        }
      }
      feature {
        float_list {
          value: 5.0
          value: 5.0
          value: 5.0
          value: 5.0
          value: 5.0
        }
      }
    }
  }
  feature_list {
    key: "int_list"
    value {
      feature {
        int64_list {
          value: 2
          value: 2
        }
      }
      feature {
        int64_list {
          value: 4
          value: 4
          value: 4
          value: 4
        }
      }
      feature {
        int64_list {
          value: 6
          value: 6
          value: 6
          value: 6
          value: 6
          value: 6
        }
      }
    }
  }
}
"""


def _make_sequence_example():
  """Parses `_SEQ_EX_PROTO` into a `SequenceExample` proto."""
  example = example_pb2.SequenceExample()
  return text_format.Parse(_SEQ_EX_PROTO, example)


# Run the tests in this file when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 7,505 | 28.320313 | 88 | py |
keras | keras-master/keras/feature_column/base_feature_layer.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction."""
# This file was originally under tf/python/feature_column, and was moved to
# Keras package in order to remove the reverse dependency from TF to Keras.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import collections
import re
from keras.engine.base_layer import Layer
from keras.utils import generic_utils
class _BaseFeaturesLayer(Layer):
  """Base class for DenseFeatures and SequenceFeatures.

  Defines common methods and helpers.

  Args:
    feature_columns: An iterable containing the FeatureColumns to use as
      inputs to your model.
    expected_column_type: Expected class for provided feature columns.
    trainable: Boolean, whether the layer's variables will be updated via
      gradient descent during training.
    name: Name to give to the DenseFeatures.
    partitioner: Optional variable partitioner, applied in the variable
      scopes opened in `build`.
    **kwargs: Keyword arguments to construct a layer.

  Raises:
    ValueError: if an item in `feature_columns` doesn't match
      `expected_column_type`.
  """

  def __init__(self,
               feature_columns,
               expected_column_type,
               trainable,
               name,
               partitioner=None,
               **kwargs):
    super(_BaseFeaturesLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)
    # Normalization validates the columns and sorts them by name, which fixes
    # the concatenation order of their output tensors.
    self._feature_columns = _normalize_feature_columns(
        feature_columns)
    self._state_manager = tf.__internal__.feature_column.StateManager(  # pylint: disable=protected-access
        self, self.trainable)
    self._partitioner = partitioner
    for column in self._feature_columns:
      if not isinstance(column, expected_column_type):
        raise ValueError(
            'Items of feature_columns must be a {}. '
            'You can wrap a categorical column with an '
            'embedding_column or indicator_column. Given: {}'.format(
                expected_column_type, column))

  def build(self, _):
    # Create each column's state (e.g. embedding tables) inside nested v1
    # variable scopes (layer name / sanitized column name) so partitioning
    # and v1 checkpoint naming keep working.
    for column in self._feature_columns:
      with tf.compat.v1.variable_scope(
          self.name, partitioner=self._partitioner):
        with tf.compat.v1.variable_scope(
            _sanitize_column_name_for_variable_scope(column.name)):
          column.create_state(self._state_manager)
    super(_BaseFeaturesLayer, self).build(None)

  def _target_shape(self, input_shape, num_elements):
    """Computes expected output shape of the layer or a column's dense tensor.

    Abstract hook implemented by subclasses. NOTE: this stub was previously
    (mis)named `_output_shape`, while every caller in this class and every
    subclass implementation uses `_target_shape`; it is renamed here so the
    abstract contract matches actual usage.

    Args:
      input_shape: Tensor or array with batch shape.
      num_elements: Size of the last dimension of the output.

    Returns:
      Tuple with output shape.
    """
    raise NotImplementedError('Calling an abstract method.')

  def compute_output_shape(self, input_shape):
    total_elements = 0
    for column in self._feature_columns:
      total_elements += column.variable_shape.num_elements()
    return self._target_shape(input_shape, total_elements)

  def _process_dense_tensor(self, column, tensor):
    """Reshapes the dense tensor output of a column based on expected shape.

    Args:
      column: A DenseColumn or SequenceDenseColumn object.
      tensor: A dense tensor obtained from the same column.

    Returns:
      Reshaped dense tensor.
    """
    num_elements = column.variable_shape.num_elements()
    target_shape = self._target_shape(tf.shape(tensor), num_elements)
    return tf.reshape(tensor, shape=target_shape)

  def _verify_and_concat_tensors(self, output_tensors):
    """Verifies and concatenates the dense output of several columns."""
    _verify_static_batch_size_equality(output_tensors, self._feature_columns)
    return tf.concat(output_tensors, -1)

  def get_config(self):
    # Serialize the feature columns and partitioner so the layer can be
    # reconstructed via `from_config`.
    column_configs = [tf.__internal__.feature_column.serialize_feature_column(fc)
                      for fc in self._feature_columns]
    config = {'feature_columns': column_configs}
    config['partitioner'] = generic_utils.serialize_keras_object(
        self._partitioner)

    base_config = super(  # pylint: disable=bad-super-call
        _BaseFeaturesLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    config_cp = config.copy()
    # `columns_by_name` is shared across deserialization calls — presumably so
    # that duplicate column references resolve to one object; see
    # deserialize_feature_column.
    columns_by_name = {}
    config_cp['feature_columns'] = [tf.__internal__.feature_column.deserialize_feature_column(
        c, custom_objects, columns_by_name) for c in config['feature_columns']]
    config_cp['partitioner'] = generic_utils.deserialize_keras_object(
        config['partitioner'], custom_objects)

    return cls(**config_cp)
def _sanitize_column_name_for_variable_scope(name):
"""Sanitizes user-provided feature names for use as variable scopes."""
invalid_char = re.compile('[^A-Za-z0-9_.\\-]')
return invalid_char.sub('_', name)
def _verify_static_batch_size_equality(tensors, columns):
  """Verify equality between static batch sizes.

  Only tensors whose first dimension is statically known participate in the
  check; the first such tensor establishes the expected batch size.

  Args:
    tensors: iterable of input tensors.
    columns: Corresponding feature columns.

  Raises:
    ValueError: in case of mismatched batch sizes.
  """
  expected_batch_size = None
  for i, tensor in enumerate(tensors):  # idiomatic enumerate over range(len())
    # batch_size is a Dimension object.  (Fixes the previous "bath_size"
    # typo in both the comment and the index variable below.)
    batch_size = tf.compat.v1.Dimension(
        tf.compat.dimension_value(tensor.shape[0]))
    if batch_size.value is not None:
      if expected_batch_size is None:
        batch_size_column_index = i
        expected_batch_size = batch_size
      elif not expected_batch_size.is_compatible_with(batch_size):
        raise ValueError(
            'Batch size (first dimension) of each feature must be same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, batch_size))
def _normalize_feature_columns(feature_columns):
  """Normalizes the `feature_columns` input.

  This method converts the `feature_columns` to list type as best as it can. In
  addition, verifies the type and other parts of feature_columns, required by
  downstream library.

  Args:
    feature_columns: The raw feature columns, usually passed by users.

  Returns:
    The normalized feature column list.

  Raises:
    ValueError: for any invalid inputs, such as empty, duplicated names, etc.
  """
  # A single column is accepted and wrapped into a one-element list.
  if isinstance(feature_columns, tf.__internal__.feature_column.FeatureColumn):
    feature_columns = [feature_columns]

  # Materialize iterators so the columns can be traversed more than once.
  if isinstance(feature_columns, collections.abc.Iterator):
    feature_columns = list(feature_columns)

  if isinstance(feature_columns, dict):
    raise ValueError('Expected feature_columns to be iterable, found dict.')

  for column in feature_columns:
    if not isinstance(column, tf.__internal__.feature_column.FeatureColumn):
      raise ValueError('Items of feature_columns must be a FeatureColumn. '
                       'Given (type {}): {}.'.format(type(column), column))

  if not feature_columns:
    raise ValueError('feature_columns must not be empty.')

  # Reject duplicate names: two columns with the same name would refer to the
  # same base feature.
  seen_by_name = {}
  for column in feature_columns:
    if column.name in seen_by_name:
      raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer to '
                       'same base feature. Either one must be discarded or a '
                       'duplicated but renamed item must be inserted in '
                       'features dict.'.format(column,
                                               seen_by_name[column.name]))
    seen_by_name[column.name] = column

  # Deterministic ordering: sort by column name.
  return sorted(feature_columns, key=lambda column: column.name)
| 8,323 | 37.183486 | 106 | py |
keras | keras-master/keras/feature_column/sequence_feature_column.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely be changing frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras import backend
from keras.feature_column import base_feature_layer as kfc
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=protected-access
@keras_export('keras.experimental.SequenceFeatures')
class SequenceFeatures(kfc._BaseFeaturesLayer):
  """A layer for sequence input.

  All `feature_columns` must be sequence dense columns with the same
  `sequence_length`. The output of this method can be fed into sequence
  networks, such as RNN.

  The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
  `T` is the maximum sequence length for this batch, which could differ from
  batch to batch.

  If multiple `feature_columns` are given with `Di` `num_elements` each, their
  outputs are concatenated. So, the final `Tensor` has shape
  `[batch_size, T, D0 + D1 + ... + Dn]`.

  Example:

  ```python

  import tensorflow as tf

  # Behavior of some cells or feature columns may depend on whether we are in
  # training or inference mode, e.g. applying dropout.
  training = True
  rating = tf.feature_column.sequence_numeric_column('rating')
  watches = tf.feature_column.sequence_categorical_column_with_identity(
      'watches', num_buckets=1000)
  watches_embedding = tf.feature_column.embedding_column(watches,
                                                         dimension=10)
  columns = [rating, watches_embedding]

  features = {
   'rating': tf.sparse.from_dense([[1.0,1.1, 0, 0, 0],
                                   [2.0,2.1,2.2, 2.3, 2.5]]),
   'watches': tf.sparse.from_dense([[2, 85, 0, 0, 0],[33,78, 2, 73, 1]])
  }

  sequence_input_layer = tf.keras.experimental.SequenceFeatures(columns)
  sequence_input, sequence_length = sequence_input_layer(
     features, training=training)
  sequence_length_mask = tf.sequence_mask(sequence_length)
  hidden_size = 32
  rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell)
  outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
  ```
  """

  def __init__(
      self,
      feature_columns,
      trainable=True,
      name=None,
      **kwargs):
    """Constructs a SequenceFeatures layer.

    Args:
      feature_columns: An iterable of dense sequence columns. Valid columns are
        - `embedding_column` that wraps a `sequence_categorical_column_with_*`
        - `sequence_numeric_column`.
      trainable: Boolean, whether the layer's variables will be updated via
        gradient descent during training.
      name: Name to give to the SequenceFeatures.
      **kwargs: Keyword arguments to construct a layer.

    Raises:
      ValueError: If any of the `feature_columns` is not a
        `SequenceDenseColumn`.
    """
    super(SequenceFeatures, self).__init__(
        feature_columns=feature_columns,
        trainable=trainable,
        name=name,
        expected_column_type=tf.__internal__.feature_column.SequenceDenseColumn,
        **kwargs)

  @property
  def _is_feature_layer(self):
    # Marker property used to identify feature layers.
    return True

  def _target_shape(self, input_shape, total_elements):
    # Sequence output keeps the (batch, time) dims and flattens the rest.
    return (input_shape[0], input_shape[1], total_elements)

  def call(self, features, training=None):
    """Returns sequence input corresponding to the `feature_columns`.

    Args:
      features: A dict mapping keys to tensors.
      training: Python boolean or None, indicating whether to the layer is being
        run in training mode. This argument is passed to the call method of any
        `FeatureColumn` that takes a `training` argument. For example, if a
        `FeatureColumn` performed dropout, the column could expose a `training`
        argument to control whether the dropout should be applied. If `None`,
        defaults to `tf.keras.backend.learning_phase()`.

    Returns:
      An `(input_layer, sequence_length)` tuple where:
      - input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
        `T` is the maximum sequence length for this batch, which could differ
        from batch to batch. `D` is the sum of `num_elements` for all
        `feature_columns`.
      - sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
        length for each example.

    Raises:
      ValueError: If features are not a dictionary.
    """
    if not isinstance(features, dict):
      raise ValueError('We expected a dictionary here. Instead we got: ',
                       features)
    if training is None:
      training = backend.learning_phase()
    # The cache de-duplicates transformations shared between columns.
    transformation_cache = tf.__internal__.feature_column.FeatureTransformationCache(features)
    output_tensors = []
    sequence_lengths = []

    for column in self._feature_columns:
      with backend.name_scope(column.name):
        # Not all columns accept a `training` kwarg; fall back to calling
        # without it for those that don't.
        try:
          dense_tensor, sequence_length = column.get_sequence_dense_tensor(
              transformation_cache, self._state_manager, training=training)
        except TypeError:
          dense_tensor, sequence_length = column.get_sequence_dense_tensor(
              transformation_cache, self._state_manager)
        # Flattens the final dimension to produce a 3D Tensor.
        output_tensors.append(self._process_dense_tensor(column, dense_tensor))
        sequence_lengths.append(sequence_length)

    # Check and process sequence lengths.
    kfc._verify_static_batch_size_equality(  # pylint: disable=protected-access
        sequence_lengths, self._feature_columns)
    sequence_length = _assert_all_equal_and_return(sequence_lengths)

    return self._verify_and_concat_tensors(output_tensors), sequence_length
def _assert_all_equal_and_return(tensors, name=None):
  """Returns the first tensor, asserting at run time that all entries match.

  Args:
    tensors: A non-empty list of tensors expected to be element-wise equal.
    name: Optional name for the enclosing name scope.

  Returns:
    A tensor equal to `tensors[0]`. When `tensors` has more than one entry,
    the result is gated on equality assertions against every other entry.
  """
  with backend.name_scope(name or 'assert_all_equal'):
    first = tensors[0]
    if len(tensors) == 1:
      return first
    checks = [
        tf.compat.v1.assert_equal(first, other) for other in tensors[1:]
    ]
    with tf.control_dependencies(checks):
      return tf.identity(first)
| 7,029 | 38.055556 | 94 | py |
keras | keras-master/keras/feature_column/__init__.py | 0 | 0 | 0 | py | |
keras | keras-master/keras/feature_column/dense_features_v2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_features_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import numpy as np
from tensorflow.python.eager import backprop
from keras import combinations
from keras import keras_parameterized
from keras.feature_column import dense_features_v2 as df
def _initialized_session(config=None):
  """Creates a TF1 `Session` with variables and tables already initialized.

  Args:
    config: Optional `tf.compat.v1.ConfigProto` passed to the session.

  Returns:
    A `tf.compat.v1.Session` on which the global-variable and table
    initializers have been run.
  """
  session = tf.compat.v1.Session(config=config)
  for initializer in (tf.compat.v1.global_variables_initializer(),
                      tf.compat.v1.tables_initializer()):
    session.run(initializer)
  return session
class DenseFeaturesTest(keras_parameterized.TestCase):
  """Tests for the v2 `DenseFeatures` layer (keras.feature_column.dense_features_v2)."""

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_retrieving_input(self):
    """A single numeric column passes its input value straight through."""
    features = {'a': [0.]}
    dense_features = df.DenseFeatures(tf.feature_column.numeric_column('a'))
    inputs = self.evaluate(dense_features(features))
    self.assertAllClose([[0.]], inputs)

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_reuses_variables(self):
    """Calling the layer repeatedly must not create duplicate embedding variables."""
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0)),
        values=(0, 1, 2),
        dense_shape=(3, 3))
    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=3)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      del shape  # unused
      del dtype  # unused
      del partition_info  # unused
      embedding_values = (
          (1, 0),  # id 0
          (0, 1),  # id 1
          (1, 1))  # id 2
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures([embedding_column])
    features = {'a': sparse_input}

    inputs = dense_features(features)
    variables = dense_features.variables

    # Sanity check: test that the inputs are correct.
    self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

    # Check that only one variable was created.
    self.assertEqual(1, len(variables))

    # Check that invoking dense_features on the same features does not create
    # additional variables
    _ = dense_features(features)
    self.assertEqual(1, len(variables))
    self.assertIs(variables[0], dense_features.variables[0])

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_feature_column_dense_features_gradient(self):
    """Gradients flow through the layer to the embedding variable as IndexedSlices."""
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0)),
        values=(0, 1, 2),
        dense_shape=(3, 3))
    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=3)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      del shape  # unused
      del dtype  # unused
      del partition_info  # unused
      embedding_values = (
          (1, 0),  # id 0
          (0, 1),  # id 1
          (1, 1))  # id 2
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures([embedding_column])
    features = {'a': sparse_input}

    def scale_matrix():
      matrix = dense_features(features)
      return 2 * matrix

    # Sanity check: Verify that scale_matrix returns the correct output.
    self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

    # Check that the returned gradient is correct.
    grad_function = backprop.implicit_grad(scale_matrix)
    grads_and_vars = grad_function()
    indexed_slice = grads_and_vars[0][0]
    gradient = grads_and_vars[0][0].values

    self.assertAllEqual([0, 1, 2], indexed_slice.indices)
    self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)

  def test_dense_feature_with_training_arg(self):
    """The `training` flag is forwarded to columns whose ops are mode-aware."""
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2')

    # Monkey patch the second numeric column to simulate a column that has
    # different behavior by mode.
    def training_aware_get_dense_tensor(transformation_cache,
                                        state_manager,
                                        training=None):
      return transformation_cache.get(price2, state_manager, training=training)

    def training_aware_transform_feature(transformation_cache,
                                         state_manager,
                                         training=None):
      input_tensor = transformation_cache.get(
          price2.key, state_manager, training=training)
      if training:
        return input_tensor * 10.0
      else:
        return input_tensor * 20.0

    price2.get_dense_tensor = training_aware_get_dense_tensor
    price2.transform_feature = training_aware_transform_feature
    with tf.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      train_mode = df.DenseFeatures([price1, price2])(features, training=True)
      predict_mode = df.DenseFeatures([price1, price2
                                      ])(features, training=False)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2., 30.], [5., 6., 40.]],
                          self.evaluate(train_mode))
      self.assertAllClose([[1., 2., 60.], [5., 6., 80.]],
                          self.evaluate(predict_mode))

  def test_raises_if_empty_feature_columns(self):
    """Constructing the layer with an empty column list is rejected."""
    with self.assertRaisesRegex(ValueError,
                                'feature_columns must not be empty'):
      df.DenseFeatures(feature_columns=[])(features={})

  def test_should_be_dense_column(self):
    """Bare categorical columns (not wrapped as dense) are rejected."""
    with self.assertRaisesRegex(ValueError, 'must be a .*DenseColumn'):
      df.DenseFeatures(feature_columns=[
          tf.feature_column.categorical_column_with_hash_bucket('wire_cast', 4)
      ])(
          features={
              'a': [[0]]
          })

  def test_does_not_support_dict_columns(self):
    """Passing the feature columns as a dict is rejected."""
    with self.assertRaisesRegex(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      df.DenseFeatures(feature_columns={'a': tf.feature_column.numeric_column('a')})(
          features={
              'a': [[0]]
          })

  def test_bare_column(self):
    """A single column (not wrapped in a list) is accepted."""
    with tf.Graph().as_default():
      features = features = {'a': [0.]}  # NOTE(review): redundant double assignment; harmless.
      net = df.DenseFeatures(tf.feature_column.numeric_column('a'))(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[0.]], self.evaluate(net))

  def test_column_generator(self):
    """Feature columns may be supplied as a generator."""
    with tf.Graph().as_default():
      features = features = {'a': [0.], 'b': [1.]}  # NOTE(review): redundant double assignment; harmless.
      columns = (tf.feature_column.numeric_column(key) for key in features)
      net = df.DenseFeatures(columns)(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[0., 1.]], self.evaluate(net))

  def test_raises_if_duplicate_name(self):
    """Two columns with the same name are rejected."""
    with self.assertRaisesRegex(
        ValueError, 'Duplicate feature column name found for columns'):
      df.DenseFeatures(
          feature_columns=[tf.feature_column.numeric_column('a'),
                           tf.feature_column.numeric_column('a')])(
                               features={
                                   'a': [[0]]
                               })

  def test_one_column(self):
    """One scalar numeric column yields a (batch, 1) output."""
    price = tf.feature_column.numeric_column('price')
    with tf.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      net = df.DenseFeatures([price])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1.], [5.]], self.evaluate(net))

  def test_multi_dimension(self):
    """A shape-2 numeric column yields a (batch, 2) output."""
    price = tf.feature_column.numeric_column('price', shape=2)
    with tf.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      net = df.DenseFeatures([price])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_compute_output_shape(self):
    """compute_output_shape reports the concatenated width of all columns."""
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2', shape=4)
    with tf.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]]
      }
      dense_features = df.DenseFeatures([price1, price2])
      self.assertEqual((None, 6), dense_features.compute_output_shape((None,)))
      net = dense_features(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]],
                          self.evaluate(net))

  def test_raises_if_shape_mismatch(self):
    """Feature values that cannot be reshaped to the column's shape fail."""
    price = tf.feature_column.numeric_column('price', shape=2)
    with tf.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegex(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        df.DenseFeatures([price])(features)

  def test_reshaping(self):
    """Inputs with extra inner dims are flattened to (batch, num_elements)."""
    price = tf.feature_column.numeric_column('price', shape=[1, 2])
    with tf.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      net = df.DenseFeatures([price])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_multi_column(self):
    """Several columns are concatenated along the feature axis."""
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      net = df.DenseFeatures([price1, price2])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_cols_to_output_tensors(self):
    """`cols_to_output_tensors` captures each column's output tensor."""
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      cols_dict = {}
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      dense_features = df.DenseFeatures([price1, price2])
      net = dense_features(features, cols_dict)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]],
                          self.evaluate(cols_dict[price1]))
      self.assertAllClose([[3.], [4.]], self.evaluate(cols_dict[price2]))
      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_column_order(self):
    """Output order follows column names, independent of construction order."""
    price_a = tf.feature_column.numeric_column('price_a')
    price_b = tf.feature_column.numeric_column('price_b')
    with tf.Graph().as_default():
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
      }
      net1 = df.DenseFeatures([price_a, price_b])(features)
      net2 = df.DenseFeatures([price_b, price_a])(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 3.]], self.evaluate(net1))
      self.assertAllClose([[1., 3.]], self.evaluate(net2))

  def test_fails_for_categorical_column(self):
    """Categorical columns without an embedding/indicator wrapper are rejected."""
    animal = tf.feature_column.categorical_column_with_identity('animal', num_buckets=4)
    with tf.Graph().as_default():
      features = {
          'animal':
              tf.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      with self.assertRaisesRegex(Exception, 'must be a .*DenseColumn'):
        df.DenseFeatures([animal])(features)

  def test_static_batch_size_mismatch(self):
    """Statically-known mismatched batch sizes raise at call time."""
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegex(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        df.DenseFeatures([price1, price2])(features)

  def test_subset_of_static_batch_size_mismatch(self):
    """A mismatch among the statically-known batch sizes still raises."""
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    price3 = tf.feature_column.numeric_column('price3')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegex(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        df.DenseFeatures([price1, price2, price3])(features)

  def test_runtime_batch_size_mismatch(self):
    """Batch-size mismatches only known at run time fail inside the session."""
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      net = df.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        with self.assertRaisesRegex(tf.errors.OpError,
                                    'Dimensions of inputs should match'):
          sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    """Equal runtime batch sizes evaluate without error."""
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 2
          'price2': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 2
      }
      net = df.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        sess.run(
            net,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })

  def test_multiple_layers_with_same_embedding_column(self):
    """Two layers over one embedding_column create two separate variables."""
    some_sparse_column = tf.feature_column.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = tf.feature_column.embedding_column(
        some_sparse_column, dimension=10)

    with tf.Graph().as_default():
      features = {
          'sparse_feature': [['a'], ['x']],
      }
      all_cols = [some_embedding_column]
      df.DenseFeatures(all_cols)(features)
      df.DenseFeatures(all_cols)(features)
      # Make sure that 2 variables get created in this case.
      self.assertEqual(2,
                       len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      expected_var_names = [
          'dense_features/sparse_feature_embedding/embedding_weights:0',
          'dense_features_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)])

  def test_multiple_layers_with_same_shared_embedding_column(self):
    """Two layers over shared_embeddings reuse one variable within a graph."""
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2

    # feature_column.shared_embeddings is not supported in eager.
    with tf.Graph().as_default():
      embedding_column_b, embedding_column_a = tf.feature_column.shared_embeddings(
          [categorical_column_b, categorical_column_a],
          dimension=embedding_dimension)
      features = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      all_cols = [embedding_column_a, embedding_column_b]
      df.DenseFeatures(all_cols)(features)
      df.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1,
                       len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['aaa_bbb_shared_embedding:0'],
          [v.name for v in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)])

  def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
    """The shared embedding variable is recreated once per graph."""
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    # feature_column.shared_embeddings is not supported in eager.
    with tf.Graph().as_default():
      embedding_column_b, embedding_column_a = tf.feature_column.shared_embeddings(
          [categorical_column_b, categorical_column_a],
          dimension=embedding_dimension)
      all_cols = [embedding_column_a, embedding_column_b]
      features = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      df.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1,
                       len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))

    with tf.Graph().as_default():
      features1 = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }

      df.DenseFeatures(all_cols)(features1)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1,
                       len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['aaa_bbb_shared_embedding:0'],
          [v.name for v in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)])

  def test_with_1d_sparse_tensor(self):
    """Mixes dense, 1-D sparse, and dense-categorical inputs in one layer."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = tf.feature_column.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = tf.feature_column.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    country = tf.feature_column.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = tf.feature_column.embedding_column(
        country, dimension=5, initializer=_initializer)

    with tf.Graph().as_default():
      # Provides 1-dim tensor and dense tensor.
      features = {
          'price':
              tf.constant([
                  11.,
                  12.,
              ]),
          'body-style':
              tf.SparseTensor(
                  indices=((0,), (1,)),
                  values=('sedan', 'hardtop'),
                  dense_shape=(2,)),
          # This is dense tensor for the categorical_column.
          'country':
              tf.constant(['CA', 'US']),
      }
      self.assertEqual(1, features['price'].shape.ndims)
      self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
      self.assertEqual(1, features['country'].shape.ndims)

      net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
          features)
      self.assertEqual(1 + 3 + 5, net.shape[1])
      with _initialized_session() as sess:

        # Each row is formed by concatenating `embedded_body_style`,
        # `one_hot_body_style`, and `price` in order.
        self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                             [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                            sess.run(net))

  def test_with_1d_unknown_shape_sparse_tensor(self):
    """Handles placeholders with fully unknown shapes, fed at run time."""
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = tf.feature_column.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = tf.feature_column.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    country = tf.feature_column.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = tf.feature_column.embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    with tf.Graph().as_default():
      features = {
          'price': tf.compat.v1.placeholder(tf.float32),
          'body-style': tf.compat.v1.sparse_placeholder(tf.string),
          # This is dense tensor for the categorical_column.
          'country': tf.compat.v1.placeholder(tf.string),
      }
      self.assertIsNone(features['price'].shape.ndims)
      self.assertIsNone(features['body-style'].get_shape().ndims)
      self.assertIsNone(features['country'].shape.ndims)

      price_data = np.array([11., 12.])
      body_style_data = tf.compat.v1.SparseTensorValue(
          indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
      country_data = np.array([['US'], ['CA']])

      net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
          features)
      self.assertEqual(1 + 3 + 2, net.shape[1])
      with _initialized_session() as sess:

        # Each row is formed by concatenating `embedded_body_style`,
        # `one_hot_body_style`, and `price` in order.
        self.assertAllEqual(
            [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
            sess.run(
                net,
                feed_dict={
                    features['price']: price_data,
                    features['body-style']: body_style_data,
                    features['country']: country_data
                }))

  def test_with_rank_0_feature(self):
    """Rank-0 (scalar) features are rejected both statically and at run time."""
    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')
    features = {
        'price': tf.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
      df.DenseFeatures([price])(features)

    with tf.Graph().as_default():
      # Dynamic rank 0 should fail
      features = {
          'price': tf.compat.v1.placeholder(tf.float32),
      }
      net = df.DenseFeatures([price])(features)
      self.assertEqual(1, net.shape[1])
      with _initialized_session() as sess:
        with self.assertRaisesOpError('Feature .* cannot have rank 0'):
          sess.run(net, feed_dict={features['price']: np.array(1)})
# Run the test suite via the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 25,872 | 38.500763 | 124 | py |
keras | keras-master/keras/feature_column/dense_features.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import json
from keras import backend
from keras.feature_column import base_feature_layer as kfc
from keras.saving.saved_model import json_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.layers.DenseFeatures'])
class DenseFeatures(kfc._BaseFeaturesLayer):  # pylint: disable=protected-access
  """A layer that produces a dense `Tensor` based on given `feature_columns`.

  Generally a single example in training data is described with FeatureColumns.
  At the first layer of the model, this column-oriented data should be converted
  to a single `Tensor`.

  This layer can be called multiple times with different features.

  This is the V1 version of this layer that uses variable_scope's or partitioner
  to create variables which works well with PartitionedVariables. Variable
  scopes are deprecated in V2, so the V2 version uses name_scopes instead. But
  currently that lacks support for partitioned variables. Use this if you need
  partitioned variables. Use the partitioner argument if you have a Keras model
  and uses `tf.compat.v1.keras.estimator.model_to_estimator` for training.

  Example:

  ```python
  price = tf.feature_column.numeric_column('price')
  keywords_embedded = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_hash_bucket("keywords", 10K),
      dimension=16)
  columns = [price, keywords_embedded, ...]
  partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=4)
  feature_layer = tf.compat.v1.keras.layers.DenseFeatures(
      feature_columns=columns, partitioner=partitioner)

  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = feature_layer(features)
  for units in [128, 64, 32]:
    dense_tensor = tf.compat.v1.keras.layers.Dense(
        units, activation='relu')(dense_tensor)
  prediction = tf.compat.v1.keras.layers.Dense(1)(dense_tensor)
  ```
  """

  def __init__(self,
               feature_columns,
               trainable=True,
               name=None,
               partitioner=None,
               **kwargs):
    """Constructs a DenseFeatures layer.

    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `DenseColumn` such as `numeric_column`, `embedding_column`,
        `bucketized_column`, `indicator_column`. If you have categorical
        features, you can wrap them with an `embedding_column` or
        `indicator_column`.
      trainable: Boolean, whether the layer's variables will be updated via
        gradient descent during training.
      name: Name to give to the DenseFeatures.
      partitioner: Partitioner for input layer. Defaults to None.
      **kwargs: Keyword arguments to construct a layer.

    Raises:
      ValueError: if an item in `feature_columns` is not a `DenseColumn`.
    """
    super(DenseFeatures, self).__init__(
        feature_columns=feature_columns,
        trainable=trainable,
        name=name,
        partitioner=partitioner,
        expected_column_type=tf.__internal__.feature_column.DenseColumn,
        **kwargs)

  @property
  def _is_feature_layer(self):
    # Marker flag; also serialized into `_tracking_metadata` below.
    return True

  @property
  def _tracking_metadata(self):
    """String stored in metadata field in the SavedModel proto.

    Returns:
      A serialized JSON storing information necessary for recreating this layer.
    """
    metadata = json.loads(super(DenseFeatures, self)._tracking_metadata)
    metadata['_is_feature_layer'] = True
    return json.dumps(metadata, default=json_utils.get_json_type)

  def _target_shape(self, input_shape, total_elements):
    """Returns the 2-D (batch, total_elements) shape columns are flattened to."""
    return (input_shape[0], total_elements)

  def call(self, features, cols_to_output_tensors=None, training=None):
    """Returns a dense tensor corresponding to the `feature_columns`.

    Example usage:

    >>> t1 = tf.feature_column.embedding_column(
    ...    tf.feature_column.categorical_column_with_hash_bucket("t1", 2),
    ...    dimension=8)
    >>> t2 = tf.feature_column.numeric_column('t2')
    >>> feature_layer = tf.compat.v1.keras.layers.DenseFeatures([t1, t2])
    >>> features = {"t1": tf.constant(["a", "b"]), "t2": tf.constant([1, 2])}
    >>> dense_tensor = feature_layer(features, training=True)

    Args:
      features: A mapping from key to tensors. `FeatureColumn`s look up via
        these keys. For example `numeric_column('price')` will look at 'price'
        key in this dict. Values can be a `SparseTensor` or a `Tensor` depends
        on corresponding `FeatureColumn`.
      cols_to_output_tensors: If not `None`, this will be filled with a dict
        mapping feature columns to output tensors created.
      training: Python boolean or None, indicating whether to the layer is being
        run in training mode. This argument is passed to the call method of any
        `FeatureColumn` that takes a `training` argument. For example, if a
        `FeatureColumn` performed dropout, the column could expose a `training`
        argument to control whether the dropout should be applied. If `None`,
        defaults to `tf.keras.backend.learning_phase()`.

    Returns:
      A `Tensor` which represents input layer of a model. Its shape
      is (batch_size, first_layer_dimension) and its dtype is `float32`.
      first_layer_dimension is determined based on given `feature_columns`.

    Raises:
      ValueError: If features are not a dictionary.
    """
    if training is None:
      training = backend.learning_phase()
    if not isinstance(features, dict):
      raise ValueError('We expected a dictionary here. Instead we got: ',
                       features)
    transformation_cache = tf.__internal__.feature_column.FeatureTransformationCache(features)
    output_tensors = []
    for column in self._feature_columns:
      with backend.name_scope(column.name):
        try:
          tensor = column.get_dense_tensor(
              transformation_cache, self._state_manager, training=training)
        except TypeError:
          # Fall back for feature columns whose `get_dense_tensor` does not
          # accept a `training` argument.
          tensor = column.get_dense_tensor(transformation_cache,
                                           self._state_manager)
        # Flatten the column output to (batch, num_elements) before concat.
        processed_tensors = self._process_dense_tensor(column, tensor)
        if cols_to_output_tensors is not None:
          cols_to_output_tensors[column] = processed_tensors
        output_tensors.append(processed_tensors)
    return self._verify_and_concat_tensors(output_tensors)
| 7,434 | 41.485714 | 94 | py |
keras | keras-master/keras/preprocessing/text_dataset.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text dataset generation utilities."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.preprocessing import dataset_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.text_dataset_from_directory',
              'keras.preprocessing.text_dataset_from_directory',
              v1=[])
def text_dataset_from_directory(directory,
                                labels='inferred',
                                label_mode='int',
                                class_names=None,
                                batch_size=32,
                                max_length=None,
                                shuffle=True,
                                seed=None,
                                validation_split=None,
                                subset=None,
                                follow_links=False):
  """Generates a `tf.data.Dataset` from text files in a directory.

  If your directory structure is:

  ```
  main_directory/
  ...class_a/
  ......a_text_1.txt
  ......a_text_2.txt
  ...class_b/
  ......b_text_1.txt
  ......b_text_2.txt
  ```

  Then calling `text_dataset_from_directory(main_directory, labels='inferred')`
  will return a `tf.data.Dataset` that yields batches of texts from
  the subdirectories `class_a` and `class_b`, together with labels
  0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

  Only `.txt` files are supported at this time.

  Args:
    directory: Directory where the data is located.
      If `labels` is "inferred", it should contain
      subdirectories, each containing text files for a class.
      Otherwise, the directory structure is ignored.
    labels: Either "inferred"
      (labels are generated from the directory structure),
      None (no labels),
      or a list/tuple of integer labels of the same size as the number of
      text files found in the directory. Labels should be sorted according
      to the alphanumeric order of the text file paths
      (obtained via `os.walk(directory)` in Python).
    label_mode:
      - 'int': means that the labels are encoded as integers
        (e.g. for `sparse_categorical_crossentropy` loss).
      - 'categorical' means that the labels are
        encoded as a categorical vector
        (e.g. for `categorical_crossentropy` loss).
      - 'binary' means that the labels (there can be only 2)
        are encoded as `float32` scalars with values 0 or 1
        (e.g. for `binary_crossentropy`).
      - None (no labels).
    class_names: Only valid if "labels" is "inferred". This is the explicit
      list of class names (must match names of subdirectories). Used
      to control the order of the classes
      (otherwise alphanumerical order is used).
    batch_size: Size of the batches of data. Default: 32.
    max_length: Maximum size of a text string. Texts longer than this will
      be truncated to `max_length`.
    shuffle: Whether to shuffle the data. Default: True.
      If set to False, sorts the data in alphanumeric order.
    seed: Optional random seed for shuffling and transformations.
    validation_split: Optional float between 0 and 1,
      fraction of data to reserve for validation.
    subset: One of "training" or "validation".
      Only used if `validation_split` is set.
    follow_links: Whether to visits subdirectories pointed to by symlinks.
      Defaults to False.

  Returns:
    A `tf.data.Dataset` object.
    - If `label_mode` is None, it yields `string` tensors of shape
      `(batch_size,)`, containing the contents of a batch of text files.
    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
      has shape `(batch_size,)` and `labels` follows the format described
      below.

  Rules regarding labels format:
    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
      `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
      1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorial`, the labels are a `float32` tensor
      of shape `(batch_size, num_classes)`, representing a one-hot
      encoding of the class index.

  Raises:
    ValueError: If `labels`, `label_mode`, or `class_names` are
      inconsistently specified, or if no `.txt` file is found under
      `directory`.
  """
  # Validate the `labels` / `class_names` combination before touching disk.
  if labels not in ('inferred', None):
    if not isinstance(labels, (list, tuple)):
      raise ValueError(
          '`labels` argument should be a list/tuple of integer labels, of '
          'the same size as the number of text files in the target '
          'directory. If you wish to infer the labels from the subdirectory '
          'names in the target directory, pass `labels="inferred"`. '
          'If you wish to get a dataset that only contains text samples '
          f'(no labels), pass `labels=None`. Received: labels={labels}')
    if class_names:
      # `class_names` only makes sense when labels are inferred from
      # subdirectory names.
      raise ValueError('You can only pass `class_names` if '
                       f'`labels="inferred"`. Received: labels={labels}, and '
                       f'class_names={class_names}')
  if label_mode not in {'int', 'categorical', 'binary', None}:
    raise ValueError(
        '`label_mode` argument must be one of "int", "categorical", "binary", '
        f'or None. Received: label_mode={label_mode}')
  # `labels=None` and `label_mode=None` imply each other: normalize both so
  # downstream code only has to check one of them.
  if labels is None or label_mode is None:
    labels = None
    label_mode = None
  dataset_utils.check_validation_split_arg(
      validation_split, subset, shuffle, seed)
  # A concrete seed is needed so the training/validation subsets see the same
  # shuffle order of the file list.
  if seed is None:
    seed = np.random.randint(1e6)
  # Enumerate (and optionally shuffle) all `.txt` paths with their labels.
  file_paths, labels, class_names = dataset_utils.index_directory(
      directory,
      labels,
      formats=('.txt',),
      class_names=class_names,
      shuffle=shuffle,
      seed=seed,
      follow_links=follow_links)
  if label_mode == 'binary' and len(class_names) != 2:
    raise ValueError(
        f'When passing `label_mode="binary"`, there must be exactly 2 '
        f'class_names. Received: class_names={class_names}')
  # Keep only the requested subset when `validation_split` is in use.
  file_paths, labels = dataset_utils.get_training_or_validation_split(
      file_paths, labels, validation_split, subset)
  if not file_paths:
    raise ValueError(f'No text files found in directory {directory}. '
                     f'Allowed format: .txt')
  dataset = paths_and_labels_to_dataset(
      file_paths=file_paths,
      labels=labels,
      label_mode=label_mode,
      num_classes=len(class_names),
      max_length=max_length)
  if shuffle:
    # Shuffle locally at each iteration
    dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
  dataset = dataset.prefetch(tf.data.AUTOTUNE).batch(batch_size)
  # Users may need to reference `class_names`.
  dataset.class_names = class_names
  return dataset
def paths_and_labels_to_dataset(file_paths,
                                labels,
                                label_mode,
                                num_classes,
                                max_length):
  """Builds a `tf.data.Dataset` yielding file contents (and labels, if any)."""
  def _read_file(path):
    # Load (and possibly truncate) the file behind each path.
    return path_to_string_content(path, max_length)

  dataset = tf.data.Dataset.from_tensor_slices(file_paths).map(_read_file)
  if not label_mode:
    # Unlabeled dataset: yield raw strings only.
    return dataset
  label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
  return tf.data.Dataset.zip((dataset, label_ds))
def path_to_string_content(path, max_length):
  """Reads the text file at `path`, optionally truncated to `max_length`.

  Args:
    path: Scalar string tensor holding a filesystem path.
    max_length: Maximum number of bytes to keep, or None for no truncation.

  Returns:
    A scalar string tensor with the (possibly truncated) file contents.
  """
  txt = tf.io.read_file(path)
  if max_length is not None:
    # Use the TF2 API (this file imports `tensorflow.compat.v2`); the v1
    # `tf.compat.v1.strings.substr` call was a leftover with identical
    # semantics. Truncation is byte-based (default `unit='BYTE'`), so it may
    # split a multi-byte UTF-8 character — TODO confirm callers accept that.
    txt = tf.strings.substr(txt, 0, max_length)
  return txt
| 8,096 | 40.523077 | 80 | py |
keras | keras-master/keras/preprocessing/timeseries_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for timeseries."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.preprocessing import timeseries
class TimeseriesDatasetTest(tf.test.TestCase):
  """Tests for `timeseries.timeseries_dataset_from_array`."""

  def test_basics(self):
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
    # Expect 19 batches: 92 windows of length 9, batched by 5.
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
      if i < 18:
        self.assertEqual(inputs.shape, (5, 9))
      if i == 18:
        # Last batch: size 2
        self.assertEqual(inputs.shape, (2, 9))
      # Check target values
      self.assertAllClose(targets, inputs[:, 0] * 2)
      for j in range(min(5, len(inputs))):
        # Check each sample in the batch
        self.assertAllClose(inputs[j], np.arange(i * 5 + j, i * 5 + j + 9))

  def test_timeseries_regression(self):
    # Test simple timeseries regression use case
    data = np.arange(10)
    offset = 3
    # Target for a window starting at t is the value at t + offset.
    targets = data[offset:]
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=offset, batch_size=1)
    i = 0
    for batch in dataset:
      self.assertLen(batch, 2)
      inputs, targets = batch
      self.assertEqual(inputs.shape, (1, 3))
      # Check values
      self.assertAllClose(targets[0], data[offset + i])
      self.assertAllClose(inputs[0], data[i : i + offset])
      i += 1
    self.assertEqual(i, 7)  # Expect 7 batches

  def test_no_targets(self):
    # With `targets=None` the dataset should yield bare input windows.
    data = np.arange(50)
    dataset = timeseries.timeseries_dataset_from_array(
        data, None, sequence_length=10, batch_size=5)
    # Expect 9 batches
    i = None
    for i, batch in enumerate(dataset):
      if i < 8:
        self.assertEqual(batch.shape, (5, 10))
      elif i == 8:
        self.assertEqual(batch.shape, (1, 10))
      for j in range(min(5, len(batch))):
        # Check each sample in the batch
        self.assertAllClose(batch[j], np.arange(i * 5 + j, i * 5 + j + 10))
    self.assertEqual(i, 8)

  def test_shuffle(self):
    # Test cross-epoch random order and seed determinism
    data = np.arange(10)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=5, batch_size=1, shuffle=True, seed=123)
    first_seq = None
    for x, y in dataset.take(1):
      self.assertNotAllClose(x, np.arange(0, 5))
      self.assertAllClose(x[:, 0] * 2, y)
      first_seq = x
    # Check that a new iteration with the same dataset yields different results
    for x, _ in dataset.take(1):
      self.assertNotAllClose(x, first_seq)
    # Check determinism with same seed
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=5, batch_size=1, shuffle=True, seed=123)
    for x, _ in dataset.take(1):
      self.assertAllClose(x, first_seq)

  def test_sampling_rate(self):
    # `sampling_rate=2` keeps every other timestep inside each window.
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5, sampling_rate=2)
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
      if i < 16:
        self.assertEqual(inputs.shape, (5, 9))
      if i == 16:
        # Last batch: size 3
        self.assertEqual(inputs.shape, (3, 9))
      # Check target values
      self.assertAllClose(inputs[:, 0] * 2, targets)
      for j in range(min(5, len(inputs))):
        # Check each sample in the batch
        start_index = i * 5 + j
        end_index = start_index + 9 * 2
        self.assertAllClose(inputs[j], np.arange(start_index, end_index, 2))

  def test_sequence_stride(self):
    # `sequence_stride=3` starts consecutive windows 3 timesteps apart.
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5, sequence_stride=3)
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
      if i < 6:
        self.assertEqual(inputs.shape, (5, 9))
      if i == 6:
        # Last batch: size 1
        self.assertEqual(inputs.shape, (1, 9))
      # Check target values
      self.assertAllClose(inputs[:, 0] * 2, targets)
      for j in range(min(5, len(inputs))):
        # Check each sample in the batch
        start_index = i * 5 * 3 + j * 3
        end_index = start_index + 9
        self.assertAllClose(inputs[j],
                            np.arange(start_index, end_index))

  def test_start_and_end_index(self):
    # All yielded values must fall within (start_index, end_index).
    data = np.arange(100)
    dataset = timeseries.timeseries_dataset_from_array(
        data, None,
        sequence_length=9, batch_size=5, sequence_stride=3, sampling_rate=2,
        start_index=10, end_index=90)
    for batch in dataset:
      self.assertAllLess(batch[0], 90)
      self.assertAllGreater(batch[0], 9)

  def test_errors(self):
    # bad start index
    with self.assertRaisesRegex(ValueError, '`start_index` must be '):
      _ = timeseries.timeseries_dataset_from_array(
          np.arange(10), None, 3, start_index=-1)
    with self.assertRaisesRegex(ValueError, '`start_index` must be '):
      _ = timeseries.timeseries_dataset_from_array(
          np.arange(10), None, 3, start_index=11)
    # bad end index
    with self.assertRaisesRegex(ValueError, '`end_index` must be '):
      _ = timeseries.timeseries_dataset_from_array(
          np.arange(10), None, 3, end_index=-1)
    with self.assertRaisesRegex(ValueError, '`end_index` must be '):
      _ = timeseries.timeseries_dataset_from_array(
          np.arange(10), None, 3, end_index=11)
    # bad sampling_rate
    with self.assertRaisesRegex(ValueError, '`sampling_rate` must be '):
      _ = timeseries.timeseries_dataset_from_array(
          np.arange(10), None, 3, sampling_rate=0)
    # bad sequence stride
    with self.assertRaisesRegex(ValueError, '`sequence_stride` must be '):
      _ = timeseries.timeseries_dataset_from_array(
          np.arange(10), None, 3, sequence_stride=0)
if __name__ == '__main__':
  # Run under TF2 (eager) semantics, matching how the library is exercised.
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| 6,838 | 36.78453 | 80 | py |
keras | keras-master/keras/preprocessing/image.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-import-not-at-top
# pylint: disable=g-classes-have-attributes
"""Set of tools for real-time data augmentation on image data."""
import tensorflow.compat.v2 as tf
from keras_preprocessing import image
import numpy as np
try:
from scipy import linalg # pylint: disable=unused-import
from scipy import ndimage # pylint: disable=unused-import
except ImportError:
pass
from keras import backend
from keras.preprocessing.image_dataset import image_dataset_from_directory # pylint: disable=unused-import
from keras.utils import data_utils
from keras.utils import tf_inspect
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
# Re-export the stateless transformation utilities from `keras_preprocessing`
# so they stay importable from `keras.preprocessing.image`.
random_rotation = image.random_rotation
random_shift = image.random_shift
random_shear = image.random_shear
random_zoom = image.random_zoom
apply_channel_shift = image.apply_channel_shift
random_channel_shift = image.random_channel_shift
apply_brightness_shift = image.apply_brightness_shift
random_brightness = image.random_brightness
apply_affine_transform = image.apply_affine_transform
@keras_export('keras.preprocessing.image.smart_resize', v1=[])
def smart_resize(x, size, interpolation='bilinear'):
  """Resize images to a target size without aspect ratio distortion.

  TensorFlow image datasets typically yield images that have each a different
  size. However, these images need to be batched before they can be
  processed by Keras layers. To be batched, images need to share the same height
  and width.

  You could simply do:

  ```python
  size = (200, 200)
  ds = ds.map(lambda img: tf.image.resize(img, size))
  ```

  However, if you do this, you distort the aspect ratio of your images, since
  in general they do not all have the same aspect ratio as `size`. This is
  fine in many cases, but not always (e.g. for GANs this can be a problem).

  Note that passing the argument `preserve_aspect_ratio=True` to `resize`
  will preserve the aspect ratio, but at the cost of no longer respecting the
  provided target size. Because `tf.image.resize` doesn't crop images,
  your output images will still have different sizes.

  This calls for:

  ```python
  size = (200, 200)
  ds = ds.map(lambda img: smart_resize(img, size))
  ```

  Your output images will actually be `(200, 200)`, and will not be distorted.
  Instead, the parts of the image that do not fit within the target size
  get cropped out.

  The resizing process is:

  1. Take the largest centered crop of the image that has the same aspect ratio
  as the target size. For instance, if `size=(200, 200)` and the input image has
  size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.
  2. Resize the cropped image to the target size. In the example above,
  we resize the `(340, 340)` crop to `(200, 200)`.

  Args:
    x: Input image or batch of images (as a tensor or NumPy array).
      Must be in format `(height, width, channels)` or
      `(batch_size, height, width, channels)`.
    size: Tuple of `(height, width)` integer. Target size.
    interpolation: String, interpolation to use for resizing.
      Defaults to `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`,
      `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.

  Returns:
    Array with shape `(size[0], size[1], channels)`. If the input image was a
    NumPy array, the output is a NumPy array, and if it was a TF tensor,
    the output is a TF tensor.

  Raises:
    ValueError: If `size` is not a pair, or if the input's rank is statically
      known and is neither 3 nor 4.
  """
  if len(size) != 2:
    raise ValueError('Expected `size` to be a tuple of 2 integers, '
                     f'but got: {size}.')
  img = tf.convert_to_tensor(x)
  # Rank can only be validated eagerly when it is statically known; an
  # unknown-rank tensor (e.g. inside `tf.function`) is accepted as-is.
  if img.shape.rank is not None:
    if img.shape.rank < 3 or img.shape.rank > 4:
      raise ValueError(
          'Expected an image array with shape `(height, width, channels)`, '
          'or `(batch_size, height, width, channels)`, but '
          f'got input with incorrect rank, of shape {img.shape}.')
  # Use the dynamic shape so this works for images whose size is only known
  # at runtime; negative indexing handles both 3-D and 4-D inputs.
  shape = tf.shape(img)
  height, width = shape[-3], shape[-2]
  target_height, target_width = size
  if img.shape.rank is not None:
    # Remember the static channel count so it can be restored after resize.
    static_num_channels = img.shape[-1]
  else:
    static_num_channels = None
  # Candidate crop sizes that match the target aspect ratio: one derived from
  # the input width, one from the input height.
  crop_height = tf.cast(
      tf.cast(width * target_height, 'float32') / target_width, 'int32')
  crop_width = tf.cast(
      tf.cast(height * target_width, 'float32') / target_height, 'int32')
  # Set back to input height / width if crop_height / crop_width is not smaller.
  crop_height = tf.minimum(height, crop_height)
  crop_width = tf.minimum(width, crop_width)
  # Center the crop box within the input image.
  crop_box_hstart = tf.cast(
      tf.cast(height - crop_height, 'float32') / 2, 'int32')
  crop_box_wstart = tf.cast(
      tf.cast(width - crop_width, 'float32') / 2, 'int32')
  if img.shape.rank == 4:
    # Batched input: -1 keeps the full extent of the batch/channel axes.
    crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])
    crop_box_size = tf.stack([-1, crop_height, crop_width, -1])
  else:
    crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])
    crop_box_size = tf.stack([crop_height, crop_width, -1])
  img = tf.slice(img, crop_box_start, crop_box_size)
  img = tf.image.resize(
      images=img,
      size=size,
      method=interpolation)
  # Apparent bug in resize_images_v2 may cause shape to be lost
  if img.shape.rank is not None:
    if img.shape.rank == 4:
      img.set_shape((None, None, None, static_num_channels))
    if img.shape.rank == 3:
      img.set_shape((None, None, static_num_channels))
  if isinstance(x, np.ndarray):
    # Preserve the caller's array type: NumPy in, NumPy out.
    return img.numpy()
  return img
@keras_export('keras.utils.array_to_img',
              'keras.preprocessing.image.array_to_img')
def array_to_img(x, data_format=None, scale=True, dtype=None):
  """Converts a 3D Numpy array to a PIL Image instance.

  Usage:

  ```python
  from PIL import Image
  img = np.random.random(size=(100, 100, 3))
  pil_img = tf.keras.preprocessing.image.array_to_img(img)
  ```

  Args:
    x: Input data, in any form that can be converted to a Numpy array.
    data_format: Image data format, can be either "channels_first" or
      "channels_last". Defaults to `None`, in which case the global setting
      `tf.keras.backend.image_data_format()` is used (unless you changed it,
      it defaults to "channels_last").
    scale: Whether to rescale the image such that minimum and maximum values
      are 0 and 255 respectively. Defaults to `True`.
    dtype: Dtype to use. Default to `None`, in which case the global setting
      `tf.keras.backend.floatx()` is used (unless you changed it, it defaults
      to "float32")

  Returns:
    A PIL Image instance.

  Raises:
    ImportError: if PIL is not available.
    ValueError: if invalid `x` or `data_format` is passed.
  """
  if data_format is None:
    data_format = backend.image_data_format()
  # Older keras_preprocessing releases do not accept `dtype`; forward it only
  # when the installed version's signature declares the argument.
  extra_kwargs = {}
  accepted_args = tf_inspect.getfullargspec(image.array_to_img)[0]
  if 'dtype' in accepted_args:
    extra_kwargs['dtype'] = backend.floatx() if dtype is None else dtype
  return image.array_to_img(x, data_format=data_format, scale=scale,
                            **extra_kwargs)
@keras_export('keras.utils.img_to_array',
              'keras.preprocessing.image.img_to_array')
def img_to_array(img, data_format=None, dtype=None):
  """Converts a PIL Image instance to a Numpy array.

  Usage:

  ```python
  from PIL import Image
  img_data = np.random.random(size=(100, 100, 3))
  img = tf.keras.preprocessing.image.array_to_img(img_data)
  array = tf.keras.preprocessing.image.img_to_array(img)
  ```

  Args:
    img: Input PIL Image instance.
    data_format: Image data format, can be either "channels_first" or
      "channels_last". Defaults to `None`, in which case the global setting
      `tf.keras.backend.image_data_format()` is used (unless you changed it,
      it defaults to "channels_last").
    dtype: Dtype to use. Default to `None`, in which case the global setting
      `tf.keras.backend.floatx()` is used (unless you changed it, it defaults
      to "float32")

  Returns:
    A 3D Numpy array.

  Raises:
    ValueError: if invalid `img` or `data_format` is passed.
  """
  if data_format is None:
    data_format = backend.image_data_format()
  # Older keras_preprocessing releases do not accept `dtype`; forward it only
  # when the installed version's signature declares the argument.
  extra_kwargs = {}
  accepted_args = tf_inspect.getfullargspec(image.img_to_array)[0]
  if 'dtype' in accepted_args:
    extra_kwargs['dtype'] = backend.floatx() if dtype is None else dtype
  return image.img_to_array(img, data_format=data_format, **extra_kwargs)
@keras_export('keras.utils.save_img',
              'keras.preprocessing.image.save_img')
def save_img(path,
             x,
             data_format=None,
             file_format=None,
             scale=True,
             **kwargs):
  """Saves an image stored as a Numpy array to a path or file object.

  Args:
    path: Path or file object.
    x: Numpy array.
    data_format: Image data format,
      either "channels_first" or "channels_last".
    file_format: Optional file format override. If omitted, the
      format to use is determined from the filename extension.
      If a file object was used instead of a filename, this
      parameter should always be used.
    scale: Whether to rescale image values to be within `[0, 255]`.
    **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
  """
  # Fall back to the globally configured data format when none is given.
  if data_format is None:
    data_format = backend.image_data_format()
  # Delegate the actual encoding/writing to keras_preprocessing.
  image.save_img(
      path,
      x,
      data_format=data_format,
      file_format=file_format,
      scale=scale,
      **kwargs)
@keras_export('keras.utils.load_img',
              'keras.preprocessing.image.load_img')
def load_img(path, grayscale=False, color_mode='rgb', target_size=None,
             interpolation='nearest'):
  """Loads an image into PIL format.

  Usage:

  ```
  image = tf.keras.preprocessing.image.load_img(image_path)
  input_arr = tf.keras.preprocessing.image.img_to_array(image)
  input_arr = np.array([input_arr])  # Convert single image to a batch.
  predictions = model.predict(input_arr)
  ```

  Args:
    path: Path to image file.
    grayscale: DEPRECATED use `color_mode="grayscale"`.
    color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
      The desired image format.
    target_size: Either `None` (default to original size)
      or tuple of ints `(img_height, img_width)`.
    interpolation: Interpolation method used to resample the image if the
      target size is different from that of the loaded image.
      Supported methods are "nearest", "bilinear", and "bicubic".
      If PIL version 1.1.3 or newer is installed, "lanczos" is also
      supported. If PIL version 3.4.0 or newer is installed, "box" and
      "hamming" are also supported. By default, "nearest" is used.

  Returns:
    A PIL Image instance.

  Raises:
    ImportError: if PIL is not available.
    ValueError: if interpolation method is not supported.
  """
  # Pure pass-through to keras_preprocessing, which owns the PIL handling.
  return image.load_img(
      path,
      grayscale=grayscale,
      color_mode=color_mode,
      target_size=target_size,
      interpolation=interpolation)
@keras_export('keras.preprocessing.image.Iterator')
class Iterator(image.Iterator, data_utils.Sequence):
  """Base class for image iterators, bridged to Keras' `Sequence` API."""
  # Mixing in `data_utils.Sequence` lets keras_preprocessing iterators be
  # consumed directly by `Model.fit` / `evaluate` / `predict`.
  pass
@keras_export('keras.preprocessing.image.DirectoryIterator')
class DirectoryIterator(image.DirectoryIterator, Iterator):  # pylint: disable=inconsistent-mro
  """Iterator capable of reading images from a directory on disk.

  Args:
    directory: Path to the directory to read images from.
      Each subdirectory in this directory will be
      considered to contain images from one class,
      or alternatively you could specify class subdirectories
      via the `classes` argument.
    image_data_generator: Instance of `ImageDataGenerator`
      to use for random transformations and normalization.
    target_size: tuple of integers, dimensions to resize input images to.
    color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
      Color mode to read images.
    classes: Optional list of strings, names of subdirectories
      containing images from each class (e.g. `["dogs", "cats"]`).
      It will be computed automatically if not set.
    class_mode: Mode for yielding the targets:
      - `"binary"`: binary targets (if there are only two classes),
      - `"categorical"`: categorical targets,
      - `"sparse"`: integer targets,
      - `"input"`: targets are images identical to input images (mainly
        used to work with autoencoders),
      - `None`: no targets get yielded (only input images are yielded).
    batch_size: Integer, size of a batch.
    shuffle: Boolean, whether to shuffle the data between epochs.
    seed: Random seed for data shuffling.
    data_format: String, one of `channels_first`, `channels_last`.
    save_to_dir: Optional directory where to save the pictures
      being yielded, in a viewable format. This is useful
      for visualizing the random transformations being
      applied, for debugging purposes.
    save_prefix: String prefix to use for saving sample
      images (if `save_to_dir` is set).
    save_format: Format to use for saving sample images
      (if `save_to_dir` is set).
    subset: Subset of data (`"training"` or `"validation"`) if
      validation_split is set in ImageDataGenerator.
    interpolation: Interpolation method used to resample the image if the
      target size is different from that of the loaded image.
      Supported methods are "nearest", "bilinear", and "bicubic".
      If PIL version 1.1.3 or newer is installed, "lanczos" is also
      supported. If PIL version 3.4.0 or newer is installed, "box" and
      "hamming" are also supported. By default, "nearest" is used.
    dtype: Dtype to use for generated arrays.
  """

  def __init__(self, directory, image_data_generator,
               target_size=(256, 256),
               color_mode='rgb',
               classes=None,
               class_mode='categorical',
               batch_size=32,
               shuffle=True,
               seed=None,
               data_format=None,
               save_to_dir=None,
               save_prefix='',
               save_format='png',
               follow_links=False,
               subset=None,
               interpolation='nearest',
               dtype=None):
    # Resolve Keras-level defaults before delegating to keras_preprocessing.
    if data_format is None:
      data_format = backend.image_data_format()
    kwargs = {}
    # Older keras_preprocessing releases do not accept `dtype`; forward it
    # only when the installed version's signature declares the argument.
    if 'dtype' in tf_inspect.getfullargspec(
        image.ImageDataGenerator.__init__)[0]:
      if dtype is None:
        dtype = backend.floatx()
      kwargs['dtype'] = dtype
    super(DirectoryIterator, self).__init__(
        directory, image_data_generator,
        target_size=target_size,
        color_mode=color_mode,
        classes=classes,
        class_mode=class_mode,
        batch_size=batch_size,
        shuffle=shuffle,
        seed=seed,
        data_format=data_format,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        follow_links=follow_links,
        subset=subset,
        interpolation=interpolation,
        **kwargs)
@keras_export('keras.preprocessing.image.NumpyArrayIterator')
class NumpyArrayIterator(image.NumpyArrayIterator, Iterator):
  """Iterator yielding data from a Numpy array.

  Args:
    x: Numpy array of input data or tuple.
      If tuple, the second elements is either
      another numpy array or a list of numpy arrays,
      each of which gets passed
      through as an output without any modifications.
    y: Numpy array of targets data.
    image_data_generator: Instance of `ImageDataGenerator`
      to use for random transformations and normalization.
    batch_size: Integer, size of a batch.
    shuffle: Boolean, whether to shuffle the data between epochs.
    sample_weight: Numpy array of sample weights.
    seed: Random seed for data shuffling.
    data_format: String, one of `channels_first`, `channels_last`.
    save_to_dir: Optional directory where to save the pictures
      being yielded, in a viewable format. This is useful
      for visualizing the random transformations being
      applied, for debugging purposes.
    save_prefix: String prefix to use for saving sample
      images (if `save_to_dir` is set).
    save_format: Format to use for saving sample images
      (if `save_to_dir` is set).
    subset: Subset of data (`"training"` or `"validation"`) if
      validation_split is set in ImageDataGenerator.
    dtype: Dtype to use for the generated arrays.
  """

  def __init__(self, x, y, image_data_generator,
               batch_size=32,
               shuffle=False,
               sample_weight=None,
               seed=None,
               data_format=None,
               save_to_dir=None,
               save_prefix='',
               save_format='png',
               subset=None,
               dtype=None):
    # Resolve Keras-level defaults before delegating to keras_preprocessing.
    if data_format is None:
      data_format = backend.image_data_format()
    kwargs = {}
    # Older keras_preprocessing releases do not accept `dtype`; forward it
    # only when the installed version's signature declares the argument.
    if 'dtype' in tf_inspect.getfullargspec(
        image.NumpyArrayIterator.__init__)[0]:
      if dtype is None:
        dtype = backend.floatx()
      kwargs['dtype'] = dtype
    super(NumpyArrayIterator, self).__init__(
        x, y, image_data_generator,
        batch_size=batch_size,
        shuffle=shuffle,
        sample_weight=sample_weight,
        seed=seed,
        data_format=data_format,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        subset=subset,
        **kwargs)
class DataFrameIterator(image.DataFrameIterator, Iterator):  # pylint: disable=inconsistent-mro
  """Iterator capable of reading images from a directory on disk as a dataframe.

  Args:
    dataframe: Pandas dataframe containing the filepaths relative to
      `directory` (or absolute paths if `directory` is None) of the images in
      a string column. It should include other column/s depending on the
      `class_mode`:
        - if `class_mode` is `"categorical"` (default value) it must include
          the `y_col` column with the class/es of each image. Values in
          column can be string/list/tuple if a single class or list/tuple if
          multiple classes.
        - if `class_mode` is `"binary"` or `"sparse"` it must include the
          given `y_col` column with class values as strings.
        - if `class_mode` is `"raw"` or `"multi_output"` it should contain the
          columns specified in `y_col`.
        - if `class_mode` is `"input"` or `None` no extra column is needed.
    directory: string, path to the directory to read images from. If `None`,
      data in `x_col` column should be absolute paths.
    image_data_generator: Instance of `ImageDataGenerator` to use for random
      transformations and normalization. If None, no transformations and
      normalizations are made.
    x_col: string, column in `dataframe` that contains the filenames (or
      absolute paths if `directory` is `None`).
    y_col: string or list, column/s in `dataframe` that has the target data.
    weight_col: string, column in `dataframe` that contains the sample
      weights. Default: `None`.
    target_size: tuple of integers, dimensions to resize input images to.
    color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. Color mode to read
      images.
    classes: Optional list of strings, classes to use (e.g. `["dogs",
      "cats"]`). If None, all classes in `y_col` will be used.
    class_mode: one of "binary", "categorical", "input", "multi_output",
      "raw", "sparse" or None. Default: "categorical".
      Mode for yielding the targets:
        - `"binary"`: 1D numpy array of binary labels,
        - `"categorical"`: 2D numpy array of one-hot encoded labels. Supports
          multi-label output.
        - `"input"`: images identical to input images (mainly used to work
          with autoencoders),
        - `"multi_output"`: list with the values of the different columns,
        - `"raw"`: numpy array of values in `y_col` column(s),
        - `"sparse"`: 1D numpy array of integer labels,
        - `None`, no targets are returned (the generator will only yield
          batches of image data, which is useful to use in `model.predict()`).
    batch_size: Integer, size of a batch.
    shuffle: Boolean, whether to shuffle the data between epochs.
    seed: Random seed for data shuffling.
    data_format: String, one of `channels_first`, `channels_last`.
    save_to_dir: Optional directory where to save the pictures being yielded,
      in a viewable format. This is useful for visualizing the random
      transformations being applied, for debugging purposes.
    save_prefix: String prefix to use for saving sample images (if
      `save_to_dir` is set).
    save_format: Format to use for saving sample images (if `save_to_dir` is
      set).
    subset: Subset of data (`"training"` or `"validation"`) if
      validation_split is set in ImageDataGenerator.
    interpolation: Interpolation method used to resample the image if the
      target size is different from that of the loaded image. Supported
      methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
      or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
      or newer is installed, "box" and "hamming" are also supported. By
      default, "nearest" is used.
    dtype: Dtype to use for the generated arrays.
    validate_filenames: Boolean, whether to validate image filenames in
      `x_col`. If `True`, invalid images will be ignored. Disabling this
      option
    can lead to speed-up in the instantiation of this class. Default: `True`.
  """

  def __init__(
      self,
      dataframe,
      directory=None,
      image_data_generator=None,
      x_col='filename',
      y_col='class',
      weight_col=None,
      target_size=(256, 256),
      color_mode='rgb',
      classes=None,
      class_mode='categorical',
      batch_size=32,
      shuffle=True,
      seed=None,
      data_format='channels_last',
      save_to_dir=None,
      save_prefix='',
      save_format='png',
      subset=None,
      interpolation='nearest',
      dtype='float32',
      validate_filenames=True):
    # Pure pass-through: unlike the other iterators in this file, this class
    # assumes a keras_preprocessing version that accepts `dtype`, so no
    # signature probing is done. NOTE(review): presumably intentional —
    # confirm against the pinned keras_preprocessing dependency.
    super(DataFrameIterator, self).__init__(
        dataframe=dataframe,
        directory=directory,
        image_data_generator=image_data_generator,
        x_col=x_col,
        y_col=y_col,
        weight_col=weight_col,
        target_size=target_size,
        color_mode=color_mode,
        classes=classes,
        class_mode=class_mode,
        batch_size=batch_size,
        shuffle=shuffle,
        seed=seed,
        data_format=data_format,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        subset=subset,
        interpolation=interpolation,
        dtype=dtype,
        validate_filenames=validate_filenames
    )
@keras_export('keras.preprocessing.image.ImageDataGenerator')
class ImageDataGenerator(image.ImageDataGenerator):
  """Generate batches of tensor image data with real-time data augmentation.

  The data will be looped over (in batches).

  Args:
    featurewise_center: Boolean.
      Set input mean to 0 over the dataset, feature-wise.
    samplewise_center: Boolean. Set each sample mean to 0.
    featurewise_std_normalization: Boolean.
      Divide inputs by std of the dataset, feature-wise.
    samplewise_std_normalization: Boolean. Divide each input by its std.
    zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
    zca_whitening: Boolean. Apply ZCA whitening.
    rotation_range: Int. Degree range for random rotations.
    width_shift_range: Float, 1-D array-like or int
      - float: fraction of total width, if < 1, or pixels if >= 1.
      - 1-D array-like: random elements from the array.
      - int: integer number of pixels from interval
        `(-width_shift_range, +width_shift_range)`
      - With `width_shift_range=2` possible values
        are integers `[-1, 0, +1]`,
        same as with `width_shift_range=[-1, 0, +1]`,
        while with `width_shift_range=1.0` possible values are floats
        in the interval [-1.0, +1.0).
    height_shift_range: Float, 1-D array-like or int
      - float: fraction of total height, if < 1, or pixels if >= 1.
      - 1-D array-like: random elements from the array.
      - int: integer number of pixels from interval
        `(-height_shift_range, +height_shift_range)`
      - With `height_shift_range=2` possible values
        are integers `[-1, 0, +1]`,
        same as with `height_shift_range=[-1, 0, +1]`,
        while with `height_shift_range=1.0` possible values are floats
        in the interval [-1.0, +1.0).
    brightness_range: Tuple or list of two floats. Range for picking
      a brightness shift value from.
    shear_range: Float. Shear Intensity
      (Shear angle in counter-clockwise direction in degrees)
    zoom_range: Float or [lower, upper]. Range for random zoom.
      If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
    channel_shift_range: Float. Range for random channel shifts.
    fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
      Default is 'nearest'.
      Points outside the boundaries of the input are filled
      according to the given mode:
      - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
      - 'nearest':  aaaaaaaa|abcd|dddddddd
      - 'reflect':  abcddcba|abcd|dcbaabcd
      - 'wrap':  abcdabcd|abcd|abcdabcd
    cval: Float or Int.
      Value used for points outside the boundaries
      when `fill_mode = "constant"`.
    horizontal_flip: Boolean. Randomly flip inputs horizontally.
    vertical_flip: Boolean. Randomly flip inputs vertically.
    rescale: rescaling factor. Defaults to None.
      If None or 0, no rescaling is applied,
      otherwise we multiply the data by the value provided
      (after applying all other transformations).
    preprocessing_function: function that will be applied on each input.
      The function will run after the image is resized and augmented.
      The function should take one argument:
      one image (Numpy tensor with rank 3),
      and should output a Numpy tensor with the same shape.
    data_format: Image data format,
      either "channels_first" or "channels_last".
      "channels_last" mode means that the images should have shape
      `(samples, height, width, channels)`,
      "channels_first" mode means that the images should have shape
      `(samples, channels, height, width)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".
    validation_split: Float. Fraction of images reserved for validation
      (strictly between 0 and 1).
    dtype: Dtype to use for the generated arrays.

  Raises:
    ValueError: If the value of the argument, `data_format` is other than
      `"channels_last"` or `"channels_first"`.
    ValueError: If the value of the argument, `validation_split` > 1
      or `validation_split` < 0.

  Examples:

  Example of using `.flow(x, y)`:

  ```python
  (x_train, y_train), (x_test, y_test) = cifar10.load_data()
  y_train = utils.to_categorical(y_train, num_classes)
  y_test = utils.to_categorical(y_test, num_classes)
  datagen = ImageDataGenerator(
      featurewise_center=True,
      featurewise_std_normalization=True,
      rotation_range=20,
      width_shift_range=0.2,
      height_shift_range=0.2,
      horizontal_flip=True,
      validation_split=0.2)
  # compute quantities required for featurewise normalization
  # (std, mean, and principal components if ZCA whitening is applied)
  datagen.fit(x_train)
  # fits the model on batches with real-time data augmentation:
  model.fit(datagen.flow(x_train, y_train, batch_size=32,
           subset='training'),
           validation_data=datagen.flow(x_train, y_train,
           batch_size=8, subset='validation'),
           steps_per_epoch=len(x_train) / 32, epochs=epochs)
  # here's a more "manual" example
  for e in range(epochs):
      print('Epoch', e)
      batches = 0
      for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
          model.fit(x_batch, y_batch)
          batches += 1
          if batches >= len(x_train) / 32:
              # we need to break the loop by hand because
              # the generator loops indefinitely
              break
  ```

  Example of using `.flow_from_directory(directory)`:

  ```python
  train_datagen = ImageDataGenerator(
          rescale=1./255,
          shear_range=0.2,
          zoom_range=0.2,
          horizontal_flip=True)
  test_datagen = ImageDataGenerator(rescale=1./255)
  train_generator = train_datagen.flow_from_directory(
          'data/train',
          target_size=(150, 150),
          batch_size=32,
          class_mode='binary')
  validation_generator = test_datagen.flow_from_directory(
          'data/validation',
          target_size=(150, 150),
          batch_size=32,
          class_mode='binary')
  model.fit(
          train_generator,
          steps_per_epoch=2000,
          epochs=50,
          validation_data=validation_generator,
          validation_steps=800)
  ```

  Example of transforming images and masks together.

  ```python
  # we create two instances with the same arguments
  data_gen_args = dict(featurewise_center=True,
                       featurewise_std_normalization=True,
                       rotation_range=90,
                       width_shift_range=0.1,
                       height_shift_range=0.1,
                       zoom_range=0.2)
  image_datagen = ImageDataGenerator(**data_gen_args)
  mask_datagen = ImageDataGenerator(**data_gen_args)
  # Provide the same seed and keyword arguments to the fit and flow methods
  seed = 1
  image_datagen.fit(images, augment=True, seed=seed)
  mask_datagen.fit(masks, augment=True, seed=seed)
  image_generator = image_datagen.flow_from_directory(
      'data/images',
      class_mode=None,
      seed=seed)
  mask_generator = mask_datagen.flow_from_directory(
      'data/masks',
      class_mode=None,
      seed=seed)
  # combine generators into one which yields image and masks
  train_generator = zip(image_generator, mask_generator)
  model.fit(
      train_generator,
      steps_per_epoch=2000,
      epochs=50)
  ```
  """

  def __init__(self,
               featurewise_center=False,
               samplewise_center=False,
               featurewise_std_normalization=False,
               samplewise_std_normalization=False,
               zca_whitening=False,
               zca_epsilon=1e-6,
               rotation_range=0,
               width_shift_range=0.,
               height_shift_range=0.,
               brightness_range=None,
               shear_range=0.,
               zoom_range=0.,
               channel_shift_range=0.,
               fill_mode='nearest',
               cval=0.,
               horizontal_flip=False,
               vertical_flip=False,
               rescale=None,
               preprocessing_function=None,
               data_format=None,
               validation_split=0.0,
               dtype=None):
    if data_format is None:
      data_format = backend.image_data_format()
    kwargs = {}
    # Older versions of the underlying `keras_preprocessing` base class do
    # not accept a `dtype` argument; only forward it when the base
    # `__init__` signature actually declares it.
    if 'dtype' in tf_inspect.getfullargspec(
        image.ImageDataGenerator.__init__)[0]:
      if dtype is None:
        dtype = backend.floatx()
      kwargs['dtype'] = dtype
    super(ImageDataGenerator, self).__init__(
        featurewise_center=featurewise_center,
        samplewise_center=samplewise_center,
        featurewise_std_normalization=featurewise_std_normalization,
        samplewise_std_normalization=samplewise_std_normalization,
        zca_whitening=zca_whitening,
        zca_epsilon=zca_epsilon,
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        brightness_range=brightness_range,
        shear_range=shear_range,
        zoom_range=zoom_range,
        channel_shift_range=channel_shift_range,
        fill_mode=fill_mode,
        cval=cval,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
        rescale=rescale,
        preprocessing_function=preprocessing_function,
        data_format=data_format,
        validation_split=validation_split,
        **kwargs)

  def flow(self,
           x,
           y=None,
           batch_size=32,
           shuffle=True,
           sample_weight=None,
           seed=None,
           save_to_dir=None,
           save_prefix='',
           save_format='png',
           subset=None):
    """Takes data & label arrays, generates batches of augmented data.

    Args:
      x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first
        element should contain the images and the second element another numpy
        array or a list of numpy arrays that gets passed to the output without
        any modifications. Can be used to feed the model miscellaneous data
        along with the images. In case of grayscale data, the channels axis of
        the image array should have value 1, in case of RGB data, it should
        have value 3, and in case of RGBA data, it should have value 4.
      y: Labels.
      batch_size: Int (default: 32).
      shuffle: Boolean (default: True).
      sample_weight: Sample weights.
      seed: Int (default: None).
      save_to_dir: None or str (default: None). This allows you to optionally
        specify a directory to which to save the augmented pictures being
        generated (useful for visualizing what you are doing).
      save_prefix: Str (default: `''`). Prefix to use for filenames of saved
        pictures (only relevant if `save_to_dir` is set).
      save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif",
        "tif", "jpg"
        (only relevant if `save_to_dir` is set). Default: "png".
      subset: Subset of data (`"training"` or `"validation"`) if
        `validation_split` is set in `ImageDataGenerator`.

    Returns:
      An `Iterator` yielding tuples of `(x, y)`
        where `x` is a numpy array of image data
        (in the case of a single image input) or a list
        of numpy arrays (in the case with
        additional inputs) and `y` is a numpy array
        of corresponding labels. If 'sample_weight' is not None,
        the yielded tuples are of the form `(x, y, sample_weight)`.
        If `y` is None, only the numpy array `x` is returned.

    Raises:
      ValueError: If the Value of the argument, `subset` is other than
        "training" or "validation".
    """
    return NumpyArrayIterator(
        x,
        y,
        self,
        batch_size=batch_size,
        shuffle=shuffle,
        sample_weight=sample_weight,
        seed=seed,
        data_format=self.data_format,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        subset=subset)

  def flow_from_directory(self,
                          directory,
                          target_size=(256, 256),
                          color_mode='rgb',
                          classes=None,
                          class_mode='categorical',
                          batch_size=32,
                          shuffle=True,
                          seed=None,
                          save_to_dir=None,
                          save_prefix='',
                          save_format='png',
                          follow_links=False,
                          subset=None,
                          interpolation='nearest'):
    """Takes the path to a directory & generates batches of augmented data.

    Args:
      directory: string, path to the target directory. It should contain one
        subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside
        each of the subdirectories directory tree will be included in the
        generator. See [this script](
          https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
          for more details.
      target_size: Tuple of integers `(height, width)`, defaults to `(256,
        256)`. The dimensions to which all images found will be resized.
      color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". Whether
        the images will be converted to have 1, 3, or 4 channels.
      classes: Optional list of class subdirectories
        (e.g. `['dogs', 'cats']`). Default: None. If not provided, the list
        of classes will be automatically inferred from the subdirectory
        names/structure under `directory`, where each subdirectory will be
        treated as a different class (and the order of the classes, which
        will map to the label indices, will be alphanumeric). The
        dictionary containing the mapping from class names to class
        indices can be obtained via the attribute `class_indices`.
      class_mode: One of "categorical", "binary", "sparse",
        "input", or None. Default: "categorical".
        Determines the type of label arrays that are returned:
        - "categorical" will be 2D one-hot encoded labels,
        - "binary" will be 1D binary labels,
        - "sparse" will be 1D integer labels,
        - "input" will be images identical to input images (mainly used to
          work with autoencoders).
        - If None, no labels are returned (the generator will only yield
          batches of image data, which is useful to use with
          `model.predict()`).
        Please note that in case of class_mode None, the data still needs to
        reside in a subdirectory of `directory` for it to work correctly.
      batch_size: Size of the batches of data (default: 32).
      shuffle: Whether to shuffle the data (default: True) If set to False,
        sorts the data in alphanumeric order.
      seed: Optional random seed for shuffling and transformations.
      save_to_dir: None or str (default: None). This allows you to optionally
        specify a directory to which to save the augmented pictures being
        generated (useful for visualizing what you are doing).
      save_prefix: Str. Prefix to use for filenames of saved pictures (only
        relevant if `save_to_dir` is set).
      save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif",
        "tif", "jpg"
        (only relevant if `save_to_dir` is set). Default: "png".
      follow_links: Whether to follow symlinks inside
        class subdirectories (default: False).
      subset: Subset of data (`"training"` or `"validation"`) if
        `validation_split` is set in `ImageDataGenerator`.
      interpolation: Interpolation method used to resample the image if the
        target size is different from that of the loaded image. Supported
        methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
        1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
        version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
        supported. By default, `"nearest"` is used.

    Returns:
      A `DirectoryIterator` yielding tuples of `(x, y)`
        where `x` is a numpy array containing a batch
        of images with shape `(batch_size, *target_size, channels)`
        and `y` is a numpy array of corresponding labels.
    """
    return DirectoryIterator(
        directory,
        self,
        target_size=target_size,
        color_mode=color_mode,
        classes=classes,
        class_mode=class_mode,
        data_format=self.data_format,
        batch_size=batch_size,
        shuffle=shuffle,
        seed=seed,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        follow_links=follow_links,
        subset=subset,
        interpolation=interpolation)

  def flow_from_dataframe(self,
                          dataframe,
                          directory=None,
                          x_col='filename',
                          y_col='class',
                          weight_col=None,
                          target_size=(256, 256),
                          color_mode='rgb',
                          classes=None,
                          class_mode='categorical',
                          batch_size=32,
                          shuffle=True,
                          seed=None,
                          save_to_dir=None,
                          save_prefix='',
                          save_format='png',
                          subset=None,
                          interpolation='nearest',
                          validate_filenames=True,
                          **kwargs):
    """Takes the dataframe and the path to a directory + generates batches.

    The generated batches contain augmented/normalized data.

    **A simple tutorial can be found **[here](
                                http://bit.ly/keras_flow_from_dataframe).

    Args:
      dataframe: Pandas dataframe containing the filepaths relative to
        `directory` (or absolute paths if `directory` is None) of the images
        in a string column. It should include other column/s
        depending on the `class_mode`:
        - if `class_mode` is `"categorical"` (default value) it must include
          the `y_col` column with the class/es of each image. Values in
          column can be string/list/tuple if a single class or list/tuple if
          multiple classes.
        - if `class_mode` is `"binary"` or `"sparse"` it must include the
          given `y_col` column with class values as strings.
        - if `class_mode` is `"raw"` or `"multi_output"` it should contain
          the columns specified in `y_col`.
        - if `class_mode` is `"input"` or `None` no extra column is needed.
      directory: string, path to the directory to read images from. If `None`,
        data in `x_col` column should be absolute paths.
      x_col: string, column in `dataframe` that contains the filenames (or
        absolute paths if `directory` is `None`).
      y_col: string or list, column/s in `dataframe` that has the target data.
      weight_col: string, column in `dataframe` that contains the sample
        weights. Default: `None`.
      target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
        The dimensions to which all images found will be resized.
      color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb". Whether
        the images will be converted to have 1 or 3 color channels.
      classes: optional list of classes (e.g. `['dogs', 'cats']`). Default is
        None. If not provided, the list of classes will be automatically
        inferred from the `y_col`, which will map to the label indices, will
        be alphanumeric). The dictionary containing the mapping from class
        names to class indices can be obtained via the attribute
        `class_indices`.
      class_mode: one of "binary", "categorical", "input", "multi_output",
        "raw", "sparse" or None. Default: "categorical".
        Mode for yielding the targets:
        - `"binary"`: 1D numpy array of binary labels,
        - `"categorical"`: 2D numpy array of one-hot encoded labels.
          Supports multi-label output.
        - `"input"`: images identical to input images (mainly used to work
          with autoencoders),
        - `"multi_output"`: list with the values of the different columns,
        - `"raw"`: numpy array of values in `y_col` column(s),
        - `"sparse"`: 1D numpy array of integer labels,
        - `None`, no targets are returned (the generator will only yield
          batches of image data, which is useful to use in
          `model.predict()`).
      batch_size: size of the batches of data (default: 32).
      shuffle: whether to shuffle the data (default: True)
      seed: optional random seed for shuffling and transformations.
      save_to_dir: None or str (default: None). This allows you to optionally
        specify a directory to which to save the augmented pictures being
        generated (useful for visualizing what you are doing).
      save_prefix: str. Prefix to use for filenames of saved pictures (only
        relevant if `save_to_dir` is set).
      save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif",
        "tif", "jpg"
        (only relevant if `save_to_dir` is set). Default: "png".
      subset: Subset of data (`"training"` or `"validation"`) if
        `validation_split` is set in `ImageDataGenerator`.
      interpolation: Interpolation method used to resample the image if the
        target size is different from that of the loaded image. Supported
        methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
        1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
        version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
        supported. By default, `"nearest"` is used.
      validate_filenames: Boolean, whether to validate image filenames in
        `x_col`. If `True`, invalid images will be ignored. Disabling this
        option can lead to speed-up in the execution of this function.
        Defaults to `True`.
      **kwargs: legacy arguments for raising deprecation warnings.

    Returns:
      A `DataFrameIterator` yielding tuples of `(x, y)`
      where `x` is a numpy array containing a batch
      of images with shape `(batch_size, *target_size, channels)`
      and `y` is a numpy array of corresponding labels.
    """
    # Deprecated keyword arguments are only warned about, never forwarded.
    # Note: `tf_logging.warning` uses lazy %-style formatting; passing
    # `DeprecationWarning` as an extra positional argument (as older code
    # did) makes the logging machinery fail to format the record, so only
    # the message itself is passed here.
    if 'has_ext' in kwargs:
      tf_logging.warning(
          'has_ext is deprecated, filenames in the dataframe have '
          'to match the exact filenames in disk.')
    if 'sort' in kwargs:
      # The adjacent string literals are joined with explicit trailing
      # spaces so the rendered message reads correctly.
      tf_logging.warning(
          'sort is deprecated, batches will be created in the '
          'same order than the filenames provided if shuffle '
          'is set to False.')
    if class_mode == 'other':
      tf_logging.warning(
          '`class_mode` "other" is deprecated, please use '
          '`class_mode` "raw".')
      class_mode = 'raw'
    if 'drop_duplicates' in kwargs:
      tf_logging.warning(
          'drop_duplicates is deprecated, you can drop duplicates '
          'by using the pandas.DataFrame.drop_duplicates method.')
    return DataFrameIterator(
        dataframe,
        directory,
        self,
        x_col=x_col,
        y_col=y_col,
        weight_col=weight_col,
        target_size=target_size,
        color_mode=color_mode,
        classes=classes,
        class_mode=class_mode,
        data_format=self.data_format,
        batch_size=batch_size,
        shuffle=shuffle,
        seed=seed,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        subset=subset,
        interpolation=interpolation,
        validate_filenames=validate_filenames)
# Re-export the stand-alone image transformation helpers (defined in the
# underlying preprocessing library and imported above) under the public
# `keras.preprocessing.image` namespace. `allow_multiple_exports=True` is
# required because these symbols may also be exported elsewhere.
keras_export('keras.preprocessing.image.random_rotation', allow_multiple_exports=True)(random_rotation)
keras_export('keras.preprocessing.image.random_shift', allow_multiple_exports=True)(random_shift)
keras_export('keras.preprocessing.image.random_shear', allow_multiple_exports=True)(random_shear)
keras_export('keras.preprocessing.image.random_zoom', allow_multiple_exports=True)(random_zoom)
keras_export(
    'keras.preprocessing.image.apply_channel_shift', allow_multiple_exports=True)(apply_channel_shift)
keras_export(
    'keras.preprocessing.image.random_channel_shift', allow_multiple_exports=True)(random_channel_shift)
keras_export(
    'keras.preprocessing.image.apply_brightness_shift', allow_multiple_exports=True)(apply_brightness_shift)
keras_export('keras.preprocessing.image.random_brightness', allow_multiple_exports=True)(random_brightness)
keras_export(
    'keras.preprocessing.image.apply_affine_transform', allow_multiple_exports=True)(apply_affine_transform)
| 49,573 | 41.995663 | 108 | py |
keras | keras-master/keras/preprocessing/dataset_utils.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image dataset loading utilities."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import multiprocessing
import os
import numpy as np
def index_directory(directory,
                    labels,
                    formats,
                    class_names=None,
                    shuffle=True,
                    seed=None,
                    follow_links=False):
  """Make list of all files in the subdirs of `directory`, with their labels.

  Args:
    directory: The target directory (string).
    labels: Either "inferred"
      (labels are generated from the directory structure),
      None (no labels),
      or a list/tuple of integer labels of the same size as the number of
      valid files found in the directory. Labels should be sorted according
      to the alphanumeric order of the image file paths
      (obtained via `os.walk(directory)` in Python).
    formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
    class_names: Only valid if "labels" is "inferred". This is the explicit
      list of class names (must match names of subdirectories). Used
      to control the order of the classes
      (otherwise alphanumerical order is used).
    shuffle: Whether to shuffle the data. Default: True.
      If set to False, sorts the data in alphanumeric order.
    seed: Optional random seed for shuffling.
    follow_links: Whether to visits subdirectories pointed to by symlinks.

  Returns:
    tuple (file_paths, labels, class_names).
      file_paths: list of file paths (strings).
      labels: list of matching integer labels (same length as file_paths)
      class_names: names of the classes corresponding to these labels, in
        order.

  Raises:
    ValueError: if `class_names` does not match the subdirectory names, or
      if an explicit `labels` list does not have one entry per indexed file.
  """
  # `labels` is rebound to an integer array further down, so record up front
  # whether the caller asked for no labels at all. (Previously the final
  # `labels is None` check ran after the rebinding and was unreachable.)
  no_labels_requested = labels is None
  if no_labels_requested:
    # in the no-label case, index from the parent directory down.
    subdirs = ['']
    class_names = subdirs
  else:
    subdirs = []
    for subdir in sorted(os.listdir(directory)):
      if os.path.isdir(os.path.join(directory, subdir)):
        subdirs.append(subdir)
    if not class_names:
      class_names = subdirs
    else:
      if set(class_names) != set(subdirs):
        raise ValueError(
            'The `class_names` passed did not match the '
            'names of the subdirectories of the target directory. '
            'Expected: %s, but received: %s' %
            (subdirs, class_names))
  class_indices = dict(zip(class_names, range(len(class_names))))

  # Build an index of the files
  # in the different class subfolders.
  # A thread pool is used because the work is I/O bound (directory walks).
  pool = multiprocessing.pool.ThreadPool()
  results = []
  filenames = []
  for dirpath in (os.path.join(directory, subdir) for subdir in subdirs):
    results.append(
        pool.apply_async(index_subdirectory,
                         (dirpath, class_indices, follow_links, formats)))
  labels_list = []
  for res in results:
    partial_filenames, partial_labels = res.get()
    labels_list.append(partial_labels)
    filenames += partial_filenames
  if labels not in ('inferred', None):
    # Explicit label list: it must have exactly one entry per indexed file.
    if len(labels) != len(filenames):
      raise ValueError('Expected the lengths of `labels` to match the number '
                       'of files in the target directory. len(labels) is %s '
                       'while we found %s files in %s.' % (
                           len(labels), len(filenames), directory))
  else:
    # Inferred (or absent) labels: flatten the per-subdirectory label lists
    # into one int32 array aligned with `filenames`.
    i = 0
    labels = np.zeros((len(filenames),), dtype='int32')
    for partial_labels in labels_list:
      labels[i:i + len(partial_labels)] = partial_labels
      i += len(partial_labels)
  if no_labels_requested:
    print('Found %d files.' % (len(filenames),))
  else:
    print('Found %d files belonging to %d classes.' %
          (len(filenames), len(class_names)))
  pool.close()
  pool.join()
  file_paths = [os.path.join(directory, fname) for fname in filenames]

  if shuffle:
    # Shuffle globally to erase macro-structure
    if seed is None:
      # Use an int bound: float arguments to randint are deprecated.
      seed = np.random.randint(10**6)
    # Shuffling twice with the same seed keeps paths and labels aligned.
    rng = np.random.RandomState(seed)
    rng.shuffle(file_paths)
    rng = np.random.RandomState(seed)
    rng.shuffle(labels)
  return file_paths, labels, class_names
def iter_valid_files(directory, follow_links, formats):
  """Yields `(root, filename)` pairs for files whose names end in `formats`.

  Directories and the files within each directory are visited in sorted
  order, so the output is deterministic. The extension check is
  case-insensitive.
  """
  walked = os.walk(directory, followlinks=follow_links)
  for root, _, files in sorted(walked, key=lambda entry: entry[0]):
    for fname in sorted(files):
      if not fname.lower().endswith(formats):
        continue
      yield root, fname
def index_subdirectory(directory, class_indices, follow_links, formats):
  """Recursively walks `directory` and lists file paths and class indices.

  Args:
    directory: string, target directory.
    class_indices: dict mapping class names to their index.
    follow_links: boolean, whether to recursively follow subdirectories
      (if False, we only list top-level images in `directory`).
    formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").

  Returns:
    tuple `(filenames, labels)`. `filenames` is a list of relative file
    paths, and `labels` is a list of integer labels corresponding to these
    files.
  """
  dirname = os.path.basename(directory)
  filenames = []
  labels = []
  for root, fname in iter_valid_files(directory, follow_links, formats):
    # Every file under this subtree carries the label of the top-level
    # directory name.
    labels.append(class_indices[dirname])
    full_path = os.path.join(root, fname)
    rel_path = os.path.join(dirname, os.path.relpath(full_path, directory))
    filenames.append(rel_path)
  return filenames, labels
def get_training_or_validation_split(samples, labels, validation_split, subset):
  """Potentially restict samples & labels to a training or validation split.

  Args:
    samples: List of elements.
    labels: List of corresponding labels.
    validation_split: Float, fraction of data to reserve for validation.
    subset: Subset of the data to return.
      Either "training", "validation", or None. If None, we return all of the
      data.

  Returns:
    tuple (samples, labels), potentially restricted to the specified subset.

  Raises:
    ValueError: if `subset` is neither "training" nor "validation" while a
      `validation_split` is given.
  """
  if not validation_split:
    return samples, labels

  num_val_samples = int(validation_split * len(samples))
  # Use an explicit split index rather than negative slicing: with a small
  # dataset `num_val_samples` can round down to 0, and `samples[:-0]` would
  # silently return an empty training set (while `samples[-0:]` would hand
  # *all* data to validation).
  split_index = len(samples) - num_val_samples
  if subset == 'training':
    print('Using %d files for training.' % (split_index,))
    samples = samples[:split_index]
    labels = labels[:split_index]
  elif subset == 'validation':
    print('Using %d files for validation.' % (num_val_samples,))
    samples = samples[split_index:]
    labels = labels[split_index:]
  else:
    raise ValueError('`subset` must be either "training" '
                     'or "validation", received: %s' % (subset,))
  return samples, labels
def labels_to_dataset(labels, label_mode, num_classes):
  """Create a tf.data.Dataset from the list/tuple of labels.

  Args:
    labels: list/tuple of labels to be converted into a tf.data.Dataset.
    label_mode:
    - 'binary' indicates that the labels (there can be only 2) are encoded as
      `float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`).
    - 'categorical' means that the labels are mapped into a categorical vector.
      (e.g. for `categorical_crossentropy` loss).
    num_classes: number of classes of labels. Only used when `label_mode` is
      'categorical' (it sets the one-hot vector length).

  Returns:
    A `tf.data.Dataset` yielding one label per element; for any `label_mode`
    other than 'binary' or 'categorical' the raw labels are passed through
    unchanged.
  """
  label_ds = tf.data.Dataset.from_tensor_slices(labels)
  if label_mode == 'binary':
    # Yield float32 tensors of shape (1,) so they align with a sigmoid output.
    label_ds = label_ds.map(
        lambda x: tf.expand_dims(tf.cast(x, 'float32'), axis=-1))
  elif label_mode == 'categorical':
    # One-hot encode each integer label into a vector of length `num_classes`.
    label_ds = label_ds.map(lambda x: tf.one_hot(x, num_classes))
  return label_ds
def check_validation_split_arg(validation_split, subset, shuffle, seed):
  """Raise errors in case of invalid argument values.

  Args:
    validation_split: float between 0 and 1, fraction of data to reserve for
      validation.
    subset: One of "training" or "validation". Only used if `validation_split`
      is set.
    shuffle: Whether to shuffle the data. Either True or False.
    seed: random seed for shuffling and transformations.

  Raises:
    ValueError: if any of the argument combinations described above is
      invalid.
  """
  if validation_split:
    out_of_range = validation_split <= 0 or validation_split >= 1
    if out_of_range:
      raise ValueError(
          '`validation_split` must be between 0 and 1, received: %s' %
          (validation_split,))
  # `validation_split` and `subset` must be provided together (truthy XOR).
  if bool(validation_split) != bool(subset):
    raise ValueError(
        'If `subset` is set, `validation_split` must be set, and inversely.')
  if subset not in ('training', 'validation', None):
    raise ValueError('`subset` must be either "training" '
                     'or "validation", received: %s' % (subset,))
  # Shuffled splits without a fixed seed could mix training and validation.
  if validation_split and shuffle and seed is None:
    raise ValueError(
        'If using `validation_split` and shuffling the data, you must provide '
        'a `seed` argument, to make sure that there is no overlap between the '
        'training and validation subset.')
| 9,421 | 37.773663 | 80 | py |
keras | keras-master/keras/preprocessing/text_dataset_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for text_dataset."""
import tensorflow.compat.v2 as tf
import os
import random
import shutil
import string
from keras import keras_parameterized
from keras.preprocessing import text_dataset
class TextDatasetFromDirectoryTest(keras_parameterized.TestCase):
  """Tests for `text_dataset.text_dataset_from_directory`."""

  def _prepare_directory(self,
                         num_classes=2,
                         nested_dirs=False,
                         count=16,
                         length=20):
    """Creates a temp directory tree of random text files.

    Args:
      num_classes: Number of `class_<i>` subdirectories to create.
      nested_dirs: If True, also create nested subfolders inside each class
        directory and spread the files across them.
      count: Total number of text files to write.
      length: Number of characters written into each file.

    Returns:
      Path to the created temporary directory (removed on test cleanup).
    """
    # Get a unique temp directory
    # NOTE(review): float upper bound works on Python < 3.12 only;
    # random.randrange rejects non-integer arguments in 3.12+.
    temp_dir = os.path.join(self.get_temp_dir(), str(random.randint(0, 1e6)))
    os.mkdir(temp_dir)
    self.addCleanup(shutil.rmtree, temp_dir)
    # Generate paths to class subdirectories
    paths = []
    for class_index in range(num_classes):
      class_directory = 'class_%s' % (class_index,)
      if nested_dirs:
        class_paths = [
            class_directory, os.path.join(class_directory, 'subfolder_1'),
            os.path.join(class_directory, 'subfolder_2'), os.path.join(
                class_directory, 'subfolder_1', 'sub-subfolder')
        ]
      else:
        class_paths = [class_directory]
      for path in class_paths:
        os.mkdir(os.path.join(temp_dir, path))
      paths += class_paths
    # Distribute `count` files round-robin over all created directories.
    for i in range(count):
      path = paths[i % len(paths)]
      filename = os.path.join(path, 'text_%s.txt' % (i,))
      f = open(os.path.join(temp_dir, filename), 'w')
      text = ''.join([random.choice(string.printable) for _ in range(length)])
      f.write(text)
      f.close()
    return temp_dir

  def test_text_dataset_from_directory_standalone(self):
    """Files are picked up without labels, including files at the root."""
    # Test retrieving txt files without labels from a directory and its subdirs.
    # Save a few extra files in the parent directory.
    directory = self._prepare_directory(count=7, num_classes=2)
    for i in range(3):
      filename = 'text_%s.txt' % (i,)
      f = open(os.path.join(directory, filename), 'w')
      text = ''.join([random.choice(string.printable) for _ in range(20)])
      f.write(text)
      f.close()
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=5, label_mode=None, max_length=10)
    batch = next(iter(dataset))
    # We just return the texts, no labels
    self.assertEqual(batch.shape, (5,))
    self.assertEqual(batch.dtype.name, 'string')
    # Count samples
    batch_count = 0
    sample_count = 0
    for batch in dataset:
      batch_count += 1
      sample_count += batch.shape[0]
    # 7 class files + 3 root files = 10 samples in 2 batches of 5.
    self.assertEqual(batch_count, 2)
    self.assertEqual(sample_count, 10)

  def test_text_dataset_from_directory_binary(self):
    """All three label modes work for a two-class directory."""
    directory = self._prepare_directory(num_classes=2)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode='int', max_length=10)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8,))
    self.assertEqual(batch[0].dtype.name, 'string')
    self.assertEqual(len(batch[0].numpy()[0]), 10)  # Test max_length
    self.assertEqual(batch[1].shape, (8,))
    self.assertEqual(batch[1].dtype.name, 'int32')
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode='binary')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8,))
    self.assertEqual(batch[0].dtype.name, 'string')
    self.assertEqual(batch[1].shape, (8, 1))
    self.assertEqual(batch[1].dtype.name, 'float32')
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode='categorical')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8,))
    self.assertEqual(batch[0].dtype.name, 'string')
    self.assertEqual(batch[1].shape, (8, 2))
    self.assertEqual(batch[1].dtype.name, 'float32')

  def test_sample_count(self):
    """The dataset yields exactly as many samples as files on disk."""
    directory = self._prepare_directory(num_classes=4, count=15)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode=None)
    sample_count = 0
    for batch in dataset:
      sample_count += batch.shape[0]
    self.assertEqual(sample_count, 15)

  def test_text_dataset_from_directory_multiclass(self):
    """Label modes behave correctly with more than two classes."""
    directory = self._prepare_directory(num_classes=4, count=15)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode=None)
    batch = next(iter(dataset))
    self.assertEqual(batch.shape, (8,))
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode=None)
    sample_count = 0
    iterator = iter(dataset)
    # Drive a second, explicit iterator in lockstep with the for-loop one.
    for batch in dataset:
      sample_count += next(iterator).shape[0]
    self.assertEqual(sample_count, 15)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode='int')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8,))
    self.assertEqual(batch[0].dtype.name, 'string')
    self.assertEqual(batch[1].shape, (8,))
    self.assertEqual(batch[1].dtype.name, 'int32')
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode='categorical')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8,))
    self.assertEqual(batch[0].dtype.name, 'string')
    self.assertEqual(batch[1].shape, (8, 4))
    self.assertEqual(batch[1].dtype.name, 'float32')

  def test_text_dataset_from_directory_validation_split(self):
    """An 80/20 split of 10 files yields 8 training and 2 validation samples."""
    directory = self._prepare_directory(num_classes=2, count=10)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=10, validation_split=0.2, subset='training',
        seed=1337)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8,))
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=10, validation_split=0.2, subset='validation',
        seed=1337)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (2,))

  def test_text_dataset_from_directory_manual_labels(self):
    """An explicit `labels` list overrides directory-inferred labels."""
    directory = self._prepare_directory(num_classes=2, count=2)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, labels=[0, 1], shuffle=False)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertAllClose(batch[1], [0, 1])

  def test_text_dataset_from_directory_follow_links(self):
    """`follow_links=True` picks up files from nested subdirectories."""
    directory = self._prepare_directory(num_classes=2, count=25,
                                        nested_dirs=True)
    dataset = text_dataset.text_dataset_from_directory(
        directory, batch_size=8, label_mode=None, follow_links=True)
    sample_count = 0
    for batch in dataset:
      sample_count += batch.shape[0]
    self.assertEqual(sample_count, 25)

  def test_text_dataset_from_directory_no_files(self):
    """An empty directory raises a clear error."""
    directory = self._prepare_directory(num_classes=2, count=0)
    with self.assertRaisesRegex(ValueError, 'No text files found'):
      _ = text_dataset.text_dataset_from_directory(directory)

  def test_text_dataset_from_directory_errors(self):
    """Invalid argument combinations each raise a descriptive ValueError."""
    directory = self._prepare_directory(num_classes=3, count=5)
    with self.assertRaisesRegex(ValueError, '`labels` argument should be'):
      _ = text_dataset.text_dataset_from_directory(
          directory, labels='other')
    with self.assertRaisesRegex(ValueError, '`label_mode` argument must be'):
      _ = text_dataset.text_dataset_from_directory(
          directory, label_mode='other')
    with self.assertRaisesRegex(
        ValueError, 'only pass `class_names` if `labels="inferred"`'):
      _ = text_dataset.text_dataset_from_directory(
          directory, labels=[0, 0, 1, 1, 1],
          class_names=['class_0', 'class_1', 'class_2'])
    with self.assertRaisesRegex(
        ValueError,
        'Expected the lengths of `labels` to match the number of files'):
      _ = text_dataset.text_dataset_from_directory(
          directory, labels=[0, 0, 1, 1])
    with self.assertRaisesRegex(
        ValueError, '`class_names` passed did not match'):
      _ = text_dataset.text_dataset_from_directory(
          directory, class_names=['class_0', 'class_2'])
    with self.assertRaisesRegex(ValueError, 'there must be exactly 2'):
      _ = text_dataset.text_dataset_from_directory(
          directory, label_mode='binary')
    with self.assertRaisesRegex(ValueError,
                                '`validation_split` must be between 0 and 1'):
      _ = text_dataset.text_dataset_from_directory(
          directory, validation_split=2)
    with self.assertRaisesRegex(ValueError,
                                '`subset` must be either "training" or'):
      _ = text_dataset.text_dataset_from_directory(
          directory, validation_split=0.2, subset='other')
    with self.assertRaisesRegex(ValueError, '`validation_split` must be set'):
      _ = text_dataset.text_dataset_from_directory(
          directory, validation_split=0, subset='training')
    with self.assertRaisesRegex(ValueError, 'must provide a `seed`'):
      _ = text_dataset.text_dataset_from_directory(
          directory, validation_split=0.2, subset='training')
if __name__ == '__main__':
  # Run the suite under TF2 semantics (eager execution by default).
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| 9,998 | 38.211765 | 80 | py |
keras | keras-master/keras/preprocessing/sequence.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for preprocessing sequence data."""
# pylint: disable=invalid-name
from keras_preprocessing import sequence
from keras.utils import data_utils
from tensorflow.python.util.tf_export import keras_export
# Re-export the keras_preprocessing implementations under this module's
# namespace so users can import them from `keras.preprocessing.sequence`.
make_sampling_table = sequence.make_sampling_table
skipgrams = sequence.skipgrams
# TODO(fchollet): consider making `_remove_long_seq` public.
_remove_long_seq = sequence._remove_long_seq  # pylint: disable=protected-access
@keras_export('keras.preprocessing.sequence.TimeseriesGenerator')
class TimeseriesGenerator(sequence.TimeseriesGenerator, data_utils.Sequence):
  """Utility class for generating batches of temporal data.

  This class takes in a sequence of data-points gathered at
  equal intervals, along with time series parameters such as
  stride, length of history, etc., to produce batches for
  training/validation.

  Args:
    data: Indexable generator (such as list or Numpy array)
      containing consecutive data points (timesteps).
      The data should be at 2D, and axis 0 is expected
      to be the time dimension.
    targets: Targets corresponding to timesteps in `data`.
      It should have same length as `data`.
    length: Length of the output sequences (in number of timesteps).
    sampling_rate: Period between successive individual timesteps
      within sequences. For rate `r`, timesteps
      `data[i]`, `data[i-r]`, ... `data[i - length]`
      are used for create a sample sequence.
    stride: Period between successive output sequences.
      For stride `s`, consecutive output samples would
      be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
    start_index: Data points earlier than `start_index` will not be used
      in the output sequences. This is useful to reserve part of the
      data for test or validation.
    end_index: Data points later than `end_index` will not be used
      in the output sequences. This is useful to reserve part of the
      data for test or validation.
    shuffle: Whether to shuffle output samples,
      or instead draw them in chronological order.
    reverse: Boolean: if `true`, timesteps in each output sample will be
      in reverse chronological order.
    batch_size: Number of timeseries samples in each batch
      (except maybe the last one).

  Returns:
    A [Sequence](https://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence) instance.

  Example:

  ```python
  from keras.preprocessing.sequence import TimeseriesGenerator
  import numpy as np
  data = np.array([[i] for i in range(50)])
  targets = np.array([[i] for i in range(50)])
  data_gen = TimeseriesGenerator(data, targets,
                                 length=10, sampling_rate=2,
                                 batch_size=2)
  assert len(data_gen) == 20
  batch_0 = data_gen[0]
  x, y = batch_0
  assert np.array_equal(x,
                        np.array([[[0], [2], [4], [6], [8]],
                                  [[1], [3], [5], [7], [9]]]))
  assert np.array_equal(y,
                        np.array([[10], [11]]))
  ```
  """
  # All behavior is inherited from keras_preprocessing's implementation;
  # mixing in `data_utils.Sequence` marks it as a Keras-compatible Sequence.
  pass
@keras_export('keras.preprocessing.sequence.pad_sequences')
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
  """Pads sequences to the same length.

  Transforms a list of `num_samples` sequences (lists of integers) into a 2D
  Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is
  either the `maxlen` argument if provided, or the length of the longest
  sequence in the list. Sequences shorter than `num_timesteps` are padded
  with `value`; sequences longer than `num_timesteps` are truncated to fit.
  The `padding` and `truncating` arguments control whether this happens at
  the beginning or the end of each sequence; both default to 'pre'.

  >>> sequence = [[1], [2, 3], [4, 5, 6]]
  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence)
  array([[0, 0, 1],
         [0, 2, 3],
         [4, 5, 6]], dtype=int32)

  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, value=-1)
  array([[-1, -1,  1],
         [-1,  2,  3],
         [ 4,  5,  6]], dtype=int32)

  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, padding='post')
  array([[1, 0, 0],
         [2, 3, 0],
         [4, 5, 6]], dtype=int32)

  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, maxlen=2)
  array([[0, 1],
         [2, 3],
         [5, 6]], dtype=int32)

  Args:
    sequences: List of sequences (each sequence is a list of integers).
    maxlen: Optional Int, maximum length of all sequences. If not provided,
      sequences will be padded to the length of the longest individual
      sequence.
    dtype: (Optional, defaults to int32). Type of the output sequences.
      To pad sequences with variable length strings, you can use `object`.
    padding: String, 'pre' or 'post' (optional, defaults to 'pre'):
      pad either before or after each sequence.
    truncating: String, 'pre' or 'post' (optional, defaults to 'pre'):
      remove values from sequences larger than `maxlen`, either at the
      beginning or at the end of the sequences.
    value: Float or String, padding value. (Optional, defaults to 0.)

  Returns:
    Numpy array with shape `(len(sequences), maxlen)`.

  Raises:
    ValueError: In case of invalid values for `truncating` or `padding`,
      or in case of invalid shape for a `sequences` entry.
  """
  # Thin wrapper: all of the work is delegated to keras_preprocessing.
  options = dict(maxlen=maxlen, dtype=dtype, padding=padding,
                 truncating=truncating, value=value)
  return sequence.pad_sequences(sequences, **options)
# Register the re-exported symbols under the public tf.keras API names.
keras_export(
    'keras.preprocessing.sequence.make_sampling_table', allow_multiple_exports=True)(make_sampling_table)
keras_export('keras.preprocessing.sequence.skipgrams', allow_multiple_exports=True)(skipgrams)
| 6,758 | 41.509434 | 105 | py |
keras | keras-master/keras/preprocessing/image_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing utils."""
import tensorflow.compat.v2 as tf
import os
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from keras import keras_parameterized
from keras import layers
from keras.engine import sequential
from keras.preprocessing import image as preprocessing_image
try:
  import PIL  # pylint:disable=g-import-not-at-top
except ImportError:
  # Pillow is optional; image tests below are skipped when it is unavailable.
  PIL = None
def _generate_test_images():
  """Builds 8 random RGB and 8 random grayscale 20x20 PIL images.

  Returns:
    A two-element list `[rgb_images, gray_images]`, each a list of 8
    unscaled PIL images.
  """
  width = height = 20
  rgb_images = []
  gray_images = []
  for _ in range(8):
    # Keep the exact order of np.random.rand calls so seeded runs match.
    bias = np.random.rand(width, height, 1) * 64
    variance = np.random.rand(width, height, 1) * (255 - 64)
    rgb_array = np.random.rand(width, height, 3) * variance + bias
    rgb_images.append(
        preprocessing_image.array_to_img(rgb_array, scale=False))
    gray_array = np.random.rand(width, height, 1) * variance + bias
    gray_images.append(
        preprocessing_image.array_to_img(gray_array, scale=False))
  return [rgb_images, gray_images]
class TestImage(keras_parameterized.TestCase):
  """Tests for image preprocessing utilities (`keras.preprocessing.image`)."""

  def test_smart_resize(self):
    """`smart_resize` returns ndarrays of the requested size for 3D input."""
    test_input = np.random.random((20, 40, 3))
    output = preprocessing_image.smart_resize(test_input, size=(50, 50))
    self.assertIsInstance(output, np.ndarray)
    self.assertListEqual(list(output.shape), [50, 50, 3])
    output = preprocessing_image.smart_resize(test_input, size=(10, 10))
    self.assertListEqual(list(output.shape), [10, 10, 3])
    output = preprocessing_image.smart_resize(test_input, size=(100, 50))
    self.assertListEqual(list(output.shape), [100, 50, 3])
    output = preprocessing_image.smart_resize(test_input, size=(5, 15))
    self.assertListEqual(list(output.shape), [5, 15, 3])

  @parameterized.named_parameters(
      ('size1', (50, 50)),
      ('size2', (10, 10)),
      ('size3', (100, 50)),
      ('size4', (5, 15)))
  def test_smart_resize_tf_dataset(self, size):
    """`smart_resize` can be mapped over a `tf.data.Dataset`."""
    test_input_np = np.random.random((2, 20, 40, 3))
    test_ds = tf.data.Dataset.from_tensor_slices(test_input_np)
    resize = lambda img: preprocessing_image.smart_resize(img, size=size)
    test_ds = test_ds.map(resize)
    for sample in test_ds.as_numpy_iterator():
      self.assertIsInstance(sample, np.ndarray)
      self.assertListEqual(list(sample.shape), [size[0], size[1], 3])

  def test_smart_resize_batch(self):
    """`smart_resize` handles batched (4D) input and center-crops width."""
    img = np.random.random((2, 20, 40, 3))
    out = preprocessing_image.smart_resize(img, size=(20, 20))
    self.assertListEqual(list(out.shape), [2, 20, 20, 3])
    self.assertAllClose(out, img[:, :, 10:-10, :])

  def test_smart_resize_errors(self):
    """Invalid `size` or input rank raises ValueError."""
    with self.assertRaisesRegex(ValueError, 'a tuple of 2 integers'):
      preprocessing_image.smart_resize(
          np.random.random((20, 20, 2)), size=(10, 5, 3))
    with self.assertRaisesRegex(ValueError, 'incorrect rank'):
      preprocessing_image.smart_resize(np.random.random((2, 4)), size=(10, 5))
    with self.assertRaisesRegex(ValueError, 'incorrect rank'):
      preprocessing_image.smart_resize(
          np.random.random((2, 4, 4, 5, 3)), size=(10, 5))

  def test_image_data_generator(self):
    """`ImageDataGenerator` fits and flows with every augmentation enabled."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    for test_images in _generate_test_images():
      img_list = []
      for im in test_images:
        img_list.append(preprocessing_image.img_to_array(im)[None, ...])
      images = np.vstack(img_list)
      generator = preprocessing_image.ImageDataGenerator(
          featurewise_center=True,
          samplewise_center=True,
          featurewise_std_normalization=True,
          samplewise_std_normalization=True,
          zca_whitening=True,
          rotation_range=90.,
          width_shift_range=0.1,
          height_shift_range=0.1,
          shear_range=0.5,
          zoom_range=0.2,
          channel_shift_range=0.,
          brightness_range=(1, 5),
          fill_mode='nearest',
          cval=0.5,
          horizontal_flip=True,
          vertical_flip=True)
      # Basic test before fit
      x = np.random.random((32, 10, 10, 3))
      generator.flow(x)
      # Fit
      generator.fit(images, augment=True)
      for x, _ in generator.flow(
          images,
          np.arange(images.shape[0]),
          shuffle=True):
        self.assertEqual(x.shape[1:], images.shape[1:])
        break

  def test_image_data_generator_with_split_value_error(self):
    """An out-of-range `validation_split` raises ValueError."""
    with self.assertRaises(ValueError):
      preprocessing_image.ImageDataGenerator(validation_split=5)

  def test_image_data_generator_invalid_data(self):
    """Malformed inputs to fit/flow raise ValueError."""
    generator = preprocessing_image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_last')
    # Test fit with invalid data
    with self.assertRaises(ValueError):
      x = np.random.random((3, 10, 10))
      generator.fit(x)
    # Test flow with invalid data
    with self.assertRaises(ValueError):
      generator.flow(np.arange(5))
    # Invalid number of channels: will work but raise a warning
    x = np.random.random((32, 10, 10, 5))
    generator.flow(x)
    with self.assertRaises(ValueError):
      generator = preprocessing_image.ImageDataGenerator(
          data_format='unknown')
    generator = preprocessing_image.ImageDataGenerator(zoom_range=(2., 2.))

  def test_image_data_generator_fit(self):
    """`fit` accepts grayscale and RGB data in both data formats."""
    generator = preprocessing_image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_last')
    # Test grayscale
    x = np.random.random((32, 10, 10, 1))
    generator.fit(x)
    # Test RBG
    x = np.random.random((32, 10, 10, 3))
    generator.fit(x)
    generator = preprocessing_image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_first')
    # Test grayscale
    x = np.random.random((32, 1, 10, 10))
    generator.fit(x)
    # Test RBG
    x = np.random.random((32, 3, 10, 10))
    generator.fit(x)

  def test_directory_iterator(self):
    """`flow_from_directory` finds nested images and works as a Sequence."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    num_classes = 2
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    # create folders and subfolders
    paths = []
    for cl in range(num_classes):
      class_directory = 'class-{}'.format(cl)
      classpaths = [
          class_directory, os.path.join(class_directory, 'subfolder-1'),
          os.path.join(class_directory, 'subfolder-2'), os.path.join(
              class_directory, 'subfolder-1', 'sub-subfolder')
      ]
      for path in classpaths:
        os.mkdir(os.path.join(temp_dir, path))
      paths.append(classpaths)
    # save the images in the paths
    count = 0
    filenames = []
    for test_images in _generate_test_images():
      for im in test_images:
        # rotate image class
        im_class = count % num_classes
        # rotate subfolders
        classpaths = paths[im_class]
        filename = os.path.join(classpaths[count % len(classpaths)],
                                'image-{}.jpg'.format(count))
        filenames.append(filename)
        im.save(os.path.join(temp_dir, filename))
        count += 1
    # Test image loading util
    fname = os.path.join(temp_dir, filenames[0])
    _ = preprocessing_image.load_img(fname)
    _ = preprocessing_image.load_img(fname, grayscale=True)
    _ = preprocessing_image.load_img(fname, target_size=(10, 10))
    _ = preprocessing_image.load_img(fname, target_size=(10, 10),
                                     interpolation='bilinear')
    # create iterator
    generator = preprocessing_image.ImageDataGenerator()
    dir_iterator = generator.flow_from_directory(temp_dir)
    # check number of classes and images
    self.assertEqual(len(dir_iterator.class_indices), num_classes)
    self.assertEqual(len(dir_iterator.classes), count)
    self.assertEqual(set(dir_iterator.filenames), set(filenames))

    def preprocessing_function(x):
      """This will fail if not provided by a Numpy array.

      Note: This is made to enforce backward compatibility.

      Args:
        x: A numpy array.

      Returns:
        An array of zeros with the same shape as the given array.
      """
      self.assertEqual(x.shape, (26, 26, 3))
      self.assertIs(type(x), np.ndarray)
      return np.zeros_like(x)

    # Test usage as Sequence
    generator = preprocessing_image.ImageDataGenerator(
        preprocessing_function=preprocessing_function)
    dir_seq = generator.flow_from_directory(
        str(temp_dir),
        target_size=(26, 26),
        color_mode='rgb',
        batch_size=3,
        class_mode='categorical')
    self.assertEqual(len(dir_seq), count // 3 + 1)
    x1, y1 = dir_seq[1]
    self.assertEqual(x1.shape, (3, 26, 26, 3))
    self.assertEqual(y1.shape, (3, num_classes))
    x1, y1 = dir_seq[5]
    self.assertTrue((x1 == 0).all())

  def directory_iterator_with_validation_split_test_helper(
      self, validation_split):
    """Shared body for the validation-split tests below.

    Args:
      validation_split: Fraction of images reserved for validation.
    """
    if PIL is None:
      return  # Skip test if PIL is not available.
    num_classes = 2
    tmp_folder = tempfile.mkdtemp(prefix='test_images')
    # create folders and subfolders
    paths = []
    for cl in range(num_classes):
      class_directory = 'class-{}'.format(cl)
      classpaths = [
          class_directory,
          os.path.join(class_directory, 'subfolder-1'),
          os.path.join(class_directory, 'subfolder-2'),
          os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
      ]
      for path in classpaths:
        os.mkdir(os.path.join(tmp_folder, path))
      paths.append(classpaths)
    # save the images in the paths
    count = 0
    filenames = []
    for test_images in _generate_test_images():
      for im in test_images:
        # rotate image class
        im_class = count % num_classes
        # rotate subfolders
        classpaths = paths[im_class]
        filename = os.path.join(classpaths[count % len(classpaths)],
                                'image-{}.jpg'.format(count))
        filenames.append(filename)
        im.save(os.path.join(tmp_folder, filename))
        count += 1
    # create iterator
    generator = preprocessing_image.ImageDataGenerator(
        validation_split=validation_split)
    with self.assertRaises(ValueError):
      generator.flow_from_directory(tmp_folder, subset='foo')
    num_validation = int(count * validation_split)
    num_training = count - num_validation
    train_iterator = generator.flow_from_directory(
        tmp_folder, subset='training')
    self.assertEqual(train_iterator.samples, num_training)
    valid_iterator = generator.flow_from_directory(
        tmp_folder, subset='validation')
    self.assertEqual(valid_iterator.samples, num_validation)
    # check number of classes and images
    self.assertEqual(len(train_iterator.class_indices), num_classes)
    self.assertEqual(len(train_iterator.classes), num_training)
    self.assertEqual(
        len(set(train_iterator.filenames) & set(filenames)), num_training)
    model = sequential.Sequential([layers.Flatten(), layers.Dense(2)])
    model.compile(optimizer='sgd', loss='mse')
    model.fit(train_iterator, epochs=1)
    shutil.rmtree(tmp_folder)

  @keras_parameterized.run_all_keras_modes
  def test_directory_iterator_with_validation_split_25_percent(self):
    self.directory_iterator_with_validation_split_test_helper(0.25)

  @keras_parameterized.run_all_keras_modes
  def test_directory_iterator_with_validation_split_40_percent(self):
    self.directory_iterator_with_validation_split_test_helper(0.40)

  @keras_parameterized.run_all_keras_modes
  def test_directory_iterator_with_validation_split_50_percent(self):
    self.directory_iterator_with_validation_split_test_helper(0.50)

  def test_img_utils(self):
    """`array_to_img`/`img_to_array` round-trip in both data formats."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    height, width = 10, 8
    # Test channels_first data format
    x = np.random.random((3, height, width))
    img = preprocessing_image.array_to_img(
        x, data_format='channels_first')
    self.assertEqual(img.size, (width, height))
    x = preprocessing_image.img_to_array(
        img, data_format='channels_first')
    self.assertEqual(x.shape, (3, height, width))
    # Test 2D
    x = np.random.random((1, height, width))
    img = preprocessing_image.array_to_img(
        x, data_format='channels_first')
    self.assertEqual(img.size, (width, height))
    x = preprocessing_image.img_to_array(
        img, data_format='channels_first')
    self.assertEqual(x.shape, (1, height, width))
    # Test channels_last data format
    x = np.random.random((height, width, 3))
    img = preprocessing_image.array_to_img(x, data_format='channels_last')
    self.assertEqual(img.size, (width, height))
    x = preprocessing_image.img_to_array(img, data_format='channels_last')
    self.assertEqual(x.shape, (height, width, 3))
    # Test 2D
    x = np.random.random((height, width, 1))
    img = preprocessing_image.array_to_img(x, data_format='channels_last')
    self.assertEqual(img.size, (width, height))
    x = preprocessing_image.img_to_array(img, data_format='channels_last')
    self.assertEqual(x.shape, (height, width, 1))

  def test_batch_standardize(self):
    """`standardize` accepts whole batches, not just single samples."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    # ImageDataGenerator.standardize should work on batches
    for test_images in _generate_test_images():
      img_list = []
      for im in test_images:
        img_list.append(preprocessing_image.img_to_array(im)[None, ...])
      images = np.vstack(img_list)
      generator = preprocessing_image.ImageDataGenerator(
          featurewise_center=True,
          samplewise_center=True,
          featurewise_std_normalization=True,
          samplewise_std_normalization=True,
          zca_whitening=True,
          rotation_range=90.,
          width_shift_range=0.1,
          height_shift_range=0.1,
          shear_range=0.5,
          zoom_range=0.2,
          channel_shift_range=0.,
          brightness_range=(1, 5),
          fill_mode='nearest',
          cval=0.5,
          horizontal_flip=True,
          vertical_flip=True)
      generator.fit(images, augment=True)
      transformed = np.copy(images)
      for i, im in enumerate(transformed):
        transformed[i] = generator.random_transform(im)
      transformed = generator.standardize(transformed)

  def test_img_transforms(self):
    """Standalone transform helpers run on channels_first arrays."""
    x = np.random.random((3, 200, 200))
    _ = preprocessing_image.random_rotation(x, 20)
    _ = preprocessing_image.random_shift(x, 0.2, 0.2)
    _ = preprocessing_image.random_shear(x, 2.)
    _ = preprocessing_image.random_zoom(x, (0.5, 0.5))
    _ = preprocessing_image.apply_channel_shift(x, 2, 2)
    _ = preprocessing_image.apply_affine_transform(x, 2)
    with self.assertRaises(ValueError):
      preprocessing_image.random_zoom(x, (0, 0, 0))
    _ = preprocessing_image.random_channel_shift(x, 2.)
if __name__ == '__main__':
  # Run the suite under TF2 semantics (eager execution by default).
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| 15,990 | 34.694196 | 80 | py |
keras | keras-master/keras/preprocessing/image_dataset.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image dataset loading utilities."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras.layers.preprocessing import image_preprocessing
from keras.preprocessing import dataset_utils
from keras.preprocessing import image as keras_image_ops
from tensorflow.python.util.tf_export import keras_export
ALLOWLIST_FORMATS = ('.bmp', '.gif', '.jpeg', '.jpg', '.png')
@keras_export('keras.utils.image_dataset_from_directory',
              'keras.preprocessing.image_dataset_from_directory',
              v1=[])
def image_dataset_from_directory(directory,
                                 labels='inferred',
                                 label_mode='int',
                                 class_names=None,
                                 color_mode='rgb',
                                 batch_size=32,
                                 image_size=(256, 256),
                                 shuffle=True,
                                 seed=None,
                                 validation_split=None,
                                 subset=None,
                                 interpolation='bilinear',
                                 follow_links=False,
                                 crop_to_aspect_ratio=False,
                                 **kwargs):
  """Generates a `tf.data.Dataset` from image files in a directory.

  If your directory structure is:

  ```
  main_directory/
  ...class_a/
  ......a_image_1.jpg
  ......a_image_2.jpg
  ...class_b/
  ......b_image_1.jpg
  ......b_image_2.jpg
  ```

  Then calling `image_dataset_from_directory(main_directory, labels='inferred')`
  will return a `tf.data.Dataset` that yields batches of images from
  the subdirectories `class_a` and `class_b`, together with labels
  0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

  Supported image formats: jpeg, png, bmp, gif.
  Animated gifs are truncated to the first frame.

  Args:
    directory: Directory where the data is located.
      If `labels` is "inferred", it should contain
      subdirectories, each containing images for a class.
      Otherwise, the directory structure is ignored.
    labels: Either "inferred"
      (labels are generated from the directory structure),
      None (no labels),
      or a list/tuple of integer labels of the same size as the number of
      image files found in the directory. Labels should be sorted according
      to the alphanumeric order of the image file paths
      (obtained via `os.walk(directory)` in Python).
    label_mode:
      - 'int': means that the labels are encoded as integers
        (e.g. for `sparse_categorical_crossentropy` loss).
      - 'categorical' means that the labels are
        encoded as a categorical vector
        (e.g. for `categorical_crossentropy` loss).
      - 'binary' means that the labels (there can be only 2)
        are encoded as `float32` scalars with values 0 or 1
        (e.g. for `binary_crossentropy`).
      - None (no labels).
    class_names: Only valid if "labels" is "inferred". This is the explicit
      list of class names (must match names of subdirectories). Used
      to control the order of the classes
      (otherwise alphanumerical order is used).
    color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
      Whether the images will be converted to
      have 1, 3, or 4 channels.
    batch_size: Size of the batches of data. Default: 32.
    image_size: Size to resize images to after they are read from disk.
      Defaults to `(256, 256)`.
      Since the pipeline processes batches of images that must all have
      the same size, this must be provided.
    shuffle: Whether to shuffle the data. Default: True.
      If set to False, sorts the data in alphanumeric order.
    seed: Optional random seed for shuffling and transformations.
    validation_split: Optional float between 0 and 1,
      fraction of data to reserve for validation.
    subset: One of "training" or "validation".
      Only used if `validation_split` is set.
    interpolation: String, the interpolation method used when resizing images.
      Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`,
      `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.
    follow_links: Whether to visits subdirectories pointed to by symlinks.
      Defaults to False.
    crop_to_aspect_ratio: If True, resize the images without aspect
      ratio distortion. When the original aspect ratio differs from the target
      aspect ratio, the output image will be cropped so as to return the largest
      possible window in the image (of size `image_size`) that matches
      the target aspect ratio. By default (`crop_to_aspect_ratio=False`),
      aspect ratio may not be preserved.
    **kwargs: Legacy keyword arguments.

  Returns:
    A `tf.data.Dataset` object.
      - If `label_mode` is None, it yields `float32` tensors of shape
        `(batch_size, image_size[0], image_size[1], num_channels)`,
        encoding images (see below for rules regarding `num_channels`).
      - Otherwise, it yields a tuple `(images, labels)`, where `images`
        has shape `(batch_size, image_size[0], image_size[1], num_channels)`,
        and `labels` follows the format described below.

  Rules regarding labels format:
    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
      `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
      1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorial`, the labels are a `float32` tensor
      of shape `(batch_size, num_classes)`, representing a one-hot
      encoding of the class index.

  Rules regarding number of channels in the yielded images:
    - if `color_mode` is `grayscale`,
      there's 1 channel in the image tensors.
    - if `color_mode` is `rgb`,
      there are 3 channel in the image tensors.
    - if `color_mode` is `rgba`,
      there are 4 channel in the image tensors.

  Raises:
    ValueError: If `labels`, `label_mode`, `color_mode`, `class_names`, or
      the validation-split arguments are inconsistent, or if no images are
      found in `directory`.
    TypeError: If an unknown keyword argument is passed.
  """
  # Legacy alias: `smart_resize` was renamed to `crop_to_aspect_ratio`.
  if 'smart_resize' in kwargs:
    crop_to_aspect_ratio = kwargs.pop('smart_resize')
  if kwargs:
    raise TypeError(f'Unknown keywords argument(s): {tuple(kwargs.keys())}')
  if labels not in ('inferred', None):
    if not isinstance(labels, (list, tuple)):
      raise ValueError(
          '`labels` argument should be a list/tuple of integer labels, of '
          'the same size as the number of image files in the target '
          'directory. If you wish to infer the labels from the subdirectory '
          'names in the target directory, pass `labels="inferred"`. '
          'If you wish to get a dataset that only contains images '
          f'(no labels), pass `labels=None`. Received: labels={labels}')
    if class_names:
      raise ValueError('You can only pass `class_names` if '
                       f'`labels="inferred"`. Received: labels={labels}, and '
                       f'class_names={class_names}')
  if label_mode not in {'int', 'categorical', 'binary', None}:
    raise ValueError(
        '`label_mode` argument must be one of "int", "categorical", "binary", '
        f'or None. Received: label_mode={label_mode}')
  if labels is None or label_mode is None:
    labels = None
    label_mode = None
  if color_mode == 'rgb':
    num_channels = 3
  elif color_mode == 'rgba':
    num_channels = 4
  elif color_mode == 'grayscale':
    num_channels = 1
  else:
    # Fixed "rbg" -> "rgb" typo in the error message.
    raise ValueError(
        '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
        f'Received: color_mode={color_mode}')
  interpolation = image_preprocessing.get_interpolation(interpolation)
  dataset_utils.check_validation_split_arg(
      validation_split, subset, shuffle, seed)

  if seed is None:
    seed = np.random.randint(1e6)
  image_paths, labels, class_names = dataset_utils.index_directory(
      directory,
      labels,
      formats=ALLOWLIST_FORMATS,
      class_names=class_names,
      shuffle=shuffle,
      seed=seed,
      follow_links=follow_links)

  if label_mode == 'binary' and len(class_names) != 2:
    raise ValueError(
        f'When passing `label_mode="binary"`, there must be exactly 2 '
        f'class_names. Received: class_names={class_names}')

  image_paths, labels = dataset_utils.get_training_or_validation_split(
      image_paths, labels, validation_split, subset)
  if not image_paths:
    raise ValueError(f'No images found in directory {directory}. '
                     f'Allowed formats: {ALLOWLIST_FORMATS}')

  dataset = paths_and_labels_to_dataset(
      image_paths=image_paths,
      image_size=image_size,
      num_channels=num_channels,
      labels=labels,
      label_mode=label_mode,
      num_classes=len(class_names),
      interpolation=interpolation,
      crop_to_aspect_ratio=crop_to_aspect_ratio)
  if shuffle:
    # Shuffle locally at each iteration
    dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
  # Batch first, then prefetch: prefetching after batching buffers whole
  # batches instead of individual decoded images, per tf.data performance
  # guidance (prefetch should be the last transformation in the pipeline).
  dataset = dataset.batch(batch_size)
  dataset = dataset.prefetch(tf.data.AUTOTUNE)
  # Users may need to reference `class_names`.
  dataset.class_names = class_names
  # Include file paths for images as attribute.
  dataset.file_paths = image_paths
  return dataset
def paths_and_labels_to_dataset(image_paths,
                                image_size,
                                num_channels,
                                labels,
                                label_mode,
                                num_classes,
                                interpolation,
                                crop_to_aspect_ratio=False):
  """Builds a `tf.data.Dataset` of decoded images (and optionally labels).

  Each path in `image_paths` is loaded via `load_image` with the given
  decoding/resizing options. When `label_mode` is set, the image dataset is
  zipped with a label dataset so the result yields `(image, label)` pairs;
  otherwise it yields plain image tensors.
  """
  # TODO(fchollet): consider making num_parallel_calls settable
  load_args = (image_size, num_channels, interpolation, crop_to_aspect_ratio)
  img_ds = tf.data.Dataset.from_tensor_slices(image_paths).map(
      lambda path: load_image(path, *load_args))
  if not label_mode:
    return img_ds
  label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
  return tf.data.Dataset.zip((img_ds, label_ds))
def load_image(path, image_size, num_channels, interpolation,
               crop_to_aspect_ratio=False):
  """Reads, decodes, and resizes a single image file to a fixed shape.

  Animated formats are decoded as a single frame (`expand_animations=False`).
  When `crop_to_aspect_ratio` is true the image is resized without aspect
  distortion via `smart_resize`; otherwise a plain resize is used. The static
  shape is pinned so downstream graph code sees `(height, width, channels)`.
  """
  raw = tf.io.read_file(path)
  decoded = tf.image.decode_image(
      raw, channels=num_channels, expand_animations=False)
  if crop_to_aspect_ratio:
    resized = keras_image_ops.smart_resize(decoded, image_size,
                                           interpolation=interpolation)
  else:
    resized = tf.image.resize(decoded, image_size, method=interpolation)
  resized.set_shape((image_size[0], image_size[1], num_channels))
  return resized
| 11,380 | 42.273764 | 80 | py |
keras | keras-master/keras/preprocessing/text_test.py | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for text data preprocessing utils."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.preprocessing import text as preprocessing_text
class TestText(tf.test.TestCase):
  """Unit tests for the text preprocessing utilities (one_hot, Tokenizer,
  hashing_trick, text_to_word_sequence)."""
  def test_one_hot(self):
    """`one_hot` yields one index per token, each within `[0, n)`."""
    text = 'The cat sat on the mat.'
    encoded = preprocessing_text.one_hot(text, 5)
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 0)
    # Test on unicode.
    text = u'The cat sat on the mat.'
    encoded = preprocessing_text.one_hot(text, 5)
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 0)
  def test_tokenizer(self):
    """Tokenizer fits on texts/sequences and produces bounded indexes and
    correctly-shaped matrices for each output mode."""
    texts = [
        'The cat sat on the mat.',
        'The dog sat on the log.',
        'Dogs and cats living together.'
    ]
    tokenizer = preprocessing_text.Tokenizer(num_words=10)
    tokenizer.fit_on_texts(texts)
    sequences = []
    for seq in tokenizer.texts_to_sequences_generator(texts):
      sequences.append(seq)
    # Word indexes must stay below `num_words` and start at 1 (0 is reserved).
    self.assertLess(np.max(np.max(sequences)), 10)
    self.assertEqual(np.min(np.min(sequences)), 1)
    tokenizer.fit_on_sequences(sequences)
    for mode in ['binary', 'count', 'tfidf', 'freq']:
      matrix = tokenizer.texts_to_matrix(texts, mode)
      self.assertEqual(matrix.shape, (3, 10))
  def test_hashing_trick_hash(self):
    """`hashing_trick` with the default hash maps each token into `[1, n)`."""
    text = 'The cat sat on the mat.'
    encoded = preprocessing_text.hashing_trick(text, 5)
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 1)
  def test_hashing_trick_md5(self):
    """`hashing_trick` accepts 'md5' as an alternative hash function."""
    text = 'The cat sat on the mat.'
    encoded = preprocessing_text.hashing_trick(
        text, 5, hash_function='md5')
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 1)
  def test_tokenizer_oov_flag(self):
    """Out-of-vocabulary words are dropped by default but kept (as the OOV
    token) when `oov_token` is configured."""
    x_train = ['This text has only known words']
    x_test = ['This text has some unknown words']  # 2 OOVs: some, unknown
    # Default, without OOV flag
    tokenizer = preprocessing_text.Tokenizer()
    tokenizer.fit_on_texts(x_train)
    x_test_seq = tokenizer.texts_to_sequences(x_test)
    self.assertEqual(len(x_test_seq[0]), 4)  # discards 2 OOVs
    # With OOV feature
    tokenizer = preprocessing_text.Tokenizer(oov_token='<unk>')
    tokenizer.fit_on_texts(x_train)
    x_test_seq = tokenizer.texts_to_sequences(x_test)
    self.assertEqual(len(x_test_seq[0]), 6)  # OOVs marked in place
  def test_sequential_fit(self):
    """Tokenizer may be fit repeatedly, on raw texts and on pre-tokenized
    word sequences, accumulating the document count."""
    texts = [
        'The cat sat on the mat.', 'The dog sat on the log.',
        'Dogs and cats living together.'
    ]
    word_sequences = [['The', 'cat', 'is', 'sitting'],
                      ['The', 'dog', 'is', 'standing']]
    tokenizer = preprocessing_text.Tokenizer()
    tokenizer.fit_on_texts(texts)
    tokenizer.fit_on_texts(word_sequences)
    # 3 texts + 2 word sequences seen in total.
    self.assertEqual(tokenizer.document_count, 5)
    tokenizer.texts_to_matrix(texts)
    tokenizer.texts_to_matrix(word_sequences)
  def test_text_to_word_sequence(self):
    """Punctuation is filtered out before splitting on whitespace."""
    text = 'hello! ? world!'
    seq = preprocessing_text.text_to_word_sequence(text)
    self.assertEqual(seq, ['hello', 'world'])
  def test_text_to_word_sequence_multichar_split(self):
    """A multi-character separator is honored via `split`."""
    text = 'hello!stop?world!'
    seq = preprocessing_text.text_to_word_sequence(text, split='stop')
    self.assertEqual(seq, ['hello', 'world'])
  def test_text_to_word_sequence_unicode(self):
    """Unicode input is split into unicode tokens."""
    text = u'ali! veli? kırk dokuz elli'
    seq = preprocessing_text.text_to_word_sequence(text)
    self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli'])
  def test_text_to_word_sequence_unicode_multichar_split(self):
    """Unicode input with a multi-character separator is split correctly."""
    text = u'ali!stopveli?stopkırkstopdokuzstopelli'
    seq = preprocessing_text.text_to_word_sequence(text, split='stop')
    self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli'])
  def test_tokenizer_unicode(self):
    """Tokenizer counts distinct unicode words."""
    texts = [
        u'ali veli kırk dokuz elli', u'ali veli kırk dokuz elli veli kırk dokuz'
    ]
    tokenizer = preprocessing_text.Tokenizer(num_words=5)
    tokenizer.fit_on_texts(texts)
    self.assertEqual(len(tokenizer.word_counts), 5)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 4,972 | 34.269504 | 80 | py |
keras | keras-master/keras/preprocessing/image_dataset_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_dataset."""
import tensorflow.compat.v2 as tf
import os
import shutil
import numpy as np
from keras import keras_parameterized
from keras.preprocessing import image as image_preproc
from keras.preprocessing import image_dataset
try:
import PIL # pylint:disable=g-import-not-at-top
except ImportError:
PIL = None
class ImageDatasetFromDirectoryTest(keras_parameterized.TestCase):
  """Integration tests for `image_dataset.image_dataset_from_directory`.

  Tests that require PIL (image encoding) self-skip when it is unavailable.
  """
  def _get_images(self, count=16, color_mode='rgb'):
    """Returns `count` random 24x24 PIL images; channel count follows
    `color_mode` (1 for grayscale, 4 for rgba, 3 otherwise)."""
    width = height = 24
    imgs = []
    for _ in range(count):
      if color_mode == 'grayscale':
        img = np.random.randint(0, 256, size=(height, width, 1))
      elif color_mode == 'rgba':
        img = np.random.randint(0, 256, size=(height, width, 4))
      else:
        img = np.random.randint(0, 256, size=(height, width, 3))
      img = image_preproc.array_to_img(img)
      imgs.append(img)
    return imgs
  def _prepare_directory(self,
                         num_classes=2,
                         grayscale=False,
                         nested_dirs=False,
                         color_mode='rgb',
                         count=16):
    """Creates a temp directory of class subfolders populated with images.

    Images are distributed round-robin across the generated (sub)folders.
    Returns the root temp directory path; cleanup is registered on the test.
    """
    # Get a unique temp directory
    temp_dir = os.path.join(self.get_temp_dir(), str(np.random.randint(1e6)))
    os.mkdir(temp_dir)
    self.addCleanup(shutil.rmtree, temp_dir)
    # Generate paths to class subdirectories
    paths = []
    for class_index in range(num_classes):
      class_directory = 'class_%s' % (class_index,)
      if nested_dirs:
        class_paths = [
            class_directory, os.path.join(class_directory, 'subfolder_1'),
            os.path.join(class_directory, 'subfolder_2'), os.path.join(
                class_directory, 'subfolder_1', 'sub-subfolder')
        ]
      else:
        class_paths = [class_directory]
      for path in class_paths:
        os.mkdir(os.path.join(temp_dir, path))
      paths += class_paths
    # Save images to the paths
    i = 0
    for img in self._get_images(color_mode=color_mode, count=count):
      path = paths[i % len(paths)]
      # JPEG cannot store alpha or single-channel data reliably, so only
      # rgb uses .jpg; other modes are saved as .png.
      if color_mode == 'rgb':
        ext = 'jpg'
      else:
        ext = 'png'
      filename = os.path.join(path, 'image_%s.%s' % (i, ext))
      img.save(os.path.join(temp_dir, filename))
      i += 1
    return temp_dir
  def test_image_dataset_from_directory_standalone(self):
    """With `labels=None`, images in the root and subdirs are all yielded."""
    # Test retrieving images without labels from a directory and its subdirs.
    if PIL is None:
      return  # Skip test if PIL is not available.
    # Save a few extra images in the parent directory.
    directory = self._prepare_directory(count=7, num_classes=2)
    for i, img in enumerate(self._get_images(3)):
      filename = 'image_%s.jpg' % (i,)
      img.save(os.path.join(directory, filename))
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=5, image_size=(18, 18), labels=None)
    batch = next(iter(dataset))
    # We return plain images
    self.assertEqual(batch.shape, (5, 18, 18, 3))
    self.assertEqual(batch.dtype.name, 'float32')
    # Count samples: 7 in class dirs + 3 in the root = 10, in 2 batches of 5.
    batch_count = 0
    sample_count = 0
    for batch in dataset:
      batch_count += 1
      sample_count += batch.shape[0]
    self.assertEqual(batch_count, 2)
    self.assertEqual(sample_count, 10)
  def test_image_dataset_from_directory_binary(self):
    """Label tensor shape/dtype follows `label_mode` for a 2-class setup."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=2)
    # 'int' mode: scalar int labels per sample.
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='int')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8,))
    self.assertEqual(batch[1].dtype.name, 'int32')
    # 'binary' mode: float labels of shape (batch, 1).
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='binary')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8, 1))
    self.assertEqual(batch[1].dtype.name, 'float32')
    # 'categorical' mode: one-hot float labels of shape (batch, num_classes).
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='categorical')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8, 2))
    self.assertEqual(batch[1].dtype.name, 'float32')
  def test_static_shape_in_graph(self):
    """Inside a tf.function, batches carry static shape (None, 18, 18, 3)."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=2)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='int')
    test_case = self
    @tf.function
    def symbolic_fn(ds):
      for x, _ in ds.take(1):
        test_case.assertListEqual(x.shape.as_list(), [None, 18, 18, 3])
    symbolic_fn(dataset)
  def test_sample_count(self):
    """All files are yielded even when the total isn't a batch multiple."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=4, count=15)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None)
    sample_count = 0
    for batch in dataset:
      sample_count += batch.shape[0]
    self.assertEqual(sample_count, 15)
  def test_image_dataset_from_directory_multiclass(self):
    """Label shapes/dtypes for a 4-class directory across label modes."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=4, count=15)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None)
    batch = next(iter(dataset))
    self.assertEqual(batch.shape, (8, 18, 18, 3))
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None)
    sample_count = 0
    iterator = iter(dataset)
    # NOTE(review): advances a separate `iterator` in lockstep with the
    # `for` loop over `dataset`; both yield the same number of batches.
    for batch in dataset:
      sample_count += next(iterator).shape[0]
    self.assertEqual(sample_count, 15)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='int')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8,))
    self.assertEqual(batch[1].dtype.name, 'int32')
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode='categorical')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    self.assertEqual(batch[0].dtype.name, 'float32')
    self.assertEqual(batch[1].shape, (8, 4))
    self.assertEqual(batch[1].dtype.name, 'float32')
  def test_image_dataset_from_directory_color_modes(self):
    """Channel dimension follows `color_mode` (rgba -> 4, grayscale -> 1)."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=4, color_mode='rgba')
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), color_mode='rgba')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 4))
    self.assertEqual(batch[0].dtype.name, 'float32')
    directory = self._prepare_directory(num_classes=4, color_mode='grayscale')
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), color_mode='grayscale')
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 1))
    self.assertEqual(batch[0].dtype.name, 'float32')
  def test_image_dataset_from_directory_validation_split(self):
    """A 0.2 split over 10 files gives 8 training and 2 validation samples."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=2, count=10)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=10, image_size=(18, 18),
        validation_split=0.2, subset='training', seed=1337)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (8, 18, 18, 3))
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=10, image_size=(18, 18),
        validation_split=0.2, subset='validation', seed=1337)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (2, 18, 18, 3))
  def test_image_dataset_from_directory_manual_labels(self):
    """Explicitly passed `labels` are attached in file order when not
    shuffled."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=2, count=2)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18),
        labels=[0, 1], shuffle=False)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertAllClose(batch[1], [0, 1])
  def test_image_dataset_from_directory_follow_links(self):
    """`follow_links=True` traverses nested subdirectories for files."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=2, count=25,
                                        nested_dirs=True)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=8, image_size=(18, 18), label_mode=None,
        follow_links=True)
    sample_count = 0
    for batch in dataset:
      sample_count += batch.shape[0]
    self.assertEqual(sample_count, 25)
  def test_image_dataset_from_directory_no_images(self):
    """An empty directory raises a descriptive ValueError."""
    directory = self._prepare_directory(num_classes=2, count=0)
    with self.assertRaisesRegex(ValueError, 'No images found.'):
      _ = image_dataset.image_dataset_from_directory(directory)
  def test_image_dataset_from_directory_crop_to_aspect_ratio(self):
    """`crop_to_aspect_ratio=True` still produces the requested size."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=2, count=5)
    dataset = image_dataset.image_dataset_from_directory(
        directory, batch_size=5, image_size=(18, 18), crop_to_aspect_ratio=True)
    batch = next(iter(dataset))
    self.assertLen(batch, 2)
    self.assertEqual(batch[0].shape, (5, 18, 18, 3))
  def test_image_dataset_from_directory_errors(self):
    """Invalid argument combinations raise ValueError with clear messages."""
    if PIL is None:
      return  # Skip test if PIL is not available.
    directory = self._prepare_directory(num_classes=3, count=5)
    with self.assertRaisesRegex(ValueError, '`labels` argument should be'):
      _ = image_dataset.image_dataset_from_directory(
          directory, labels='other')
    with self.assertRaisesRegex(ValueError, '`label_mode` argument must be'):
      _ = image_dataset.image_dataset_from_directory(
          directory, label_mode='other')
    with self.assertRaisesRegex(ValueError, '`color_mode` must be one of'):
      _ = image_dataset.image_dataset_from_directory(
          directory, color_mode='other')
    with self.assertRaisesRegex(
        ValueError, 'only pass `class_names` if `labels="inferred"`'):
      _ = image_dataset.image_dataset_from_directory(
          directory, labels=[0, 0, 1, 1, 1],
          class_names=['class_0', 'class_1', 'class_2'])
    with self.assertRaisesRegex(
        ValueError,
        'Expected the lengths of `labels` to match the number of files'):
      _ = image_dataset.image_dataset_from_directory(
          directory, labels=[0, 0, 1, 1])
    with self.assertRaisesRegex(
        ValueError, '`class_names` passed did not match'):
      _ = image_dataset.image_dataset_from_directory(
          directory, class_names=['class_0', 'class_2'])
    with self.assertRaisesRegex(ValueError, 'there must be exactly 2'):
      _ = image_dataset.image_dataset_from_directory(
          directory, label_mode='binary')
    with self.assertRaisesRegex(ValueError,
                                '`validation_split` must be between 0 and 1'):
      _ = image_dataset.image_dataset_from_directory(
          directory, validation_split=2)
    with self.assertRaisesRegex(ValueError,
                                '`subset` must be either "training" or'):
      _ = image_dataset.image_dataset_from_directory(
          directory, validation_split=0.2, subset='other')
    with self.assertRaisesRegex(ValueError, '`validation_split` must be set'):
      _ = image_dataset.image_dataset_from_directory(
          directory, validation_split=0, subset='training')
    with self.assertRaisesRegex(ValueError, 'must provide a `seed`'):
      _ = image_dataset.image_dataset_from_directory(
          directory, validation_split=0.2, subset='training')
# Run under TF2 behavior, matching how the Keras test suite executes.
if __name__ == '__main__':
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| 13,542 | 37.365439 | 80 | py |
keras | keras-master/keras/preprocessing/text.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for text input preprocessing."""
# pylint: disable=invalid-name
from keras_preprocessing import text
from keras.preprocessing.text_dataset import text_dataset_from_directory # pylint: disable=unused-import
from tensorflow.python.util.tf_export import keras_export
# Re-export the implementations provided by the `keras_preprocessing` package.
hashing_trick = text.hashing_trick
Tokenizer = text.Tokenizer
@keras_export('keras.preprocessing.text.text_to_word_sequence')
def text_to_word_sequence(input_text,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=True,
                          split=' '):
  """Splits a string of text into a list of word tokens.

  Characters listed in `filters` (common punctuation plus tabs and newlines
  by default) are stripped before the remaining text is split on `split`.

  >>> sample_text = 'This is a sample sentence.'
  >>> tf.keras.preprocessing.text.text_to_word_sequence(sample_text)
  ['this', 'is', 'a', 'sample', 'sentence']

  Args:
    input_text: Input text (string).
    filters: list (or concatenation) of characters to filter out, such as
      punctuation. Default: ``'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n'``,
      includes basic punctuation, tabs, and newlines.
    lower: boolean. Whether to convert the input to lowercase.
    split: str. Separator for word splitting.

  Returns:
    A list of words (or tokens).
  """
  # Delegate to the keras_preprocessing implementation.
  options = dict(filters=filters, lower=lower, split=split)
  return text.text_to_word_sequence(input_text, **options)
@keras_export('keras.preprocessing.text.one_hot')
def one_hot(input_text,
            n,
            filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
            lower=True,
            split=' '):
  r"""Encodes a text as a list of word indexes of vocabulary size `n`.

  Each word (token) of the input is hashed into an integer index; two
  distinct words may collide onto the same index.

  Args:
    input_text: Input text (string).
    n: int. Size of vocabulary.
    filters: list (or concatenation) of characters to filter out, such as
      punctuation. Default:
      ```
      '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n
      ```,
      includes basic punctuation, tabs, and newlines.
    lower: boolean. Whether to set the text to lowercase.
    split: str. Separator for word splitting.

  Returns:
    List of integers in `[1, n]`. Each integer encodes a word
    (unicity non-guaranteed).
  """
  # Delegate to the keras_preprocessing implementation.
  tokenize_options = {'filters': filters, 'lower': lower, 'split': split}
  return text.one_hot(input_text, n, **tokenize_options)
# `text.tokenizer_from_json` is only available if keras_preprocessing >= 1.1.0;
# tolerate its absence on older versions of the package.
try:
  tokenizer_from_json = text.tokenizer_from_json
  keras_export('keras.preprocessing.text.tokenizer_from_json', allow_multiple_exports=True)(
      tokenizer_from_json)
except AttributeError:
  # Older keras_preprocessing: skip exporting the missing symbol.
  pass
# Register the re-exported symbols under the public Keras API namespace.
keras_export('keras.preprocessing.text.hashing_trick', allow_multiple_exports=True)(hashing_trick)
keras_export('keras.preprocessing.text.Tokenizer', allow_multiple_exports=True)(Tokenizer)
| 3,685 | 37 | 105 | py |
keras | keras-master/keras/preprocessing/timeseries.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras timeseries dataset utilities."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.timeseries_dataset_from_array',
              'keras.preprocessing.timeseries_dataset_from_array',
              v1=[])
def timeseries_dataset_from_array(
    data,
    targets,
    sequence_length,
    sequence_stride=1,
    sampling_rate=1,
    batch_size=128,
    shuffle=False,
    seed=None,
    start_index=None,
    end_index=None):
  """Creates a dataset of sliding windows over a timeseries provided as array.

  This function takes in a sequence of data-points gathered at
  equal intervals, along with time series parameters such as
  length of the sequences/windows, spacing between two sequence/windows, etc.,
  to produce batches of timeseries inputs and targets.

  Args:
    data: Numpy array or eager tensor
      containing consecutive data points (timesteps).
      Axis 0 is expected to be the time dimension.
    targets: Targets corresponding to timesteps in `data`.
      `targets[i]` should be the target
      corresponding to the window that starts at index `i`
      (see example 2 below).
      Pass None if you don't have target data (in this case the dataset will
      only yield the input data).
    sequence_length: Length of the output sequences (in number of timesteps).
    sequence_stride: Period between successive output sequences.
      For stride `s`, output samples would
      start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc.
    sampling_rate: Period between successive individual timesteps
      within sequences. For rate `r`, timesteps
      `data[i], data[i + r], ... data[i + sequence_length]`
      are used for create a sample sequence.
    batch_size: Number of timeseries samples in each batch
      (except maybe the last one).
    shuffle: Whether to shuffle output samples,
      or instead draw them in chronological order.
    seed: Optional int; random seed for shuffling.
    start_index: Optional int; data points earlier (exclusive)
      than `start_index` will not be used
      in the output sequences. This is useful to reserve part of the
      data for test or validation.
    end_index: Optional int; data points later (exclusive) than `end_index`
      will not be used in the output sequences.
      This is useful to reserve part of the data for test or validation.

  Returns:
    A tf.data.Dataset instance. If `targets` was passed, the dataset yields
    tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
    only `batch_of_sequences`.

  Example 1:
    Consider indices `[0, 1, ... 99]`.
    With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
    `shuffle=False`, the dataset will yield batches of sequences
    composed of the following indices:

    ```
    First sequence:  [0  2  4  6  8 10 12 14 16 18]
    Second sequence: [3  5  7  9 11 13 15 17 19 21]
    Third sequence:  [6  8 10 12 14 16 18 20 22 24]
    ...
    Last sequence:   [78 80 82 84 86 88 90 92 94 96]
    ```

    In this case the last 3 data points are discarded since no full sequence
    can be generated to include them (the next sequence would have started
    at index 81, and thus its last step would have gone over 99).

  Example 2: Temporal regression.
    Consider an array `data` of scalar values, of shape `(steps,)`.
    To generate a dataset that uses the past 10
    timesteps to predict the next timestep, you would use:

    ```python
    input_data = data[:-10]
    targets = data[10:]
    dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
        input_data, targets, sequence_length=10)
    for batch in dataset:
      inputs, targets = batch
      assert np.array_equal(inputs[0], data[:10])  # First sequence: steps [0-9]
      assert np.array_equal(targets[0], data[10])  # Corresponding target: step 10
      break
    ```

  Example 3: Temporal regression for many-to-many architectures.
    Consider two arrays of scalar values `X` and `Y`,
    both of shape `(100,)`. The resulting dataset should consist samples with
    20 timestamps each. The samples should not overlap.
    To generate a dataset that uses the current timestamp
    to predict the corresponding target timestep, you would use:

    ```python
    X = np.arange(100)
    Y = X*2

    sample_length = 20
    input_dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
        X, None, sequence_length=sample_length, sequence_stride=sample_length)
    target_dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
        Y, None, sequence_length=sample_length, sequence_stride=sample_length)

    for batch in zip(input_dataset, target_dataset):
      inputs, targets = batch
      assert np.array_equal(inputs[0], X[:sample_length])

      # second sample equals output timestamps 20-40
      assert np.array_equal(targets[1], Y[sample_length:2*sample_length])
      break
    ```
  """
  # Validate the index range. NOTE: the guards use `is not None` rather than
  # truthiness so that an explicit 0 is validated too. Previously
  # `end_index=0` silently bypassed validation (the `end_index <= 0` check
  # below was unreachable for exactly the value it rejects), producing an
  # empty data window downstream instead of a clear error.
  if start_index is not None:
    if start_index < 0:
      raise ValueError(f'`start_index` must be 0 or greater. Received: '
                       f'start_index={start_index}')
    if start_index >= len(data):
      raise ValueError(f'`start_index` must be lower than the length of the '
                       f'data. Received: start_index={start_index}, for data '
                       f'of length {len(data)}')
  if end_index is not None:
    if start_index is not None and end_index <= start_index:
      raise ValueError(f'`end_index` must be higher than `start_index`. '
                       f'Received: start_index={start_index}, and '
                       f'end_index={end_index} ')
    if end_index >= len(data):
      raise ValueError(f'`end_index` must be lower than the length of the '
                       f'data. Received: end_index={end_index}, for data of '
                       f'length {len(data)}')
    if end_index <= 0:
      raise ValueError('`end_index` must be higher than 0. '
                       f'Received: end_index={end_index}')

  # Validate strides
  if sampling_rate <= 0:
    raise ValueError(f'`sampling_rate` must be higher than 0. Received: '
                     f'sampling_rate={sampling_rate}')
  if sampling_rate >= len(data):
    raise ValueError(f'`sampling_rate` must be lower than the length of the '
                     f'data. Received: sampling_rate={sampling_rate}, for data '
                     f'of length {len(data)}')
  if sequence_stride <= 0:
    raise ValueError(f'`sequence_stride` must be higher than 0. Received: '
                     f'sequence_stride={sequence_stride}')
  if sequence_stride >= len(data):
    raise ValueError(f'`sequence_stride` must be lower than the length of the '
                     f'data. Received: sequence_stride={sequence_stride}, for '
                     f'data of length {len(data)}')

  if start_index is None:
    start_index = 0
  if end_index is None:
    end_index = len(data)

  # Determine the lowest dtype to store start positions (to lower memory usage).
  num_seqs = end_index - start_index - (sequence_length * sampling_rate) + 1
  if targets is not None:
    num_seqs = min(num_seqs, len(targets))
  if num_seqs < 2147483647:
    index_dtype = 'int32'
  else:
    index_dtype = 'int64'

  # Generate start positions
  start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
  if shuffle:
    if seed is None:
      seed = np.random.randint(1e6)
    rng = np.random.RandomState(seed)
    rng.shuffle(start_positions)

  sequence_length = tf.cast(sequence_length, dtype=index_dtype)
  sampling_rate = tf.cast(sampling_rate, dtype=index_dtype)

  positions_ds = tf.data.Dataset.from_tensors(start_positions).repeat()

  # For each initial window position, generates indices of the window elements
  indices = tf.data.Dataset.zip(
      (tf.data.Dataset.range(len(start_positions)), positions_ds)).map(
          lambda i, positions: tf.range(  # pylint: disable=g-long-lambda
              positions[i],
              positions[i] + sequence_length * sampling_rate,
              sampling_rate),
          num_parallel_calls=tf.data.AUTOTUNE)

  dataset = sequences_from_indices(data, indices, start_index, end_index)
  if targets is not None:
    indices = tf.data.Dataset.zip(
        (tf.data.Dataset.range(len(start_positions)), positions_ds)).map(
            lambda i, positions: positions[i],
            num_parallel_calls=tf.data.AUTOTUNE)
    target_ds = sequences_from_indices(
        targets, indices, start_index, end_index)
    dataset = tf.data.Dataset.zip((dataset, target_ds))
  if shuffle:
    # Shuffle locally at each iteration
    dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
  dataset = dataset.prefetch(tf.data.AUTOTUNE).batch(batch_size)
  return dataset
def sequences_from_indices(array, indices_ds, start_index, end_index):
  """Gathers windows of `array[start_index:end_index]` for each index tensor
  yielded by `indices_ds`."""
  window = array[start_index:end_index]
  window_ds = tf.data.Dataset.from_tensors(window).repeat()
  return tf.data.Dataset.zip((window_ds, indices_ds)).map(
      lambda steps, inds: tf.gather(steps, inds),  # pylint: disable=unnecessary-lambda
      num_parallel_calls=tf.data.AUTOTUNE)
| 9,808 | 39.870833 | 87 | py |
keras | keras-master/keras/preprocessing/__init__.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides keras data preprocessing utils to pre-process tf.data.Datasets before they are fed to the model."""
# pylint: disable=g-import-not-at-top
# TODO(mihaimaruseac): remove the import of keras_preprocessing and injecting
# once we update to latest version of keras_preprocessing
import keras_preprocessing
from keras import backend
from keras.preprocessing import image
from keras.preprocessing import sequence
from keras.preprocessing import text
from keras.preprocessing import timeseries
from keras.utils import all_utils as utils
# This exists for compatibility with prior version of keras_preprocessing.
# It injects the Keras backend and utils into keras_preprocessing so that
# package can call back into Keras without a circular import at load time.
keras_preprocessing.set_keras_submodules(backend=backend, utils=utils)
| 1,378 | 44.966667 | 111 | py |
keras | keras-master/keras/preprocessing/sequence_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequence data preprocessing utils."""
import tensorflow.compat.v2 as tf
from math import ceil
import numpy as np
from keras.preprocessing import sequence as preprocessing_sequence
class TestSequence(tf.test.TestCase):
  """Tests for `keras.preprocessing.sequence` utilities."""

  def test_pad_sequences(self):
    """Checks `pad_sequences` padding side, truncation side and pad value."""
    a = [[1], [1, 2], [1, 2, 3]]

    # test padding
    b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='pre')
    self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
    b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='post')
    self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])

    # test truncating
    b = preprocessing_sequence.pad_sequences(
        a, maxlen=2, truncating='pre')
    self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
    b = preprocessing_sequence.pad_sequences(
        a, maxlen=2, truncating='post')
    self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])

    # test value
    b = preprocessing_sequence.pad_sequences(a, maxlen=3, value=1)
    self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])

  def test_pad_sequences_vector(self):
    """Same checks as test_pad_sequences, but each timestep is a vector."""
    a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]

    # test padding
    b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='pre')
    self.assertAllClose(b, [[[0, 0], [0, 0], [1, 1]], [[0, 0], [2, 1], [2, 2]],
                            [[3, 1], [3, 2], [3, 3]]])
    b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='post')
    self.assertAllClose(b, [[[1, 1], [0, 0], [0, 0]], [[2, 1], [2, 2], [0, 0]],
                            [[3, 1], [3, 2], [3, 3]]])

    # test truncating
    b = preprocessing_sequence.pad_sequences(
        a, maxlen=2, truncating='pre')
    self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3,
                                                                          3]]])
    b = preprocessing_sequence.pad_sequences(
        a, maxlen=2, truncating='post')
    self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3,
                                                                          2]]])

    # test value
    b = preprocessing_sequence.pad_sequences(a, maxlen=3, value=1)
    self.assertAllClose(b, [[[1, 1], [1, 1], [1, 1]], [[1, 1], [2, 1], [2, 2]],
                            [[3, 1], [3, 2], [3, 3]]])

  def test_make_sampling_table(self):
    """`make_sampling_table` returns the expected probabilities (loose rtol)."""
    a = preprocessing_sequence.make_sampling_table(3)
    self.assertAllClose(
        a, np.asarray([0.00315225, 0.00315225, 0.00547597]), rtol=.1)

  def test_skipgrams(self):
    """`skipgrams` couples stay in-vocabulary; categorical labels have len 2."""
    # test with no window size and binary labels
    couples, labels = preprocessing_sequence.skipgrams(
        np.arange(3), vocabulary_size=3)
    for couple in couples:
      self.assertIn(couple[0], [0, 1, 2])
      self.assertIn(couple[1], [0, 1, 2])

    # test window size and categorical labels
    couples, labels = preprocessing_sequence.skipgrams(
        np.arange(5), vocabulary_size=5, window_size=1, categorical=True)
    for couple in couples:
      self.assertLessEqual(couple[0] - couple[1], 3)
    for l in labels:
      self.assertEqual(len(l), 2)

  def test_remove_long_seq(self):
    """`_remove_long_seq` drops too-long sequences along with their labels."""
    a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
    # Note: with maxlen=3 the length-3 sequence is dropped, so sequences of
    # length >= maxlen are removed (per the expectations below).
    new_seq, new_label = preprocessing_sequence._remove_long_seq(
        maxlen=3, seq=a, label=['a', 'b', ['c', 'd']])
    self.assertEqual(new_seq, [[[1, 1]], [[2, 1], [2, 2]]])
    self.assertEqual(new_label, ['a', 'b'])

  def test_TimeseriesGenerator(self):
    """Exercises TimeseriesGenerator with sampling_rate, reverse, shuffle,
    stride, start/end indices, multi-dimensional data, and the error path."""
    data = np.array([[i] for i in range(50)])
    targets = np.array([[i] for i in range(50)])

    # Baseline: length=10 with sampling_rate=2 yields 5 samples per window.
    data_gen = preprocessing_sequence.TimeseriesGenerator(
        data, targets, length=10, sampling_rate=2, batch_size=2)
    self.assertEqual(len(data_gen), 20)
    self.assertAllClose(data_gen[0][0],
                        np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5],
                                                              [7], [9]]]))
    self.assertAllClose(data_gen[0][1], np.array([[10], [11]]))
    self.assertAllClose(data_gen[1][0],
                        np.array([[[2], [4], [6], [8], [10]], [[3], [5], [7],
                                                               [9], [11]]]))
    self.assertAllClose(data_gen[1][1], np.array([[12], [13]]))

    # reverse=True reverses the order within each window; targets unchanged.
    data_gen = preprocessing_sequence.TimeseriesGenerator(
        data, targets, length=10, sampling_rate=2, reverse=True, batch_size=2)
    self.assertEqual(len(data_gen), 20)
    self.assertAllClose(data_gen[0][0],
                        np.array([[[8], [6], [4], [2], [0]], [[9], [7], [5],
                                                              [3], [1]]]))
    self.assertAllClose(data_gen[0][1], np.array([[10], [11]]))

    # shuffle=True: recover the target value r and check the window matches it.
    data_gen = preprocessing_sequence.TimeseriesGenerator(
        data, targets, length=10, sampling_rate=2, shuffle=True, batch_size=1)
    batch = data_gen[0]
    r = batch[1][0][0]
    self.assertAllClose(batch[0],
                        np.array([[[r - 10], [r - 8], [r - 6], [r - 4],
                                   [r - 2]]]))
    self.assertAllClose(batch[1], np.array([
        [r],
    ]))

    # stride=2 halves the number of generated windows.
    data_gen = preprocessing_sequence.TimeseriesGenerator(
        data, targets, length=10, sampling_rate=2, stride=2, batch_size=2)
    self.assertEqual(len(data_gen), 10)
    self.assertAllClose(data_gen[1][0],
                        np.array([[[4], [6], [8], [10], [12]], [[6], [8], [10],
                                                                [12], [14]]]))
    self.assertAllClose(data_gen[1][1], np.array([[14], [16]]))

    # start_index/end_index restrict the range of usable timesteps.
    data_gen = preprocessing_sequence.TimeseriesGenerator(
        data,
        targets,
        length=10,
        sampling_rate=2,
        start_index=10,
        end_index=30,
        batch_size=2)
    self.assertEqual(len(data_gen), 6)
    self.assertAllClose(data_gen[0][0],
                        np.array([[[10], [12], [14], [16], [18]],
                                  [[11], [13], [15], [17], [19]]]))
    self.assertAllClose(data_gen[0][1], np.array([[20], [21]]))

    # Same check with multi-dimensional data and targets.
    data = np.array([np.random.random_sample((1, 2, 3, 4)) for i in range(50)])
    targets = np.array([np.random.random_sample((3, 2, 1)) for i in range(50)])
    data_gen = preprocessing_sequence.TimeseriesGenerator(
        data,
        targets,
        length=10,
        sampling_rate=2,
        start_index=10,
        end_index=30,
        batch_size=2)
    self.assertEqual(len(data_gen), 6)
    self.assertAllClose(data_gen[0][0],
                        np.array(
                            [np.array(data[10:19:2]),
                             np.array(data[11:20:2])]))
    self.assertAllClose(data_gen[0][1], np.array([targets[20], targets[21]]))

    # A window longer than the available range must raise a ValueError.
    with self.assertRaises(ValueError) as context:
      preprocessing_sequence.TimeseriesGenerator(data, targets, length=50)
    error = str(context.exception)
    self.assertIn('`start_index+length=50 > end_index=49` is disallowed', error)

  def test_TimeSeriesGenerator_doesnt_miss_any_sample(self):
    """Counts batches/sequences to verify no sample is silently dropped."""
    x = np.array([[i] for i in range(10)])

    for length in range(3, 10):
      g = preprocessing_sequence.TimeseriesGenerator(
          x, x, length=length, batch_size=1)
      expected = max(0, len(x) - length)
      actual = len(g)
      self.assertEqual(expected, actual)

      if actual > 0:
        # All elements in range(length, 10) should be used as current step
        expected = np.arange(length, 10).reshape(-1, 1)

        y = np.concatenate([g[ix][1] for ix in range(len(g))], axis=0)
        self.assertAllClose(y, expected)

    x = np.array([[i] for i in range(23)])

    # Several stride/length/batch_size/shuffle combinations, checked against
    # a closed-form count of expected sequences and batches.
    strides = (1, 1, 5, 7, 3, 5, 3)
    lengths = (3, 3, 4, 3, 1, 3, 7)
    batch_sizes = (6, 6, 6, 5, 6, 6, 6)
    shuffles = (False, True, True, False, False, False, False)

    for stride, length, batch_size, shuffle in zip(strides, lengths,
                                                   batch_sizes, shuffles):
      g = preprocessing_sequence.TimeseriesGenerator(
          x,
          x,
          length=length,
          sampling_rate=1,
          stride=stride,
          start_index=0,
          end_index=None,
          shuffle=shuffle,
          reverse=False,
          batch_size=batch_size)
      if shuffle:
        # all batches have the same size when shuffle is True.
        expected_sequences = ceil(
            (23 - length) / float(batch_size * stride)) * batch_size
      else:
        # last batch will be different if `(samples - length) / stride`
        # is not a multiple of `batch_size`.
        expected_sequences = ceil((23 - length) / float(stride))

      expected_batches = ceil(expected_sequences / float(batch_size))

      y = [g[ix][1] for ix in range(len(g))]

      actual_sequences = sum(len(iy) for iy in y)
      actual_batches = len(y)

      self.assertEqual(expected_sequences, actual_sequences)
      self.assertEqual(expected_batches, actual_batches)
if __name__ == '__main__':
tf.test.main()
| 9,596 | 38.331967 | 80 | py |
keras | keras-master/keras/mixed_precision/device_compatibility_check.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains function to log if devices are compatible with mixed precision."""
import tensorflow.compat.v2 as tf
import itertools
from tensorflow.python.platform import tf_logging
# Prefixes/suffix used to assemble the one-time compatibility log message
# emitted by _log_device_compatibility_check below.
_COMPAT_CHECK_PREFIX = 'Mixed precision compatibility check (mixed_float16): '
_COMPAT_CHECK_OK_PREFIX = _COMPAT_CHECK_PREFIX + 'OK'
_COMPAT_CHECK_WARNING_PREFIX = _COMPAT_CHECK_PREFIX + 'WARNING'
_COMPAT_CHECK_WARNING_SUFFIX = (
    'If you will use compatible GPU(s) not attached to this host, e.g. by '
    'running a multi-worker model, you can ignore this warning. This message '
    'will only be logged once')
def _dedup_strings(device_strs):
"""Groups together consecutive identical strings.
For example, given:
['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']
This function returns:
['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']
Args:
device_strs: A list of strings, each representing a device.
Returns:
A copy of the input, but identical consecutive strings are merged into a
single string.
"""
new_device_strs = []
for device_str, vals in itertools.groupby(device_strs):
num = len(list(vals))
if num == 1:
new_device_strs.append(device_str)
else:
new_device_strs.append('%s (x%d)' % (device_str, num))
return new_device_strs
def _log_device_compatibility_check(policy_name, gpu_details_list):
  """Logs a compatibility check if the devices support the policy.

  Currently only logs for the policy mixed_float16.

  Args:
    policy_name: The name of the dtype policy.
    gpu_details_list: A list of dicts, one dict per GPU. Each dict
      is the device details for a GPU, as returned by
      `tf.config.experimental.get_device_details()`.
  """
  if policy_name != 'mixed_float16':
    # TODO(b/145686977): Log if the policy is 'mixed_bfloat16'. This requires
    # checking if a TPU is available.
    return
  # Partition the GPUs into those that benefit from mixed_float16 (compute
  # capability >= 7.0) and those that do not.
  supported_device_strs = []
  unsupported_device_strs = []
  for details in gpu_details_list:
    name = details.get('device_name', 'Unknown GPU')
    cc = details.get('compute_capability')
    if cc:
      device_str = '%s, compute capability %s.%s' % (name, cc[0], cc[1])
      # Lexicographic tuple comparison: (major, minor) >= (7, 0).
      if cc >= (7, 0):
        supported_device_strs.append(device_str)
      else:
        unsupported_device_strs.append(device_str)
    else:
      # No compute capability reported; treat as unsupported.
      unsupported_device_strs.append(
          name + ', no compute capability (probably not an Nvidia GPU)')

  if unsupported_device_strs:
    # At least one GPU is unsupported: emit a warning listing every GPU
    # (supported first), with wording adjusted to the GPU mix/count.
    warning_str = _COMPAT_CHECK_WARNING_PREFIX + '\n'
    if supported_device_strs:
      warning_str += ('Some of your GPUs may run slowly with dtype policy '
                      'mixed_float16 because they do not all have compute '
                      'capability of at least 7.0. Your GPUs:\n')
    elif len(unsupported_device_strs) == 1:
      warning_str += ('Your GPU may run slowly with dtype policy mixed_float16 '
                      'because it does not have compute capability of at least '
                      '7.0. Your GPU:\n')
    else:
      warning_str += ('Your GPUs may run slowly with dtype policy '
                      'mixed_float16 because they do not have compute '
                      'capability of at least 7.0. Your GPUs:\n')
    # Collapse consecutive duplicate device descriptions ("... (xN)").
    for device_str in _dedup_strings(supported_device_strs +
                                     unsupported_device_strs):
      warning_str += ' ' + device_str + '\n'
    warning_str += ('See https://developer.nvidia.com/cuda-gpus for a list of '
                    'GPUs and their compute capabilities.\n')
    warning_str += _COMPAT_CHECK_WARNING_SUFFIX
    tf_logging.warning(warning_str)
  elif not supported_device_strs:
    # No GPUs at all: warn that mixed_float16 may be slow on CPU.
    tf_logging.warning(
        '%s\n'
        'The dtype policy mixed_float16 may run slowly because '
        'this machine does not have a GPU. Only Nvidia GPUs with '
        'compute capability of at least 7.0 run quickly with '
        'mixed_float16.\n%s' % (_COMPAT_CHECK_WARNING_PREFIX,
                                _COMPAT_CHECK_WARNING_SUFFIX))
  elif len(supported_device_strs) == 1:
    # Exactly one supported GPU: log an OK message naming it.
    tf_logging.info('%s\n'
                    'Your GPU will likely run quickly with dtype policy '
                    'mixed_float16 as it has compute capability of at least '
                    '7.0. Your GPU: %s' % (_COMPAT_CHECK_OK_PREFIX,
                                           supported_device_strs[0]))
  else:
    # Multiple supported GPUs: log a collective OK message.
    tf_logging.info('%s\n'
                    'Your GPUs will likely run quickly with dtype policy '
                    'mixed_float16 as they all have compute capability of at '
                    'least 7.0' % _COMPAT_CHECK_OK_PREFIX)
# Process-wide flag ensuring the compatibility check is logged at most once.
_logged_compatibility_check = False


def log_device_compatibility_check(policy_name):
  """Logs a compatibility check if the devices support the policy.

  Currently only logs for the policy mixed_float16. A log is shown only the
  first time this function is called.

  Args:
    policy_name: The name of the dtype policy.
  """
  global _logged_compatibility_check
  if _logged_compatibility_check:
    return
  # Set the flag before querying devices so a failure below still suppresses
  # repeated attempts on subsequent calls.
  _logged_compatibility_check = True
  gpus = tf.config.list_physical_devices('GPU')
  gpu_details_list = [tf.config.experimental.get_device_details(g) for g in gpus]
  _log_device_compatibility_check(policy_name, gpu_details_list)
| 5,937 | 39.121622 | 81 | py |
keras | keras-master/keras/mixed_precision/mixed_precision_graph_rewrite_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Keras integration with enable_mixed_precision_graph_rewrite()."""
import tensorflow.compat.v2 as tf
import os
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.mixed_precision import loss_scale_optimizer as loss_scale_optimizer_v2
from keras.mixed_precision import policy
from keras.optimizer_v2 import gradient_descent as gradient_descent_v2
class MixedPrecisionTest(keras_parameterized.TestCase):
  """Tests Keras integration with the mixed precision graph rewrite."""

  # Env var telling the grappler pass to transform the graph regardless of the
  # GPU's compute capability.
  IGNORE_PERF_VAR = 'TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'

  def setUp(self):
    super(MixedPrecisionTest, self).setUp()
    # Enable the tests to be run on pre-Volta GPUs by telling the grappler pass
    # to ignore performance and always transform the graph.
    self._original_ignore_perf_value = os.getenv(self.IGNORE_PERF_VAR)
    os.environ[self.IGNORE_PERF_VAR] = '1'

  def tearDown(self):
    # Set the IGNORE_PERF_VAR variable back to its original value.
    if self._original_ignore_perf_value is not None:
      os.environ[self.IGNORE_PERF_VAR] = self._original_ignore_perf_value
    else:
      del os.environ[self.IGNORE_PERF_VAR]

    # Undo any rewrite enabling done by a test body.
    tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite()
    super(MixedPrecisionTest, self).tearDown()

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_wrap_optimizer(self):
    """The rewrite wraps the optimizer in a LossScaleOptimizerV1."""
    opt = gradient_descent_v2.SGD(1.0)
    opt = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(opt, 123.)
    self.assertIsInstance(
        opt, loss_scale_optimizer_v2.LossScaleOptimizerV1)
    self.assertEqual(self.evaluate(opt.loss_scale), 123.)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_optimizer_errors(self):
    """Passing an already-wrapped optimizer raises and leaves rewrite off."""
    opt = gradient_descent_v2.SGD(1.0)
    opt = loss_scale_optimizer_v2.LossScaleOptimizerV1(opt, 'dynamic')
    with self.assertRaisesRegex(
        ValueError, '"opt" must not already be an instance of a '
        'LossScaleOptimizer.'):
      tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(tf.config.optimizer.get_experimental_options()
                     .get('auto_mixed_precision', False))

  @testing_utils.enable_v2_dtype_behavior
  def test_error_if_policy_is_set(self):
    """Enabling the rewrite under a mixed global policy is an error."""
    with policy.policy_scope('mixed_float16'):
      with self.assertRaisesRegex(ValueError,
                                  'the global Keras dtype Policy has been set'):
        tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
            gradient_descent_v2.SGD(1.0))
    # Test no error is thrown when the policy is currently the default.
    tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
        gradient_descent_v2.SGD(1.0))
    # Test no error is thrown when the policy is a non-mixed policy.
    with policy.policy_scope('float64'):
      tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
          gradient_descent_v2.SGD(1.0))
gradient_descent_v2.SGD(1.0))
if __name__ == '__main__':
tf.test.main()
| 3,706 | 42.104651 | 86 | py |
keras | keras-master/keras/mixed_precision/policy_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Policies."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras import combinations
from keras import testing_utils
from keras.engine import base_layer_utils
from keras.mixed_precision import device_compatibility_check
from keras.mixed_precision import policy as mp_policy
from keras.optimizer_v2 import gradient_descent
from tensorflow.python.platform import tf_logging
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class PolicyTest(tf.test.TestCase, parameterized.TestCase):
  """Tests Policies."""

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_attributes(self):
    """Plain policies use one dtype; mixed policies split compute/variable."""
    for dtype in 'int32', 'bool', 'float16', 'float32':
      policy = mp_policy.Policy(dtype)
      self.assertEqual(policy.name, dtype)
      self.assertEqual(policy.compute_dtype, dtype)
      self.assertEqual(policy.variable_dtype, dtype)

    for dtype in 'float16', 'bfloat16':
      policy = mp_policy.Policy('mixed_' + dtype)
      self.assertEqual(policy.name, 'mixed_' + dtype)
      self.assertEqual(policy.compute_dtype, dtype)
      self.assertEqual(policy.variable_dtype, 'float32')

    policy = mp_policy.Policy('_infer')
    self.assertEqual(policy.compute_dtype, None)
    self.assertEqual(policy.variable_dtype, None)

  @testing_utils.enable_v2_dtype_behavior
  def test_repr(self):
    """repr() of Policy and PolicyV1 has the documented format."""
    # Test Policy repr
    for policy in ('float32', 'int8', 'mixed_float16', 'mixed_bfloat16',
                   '_infer'):
      self.assertEqual(repr(mp_policy.Policy(policy)),
                       '<Policy "%s">' % policy)

    # Test PolicyV1 repr
    for policy in ('float32', 'int8', 'mixed_bfloat16', '_infer'):
      self.assertEqual(repr(mp_policy.PolicyV1(policy)),
                       '<PolicyV1 "%s", loss_scale=None>' % policy)
    self.assertEqual(
        repr(mp_policy.PolicyV1('float16', loss_scale=2.)),
        '<PolicyV1 "float16", loss_scale=FixedLossScale(2.0)>')
    self.assertStartsWith(
        repr(mp_policy.PolicyV1('mixed_float16')),
        '<PolicyV1 "mixed_float16", loss_scale=DynamicLossScale(')

  @testing_utils.enable_v2_dtype_behavior
  def test_policy_errors(self):
    """Invalid policy names/types raise with descriptive messages."""
    # Test passing invalid strings
    with self.assertRaisesRegex(
        ValueError, 'Cannot convert value abc to a mixed precision Policy.'):
      mp_policy.Policy('abc')

    # Test passing a DType
    with self.assertRaisesRegex(
        TypeError, "'name' must be a string, not a DType. "
        'Instead, pass DType.name. Got: float16'):
      mp_policy.Policy(tf.float16)

    # Test passing a non-DType invalid type
    with self.assertRaisesRegex(TypeError,
                                "'name' must be a string, but got: 5"):
      mp_policy.Policy(5)

    # Test passing a now-removed policy ending in float32_vars
    with self.assertRaisesRegex(
        ValueError, 'Policies ending in \'_float32_vars\' have been removed '
        'from TensorFlow. Please use the \'mixed_float16\' or '
        '\'mixed_bfloat16\' policy instead. Got policy name: '
        '\'infer_float32_vars\''):
      mp_policy.Policy('infer_float32_vars')
    with self.assertRaisesRegex(
        ValueError, 'Policies ending in \'_float32_vars\' have been removed '
        'from TensorFlow. Please use the \'mixed_float16\' policy '
        'instead. Got policy name: \'float16_with_float32_vars\''):
      mp_policy.Policy('float16_with_float32_vars')
    with self.assertRaisesRegex(
        ValueError, 'Policies ending in \'_float32_vars\' have been removed '
        'from TensorFlow. Please use the \'mixed_bfloat16\' policy '
        'instead. Got policy name: \'bfloat16_with_float32_vars\''):
      mp_policy.Policy('bfloat16_with_float32_vars')
    with self.assertRaisesRegex(
        ValueError, 'Policies ending in \'_float32_vars\' have been removed '
        'from TensorFlow. Got policy name: '
        '\'int8_with_float32_vars\''):
      mp_policy.Policy('int8_with_float32_vars')

  @testing_utils.enable_v2_dtype_behavior
  def test_loss_scale(self):
    """PolicyV1.loss_scale reflects the constructor argument and defaults."""
    policy = mp_policy.PolicyV1('float32')
    self.assertEqual(policy.loss_scale, None)

    policy = mp_policy.PolicyV1('float32', loss_scale=None)
    self.assertEqual(policy.loss_scale, None)

    ls = tf.mixed_precision.experimental.DynamicLossScale()
    policy = mp_policy.PolicyV1('float32', loss_scale=ls)
    self.assertIs(policy.loss_scale, ls)

    policy = mp_policy.PolicyV1('float32', loss_scale='dynamic')
    self.assertIsInstance(policy.loss_scale, tf.mixed_precision.experimental.DynamicLossScale)

    # 'mixed_float16' defaults to a dynamic loss scale.
    policy = mp_policy.PolicyV1('mixed_float16')
    self.assertIsInstance(policy.loss_scale, tf.mixed_precision.experimental.DynamicLossScale)

    policy = mp_policy.PolicyV1('mixed_float16', loss_scale=None)
    self.assertEqual(policy.loss_scale, None)

    # 'mixed_bfloat16' has no default loss scale.
    policy = mp_policy.PolicyV1('mixed_bfloat16')
    self.assertEqual(policy.loss_scale, None)

  @testing_utils.enable_v2_dtype_behavior
  def test_global_policy(self):
    """set_policy changes the global policy; reset to None in finally."""
    if base_layer_utils.v2_dtype_behavior_enabled():
      default_policy = 'float32'
    else:
      default_policy = '_infer'
    self.assertEqual(mp_policy.global_policy().name, default_policy)
    try:
      mp_policy.set_policy('mixed_float16')
      self.assertEqual(mp_policy.global_policy().name, 'mixed_float16')
      with tf.Graph().as_default():  # Policies are not associated with a graph
        self.assertEqual(mp_policy.global_policy().name, 'mixed_float16')
      mp_policy.set_policy('_infer')
      self.assertEqual(mp_policy.global_policy().name, '_infer')
      policy = mp_policy.Policy('mixed_bfloat16')
      mp_policy.set_policy(policy)
      self.assertIs(mp_policy.global_policy(), policy)
    finally:
      mp_policy.set_policy(None)

  @testing_utils.enable_v2_dtype_behavior
  def test_global_policy_dtype_error(self):
    """Non-floating-point global policies are rejected."""
    with self.assertRaisesRegex(
        ValueError,
        'set_policy can only be used to set the global policy to '
        'floating-point policies, such as "float32" and "mixed_float16", but '
        'got policy: int32'):
      mp_policy.set_policy('int32')
    with self.assertRaisesRegex(
        ValueError,
        'set_policy can only be used to set the global policy to '
        'floating-point policies, such as "float32" and "mixed_float16", but '
        'got policy: complex64'):
      mp_policy.set_policy(mp_policy.Policy('complex64'))

  @testing_utils.enable_v2_dtype_behavior
  def test_loss_scale_warning(self):
    """A loss scale on a non-float16 policy warns exactly once."""
    with tf.compat.v1.test.mock.patch.object(tf_logging, 'warning') as mock_warn:
      mp_policy.PolicyV1('float32', loss_scale=2.)
      self.assertEqual(
          mock_warn.call_args_list[0][0][0],
          'Creating a Policy with a loss scale is only useful for float16 '
          'policies. You passed loss_scale=2.0 for policy float32. Consider '
          'not passing any loss_scale instead.')

    for policy_name in 'float16', 'mixed_float16':
      # Trigger any other warnings that occur only once
      mp_policy.PolicyV1(policy_name, loss_scale=2.)
      with tf.compat.v1.test.mock.patch.object(tf_logging, 'warning') as mock_warn:
        mp_policy.PolicyV1(policy_name, loss_scale=2.)
        mock_warn.assert_not_called()

  @testing_utils.enable_v2_dtype_behavior
  def test_device_compatibility_warning(self):
    """mixed_float16 logs a device-compatibility warning once, if at all."""
    if not tf.executing_eagerly():
      self.skipTest('Run in eager mode only.')

    # Reset the once-only flag so this test is deterministic.
    device_compatibility_check._logged_compatibility_check = False
    with tf.compat.v1.test.mock.patch.object(tf_logging, 'warning') as mock_warn:
      mp_policy.Policy('mixed_float16')
    if tf.config.list_physical_devices('GPU'):
      mock_warn.assert_not_called()
    else:
      self.assertRegex(
          mock_warn.call_args[0][0],
          r'Mixed precision compatibility check \(mixed_float16\): WARNING.*')

    if tf.config.list_physical_devices('GPU'):
      # Assert message is only logged once
      with tf.compat.v1.test.mock.patch.object(tf_logging, 'warning') as mock_warn:
        mp_policy.Policy('mixed_float16')
      mock_warn.assert_not_called()

  @testing_utils.enable_v2_dtype_behavior
  def test_policy_scope(self):
    """policy_scope nests and restores the previous global policy on exit."""
    if base_layer_utils.v2_dtype_behavior_enabled():
      default_policy = 'float32'
    else:
      default_policy = '_infer'
    with mp_policy.policy_scope('mixed_float16'):
      self.assertEqual(mp_policy.global_policy().name, 'mixed_float16')
      with mp_policy.policy_scope('_infer'):
        self.assertEqual(mp_policy.global_policy().name, '_infer')
      self.assertEqual(mp_policy.global_policy().name, 'mixed_float16')
    self.assertEqual(mp_policy.global_policy().name, default_policy)

  @testing_utils.enable_v2_dtype_behavior
  def test_config(self):
    """get_config/from_config round-trips every policy kind."""
    for policy in (
        mp_policy.Policy('float16'),
        mp_policy.Policy('float32'),
        mp_policy.Policy('int16'),
        mp_policy.Policy('mixed_float16'),
        mp_policy.Policy('mixed_bfloat16'),
        mp_policy.Policy('_infer'),
    ):
      config = policy.get_config()
      new_policy = mp_policy.Policy.from_config(config)
      # Comparing strings is the easiest way to ensure the policies are the
      # same, as policy does not override the == operator.
      self.assertEqual(str(policy), str(new_policy))

  @testing_utils.enable_v2_dtype_behavior
  def test_serialization(self):
    """serialize/deserialize round-trips policies, subclasses and PolicyV1."""
    # Test policies that are equivalent to a single dtype
    for policy_name in 'float16', 'float32', 'int8', 'string', 'bool':
      policy = mp_policy.Policy(policy_name)
      config = mp_policy.serialize(policy)
      self.assertEqual(config, policy_name)
      new_policy = mp_policy.deserialize(config)
      self.assertEqual(str(policy), str(new_policy))

    # Test "_infer" policy
    policy = mp_policy.Policy('_infer')
    config = mp_policy.serialize(policy)
    self.assertIsNone(config)
    new_policy = mp_policy.deserialize(config)
    self.assertEqual(str(policy), str(new_policy))

    class MyPolicy(mp_policy.Policy):
      pass

    # Test policies that are not equivalent to a single dtype
    for policy in (
        mp_policy.Policy('mixed_float16'),
        mp_policy.Policy('mixed_bfloat16'),
        MyPolicy('float32')
    ):
      config = mp_policy.serialize(policy)
      self.assertEqual(config, {'class_name': policy.__class__.__name__,
                                'config': {'name': policy.name}})
      new_policy = mp_policy.deserialize(config,
                                         custom_objects={'MyPolicy': MyPolicy})
      self.assertEqual(str(policy), str(new_policy))

    # Test V1 policies that override the loss scale
    for policy in (
        mp_policy.PolicyV1('float32', loss_scale=2.),
        mp_policy.PolicyV1('float32', loss_scale=None),
        mp_policy.PolicyV1('mixed_float16', loss_scale=2.),
        mp_policy.PolicyV1('mixed_float16', loss_scale=None),
        mp_policy.PolicyV1('mixed_bfloat16', loss_scale=2.),
        mp_policy.PolicyV1('mixed_bfloat16', loss_scale=None),
    ):
      config = mp_policy.serialize(policy)
      expected_loss_scale_config = None
      if policy.loss_scale:
        expected_loss_scale_config = {
            'class_name': 'FixedLossScale',
            'config': {'loss_scale_value': 2.}
        }
      self.assertEqual(
          config, {
              'class_name': policy.__class__.__name__,
              'config': {
                  'name': policy.name,
                  'loss_scale': expected_loss_scale_config
              }
          })

  @testing_utils.enable_v2_dtype_behavior
  def test_error_if_graph_rewrite_enabled(self):
    """A mixed global policy is disallowed while the graph rewrite is on."""
    try:
      tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
          gradient_descent.SGD(1.))
      with self.assertRaisesRegex(
          ValueError, 'cannot be set to "mixed_float16", .* the mixed '
          'precision graph rewrite has already been enabled'):
        mp_policy.set_policy('mixed_float16')
      with mp_policy.policy_scope('float64'):
        pass  # Non-mixed policies are allowed
    finally:
      tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite()

  @testing_utils.disable_v2_dtype_behavior
  def test_v1_dtype_behavior(self):
    """Setting any global policy is disallowed under V1 dtype behavior."""
    # Setting global policies are not allowed with V1 dtype behavior
    with self.assertRaisesRegex(
        ValueError, 'global policy can only be set in TensorFlow 2'):
      with mp_policy.policy_scope(mp_policy.Policy('_infer')):
        pass
    with self.assertRaisesRegex(
        ValueError, 'global policy can only be set in TensorFlow 2'):
      with mp_policy.policy_scope(mp_policy.Policy('float32')):
        pass
    with self.assertRaisesRegex(
        ValueError, 'global policy can only be set in TensorFlow 2'):
      with mp_policy.policy_scope(mp_policy.Policy('mixed_float16')):
        pass
if __name__ == '__main__':
tf.test.main()
| 13,512 | 40.072948 | 94 | py |
keras | keras-master/keras/mixed_precision/loss_scale_optimizer_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
import os
from absl.testing import parameterized
from keras import combinations
from keras import optimizers
from keras.mixed_precision import loss_scale_optimizer
from keras.mixed_precision import test_util as mp_test_util
from keras.optimizer_v2 import adam
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import optimizer_v2
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import gradient_descent as legacy_sgd
# Disable not-callable lint error, as the linter is unable to detect that
# LossScale instances are callable.
# pylint: disable=not-callable
# If called outside any strategy.scope() calls, this will return the default
# strategy. Used as the "Base" (non-distributed) case in TESTCASES below.
default_strategy_fn = tf.distribute.get_strategy
def create_mirrored_strategy():
  """Returns a MirroredStrategy over the CPU plus one GPU when available."""
  devices = ['cpu:0']
  if tf.config.list_logical_devices('GPU'):
    devices.append('gpu:0')
  return tf.distribute.MirroredStrategy(devices)
# Named parameter sets for parameterized tests: run each test once under the
# default (non-distributed) strategy and once under a MirroredStrategy.
TESTCASES = ({
    'testcase_name': 'Base',
    'strategy_fn': default_strategy_fn
}, {
    'testcase_name': 'Distribute',
    'strategy_fn': create_mirrored_strategy
})
@test_util.with_control_flow_v2
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LossScaleOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def _run_if_in_graph_mode(self, val):
# Running only in graph mode is useful, because optimizers sometimes return
# a value that, in Graph mode, is runnable with self.evaluate. But in Eager
# mode, the optimizer already does the computations and the return value
# cannot be run.
if not tf.executing_eagerly():
self.evaluate(val)
def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
expected_grad)
loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
return lambda: opt.minimize(loss, var_list=[var])
@parameterized.named_parameters(*TESTCASES)
def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = tf.Variable([5.0])
opt = gradient_descent.SGD(2.0)
loss_scale = 10.
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=loss_scale)
self.assertEqual(self.evaluate(opt.loss_scale), loss_scale)
self.assertIsInstance(opt.loss_scale, tf.Tensor)
# We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale
# / strategy.num_replicas_in_sync will not be exact, which could lead to
# assertion failures due to rounding issues.
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, loss_scale / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
  def testFixedLossScaleAppliedToLossWithGetGradients(self):
    """Tests a fixed loss scale is applied when calling opt.get_gradients."""
    with tf.Graph().as_default():
      var = tf.Variable([2.0])
      opt = gradient_descent.SGD(1.0)
      loss_scale = 10.
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
                                                    initial_scale=loss_scale)
      grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
          loss_scale)
      loss = grad_check_fn(var)
      run_op = opt.get_gradients(loss, [var])
      self.evaluate(tf.compat.v1.global_variables_initializer())
      # This will cause an assertion to run, as
      # mp_test_util.create_identity_with_grad_check_fn added an assertion op.
      self.evaluate(run_op)
def testDynamicAttrsWithFixedLossScale(self):
opt = gradient_descent.SGD()
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2.)
self.assertFalse(opt.dynamic)
self.assertIsNone(opt.dynamic_counter)
self.assertIsNone(opt.dynamic_growth_steps)
def testGetScaledLoss(self):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2.)
loss = tf.convert_to_tensor(5.)
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))
loss = tf.convert_to_tensor(5., dtype='float16')
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))
def testGetUnscaledGradients(self):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2)
scaled_grads = [
tf.convert_to_tensor(3.), None,
tf.convert_to_tensor(-4., dtype='float16')
]
grads = opt.get_unscaled_gradients(scaled_grads)
grads = [self.evaluate(g) if g is not None else g for g in grads]
self.assertEqual([1.5, None, -2.], grads)
  def testGetUnscaledSparseGradients(self):
    """Tests get_unscaled_gradients unscales IndexedSlices gradients."""
    opt = gradient_descent.SGD(2.0)
    opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
                                                  initial_scale=2)
    sparse_scaled_grad = tf.IndexedSlices(
        tf.convert_to_tensor([[4., 2.], [8., 5.]]),
        tf.convert_to_tensor([1, 3], dtype='int32'),
        dense_shape=tf.convert_to_tensor([5, 2],
                                         dtype='int32'))
    sparse_grad = opt.get_unscaled_gradients([sparse_scaled_grad])[0]
    # The result must stay sparse; only the values are divided by the scale.
    self.assertIsInstance(sparse_grad, tf.IndexedSlices)
    self.assertAllEqual([[2., 1.], [4., 2.5]],
                        self.evaluate(sparse_grad.values))
  @parameterized.named_parameters(*TESTCASES)
  def testDynamicLossScale(self, strategy_fn):
    """Tests a dynamic loss scale doubles after dynamic_growth_steps good steps."""
    strategy = strategy_fn()
    learning_rate = 2.
    expected_gradient = tf.Variable(learning_rate /
                                    strategy.num_replicas_in_sync)
    with strategy.scope():
      var = tf.Variable([5.0])
      opt = gradient_descent.SGD(learning_rate)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                    dynamic_growth_steps=1)
      self.assertEqual(opt.initial_scale, 2.)
      self.assertIsInstance(opt.initial_scale, float)
      self.assertEqual(opt.dynamic_growth_steps, 1)
      self.assertIsInstance(opt.dynamic_growth_steps, int)
      self.assertEqual(opt.initial_scale % strategy.num_replicas_in_sync, 0)
      run_fn = self._run_fn_with_grad_check(strategy, var, opt,
                                            expected_gradient)
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # The loss is the identity of the variable. Therefore the gradient is 1,
      # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
      self.assertAllClose([3.], self.evaluate(var))
      # Loss scale will be double, so the expected gradient is also doubled.
      self.evaluate(expected_gradient.assign(
          2 * learning_rate / strategy.num_replicas_in_sync))
      run_op = strategy.experimental_run(run_fn)
      self._run_if_in_graph_mode(run_op)
      # As before, the 2 is subtracted from the variable, making it's new value
      # 1.
      self.assertAllClose([1.], self.evaluate(var))
  def testDynamicLossScaleDefaultValues(self):
    """Tests the default initial scale (2**15) and growth steps (2000)."""
    opt = gradient_descent.SGD()
    opt = loss_scale_optimizer.LossScaleOptimizer(opt)
    self.assertEqual(opt.initial_scale, 2 ** 15)
    self.assertEqual(opt.dynamic_growth_steps, 2000)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertEqual(self.evaluate(opt.loss_scale), 2 ** 15)
  # pylint: disable=cell-var-from-loop
  @parameterized.named_parameters(*TESTCASES)
  def testClipping(self, strategy_fn):
    """Tests gradient clipping works with loss scaling, and Inf grads are skipped."""
    strategy = strategy_fn()
    learning_rate = 2.
    for clip_type in ('clipnorm', 'global_clipnorm', 'clipvalue'):
      with strategy.scope(), self.subTest(clip_type=clip_type):
        var = tf.Variable([5.0])
        opt = gradient_descent.SGD(learning_rate, **{clip_type: 2.0})
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                      dynamic_growth_steps=1)
        self.assertEqual(getattr(opt, clip_type), 2.0)
        self.assertEqual(opt.initial_scale % strategy.num_replicas_in_sync, 0)
        loss = lambda: var * 4 / strategy.num_replicas_in_sync
        run_fn = lambda: opt.minimize(loss, var_list=[var])
        # Test running with clipped gradients
        run_op = strategy.experimental_run(run_fn)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self._run_if_in_graph_mode(run_op)
        # The gradient is 4 but is clipped to 2, so the variable will be
        # init_val - clipped_grad * lr == 5 - 2 * 2 == 1
        self.assertAllClose([1.], self.evaluate(var))
        self.assertEqual(self.evaluate(opt.loss_scale), 4)
        # Test changing the clip amount and running again
        setattr(opt, clip_type, 3.0)
        run_op = strategy.experimental_run(run_fn)
        self._run_if_in_graph_mode(run_op)
        # The gradient is 4 but is clipped to 3, so the variable will be
        # prev_var - clipped_grad * lr == 1 - 3 * 2 == -5
        self.assertAllClose([-5.], self.evaluate(var))
        self.assertEqual(self.evaluate(opt.loss_scale), 8)
        # Test Inf gradients are still skipped instead of being clipped
        loss = lambda: var * float('Inf')
        run_fn = lambda: opt.minimize(loss, var_list=[var])
        run_op = strategy.experimental_run(run_fn)
        self._run_if_in_graph_mode(run_op)
        self.assertAllClose([-5.], self.evaluate(var))  # Var does not change
        self.assertEqual(self.evaluate(opt.loss_scale), 4)
  # pylint: enable=cell-var-from-loop
  @parameterized.named_parameters(*TESTCASES)
  def testDynamicUpdate(self, strategy_fn):
    """Tests the loss scale doubles on finite steps and halves on NaN steps."""
    with strategy_fn().scope() as strategy:
      var = tf.Variable([1.0, 2.0])
      opt = gradient_descent.SGD(1.0)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                    dynamic_growth_steps=1)
      # Test optimizer with finite gradients
      loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # Gradient is 2, so variable will have 2 subtracted from it
      self.assertAllClose([-1.0, 0.0], self.evaluate(var))
      # Loss scale has doubled from 2 to 4
      self.assertEqual(4., self.evaluate(opt.loss_scale))
      # Test optimizer with NaN gradients
      loss = lambda: var * float('NaN')
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self._run_if_in_graph_mode(run_op)
      # Variable should not change from before, due to NaN gradients.
      self.assertAllClose(self.evaluate(var), [-1.0, 0.0])
      # Loss scale should half due to NaN gradients.
      self.assertEqual(2., self.evaluate(opt.loss_scale))
  @parameterized.named_parameters(*TESTCASES)
  def testDynamicLossScaleWithFloat16Loss(self, strategy_fn):
    """Tests a dynamic loss scale works when the loss is computed in float16."""
    strategy = strategy_fn()
    learning_rate = 2.
    with strategy.scope():
      var = tf.Variable([5.0])
      opt = gradient_descent.SGD(learning_rate)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                    dynamic_growth_steps=1)
      def loss():
        # Cast to float16 to exercise the mixed-precision loss path.
        return tf.cast(var / strategy.num_replicas_in_sync, 'float16')
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # The loss is the identity of the variable. Therefore the gradient is 1,
      # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
      self.assertAllClose([3.], self.evaluate(var))
  def testNanOnOneReplicaOnly(self):
    """Tests a step is skipped if only one replica produces NaN gradients."""
    if not tf.test.is_gpu_available():
      self.skipTest('Test requires GPU')
    if (not tf.executing_eagerly() and
        not tf.compat.v1.control_flow_v2_enabled()):
      self.skipTest('b/181283011: GradientTape does not work properly with '
                    'V1 control flow, and opt.minimize uses GradientTape')
    with create_mirrored_strategy().scope() as strategy:
      var = tf.Variable([1.0, 2.0])
      opt = gradient_descent.SGD(1.0)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                    dynamic_growth_steps=2)
      def loss():
        rep_id = (tf.distribute.get_replica_context()
                  .replica_id_in_sync_group)
        # The last element of last replica's gradient is NaN.
        return tf.cond(
            tf.constant(rep_id == 0), lambda: var * 2.,
            lambda: var * tf.constant([1., float('NaN')]))
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # Variable should not change from before, due to NaN gradients.
      self.assertAllClose(self.evaluate(var), [1.0, 2.0])
      # Loss scale should half due to NaN gradients.
      self.assertEqual(1., self.evaluate(opt.loss_scale))
  # NOTE(review): "Aggregater" looks like a typo for "Aggregator", but renaming
  # would change the reported test id, so it is kept as-is.
  def testCustomAggregater(self):
    """Tests NaNs introduced by a custom gradient_aggregator skip the step."""
    def gradient_aggregator(grads_and_vars):
      # Simulate an all-reduce where a replica has a NaN gradient by setting
      # the last gradient to NaN
      grads_and_vars = list(grads_and_vars)
      last_grad, last_var = grads_and_vars[-1]
      grads_and_vars[-1] = (last_grad * float('NaN'), last_var)
      return grads_and_vars
    var = tf.Variable([1.0, 2.0])
    opt = gradient_descent.SGD(1.0, gradient_aggregator=gradient_aggregator)
    opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                  dynamic_growth_steps=2)
    loss = lambda: var * 2
    run_op = opt.minimize(loss, var_list=[var])
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self._run_if_in_graph_mode(run_op)
    # Variable should not change from before, due to NaN gradients.
    self.assertAllClose(self.evaluate(var), [1.0, 2.0])
    # Loss scale should half due to NaN gradients.
    self.assertEqual(1., self.evaluate(opt.loss_scale))
  @parameterized.named_parameters(*TESTCASES)
  def testDynamicLossScaleWithSlots(self, strategy_fn):
    """Tests slot variables (momentum) are updated correctly with loss scaling."""
    strategy_obj = strategy_fn()
    if (isinstance(strategy_obj, tf.distribute.MirroredStrategy) and
        tf.compat.v1.control_flow_v2_enabled() and
        not tf.executing_eagerly()):
      self.skipTest('b/138667997')
    with strategy_obj.scope() as strategy:
      var = tf.Variable([1.0, 2.0])
      # An SGD optimizer with momentum has slot variables.
      opt = gradient_descent.SGD(1.0, momentum=1.)
      initial_scale = 2.
      opt = loss_scale_optimizer.LossScaleOptimizer(
          opt, initial_scale=initial_scale, dynamic_growth_steps=1)
      loss = lambda: var / strategy.num_replicas_in_sync
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # The momentum accumulator starts at 0 and the gradient is 1. The
      # accumulator is incremented by the gradient, so it is now 1. Then the
      # variable is subtracted by the accumulator, so the variable is subtracted
      # by 1.
      self.assertAllClose([0.0, 1.0], self.evaluate(var))
      self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 2)
      run_op = strategy.experimental_run(run_fn)
      self._run_if_in_graph_mode(run_op)
      # The momentum accumulator was 1 before this step and the gradient is 1.
      # The accumulator is incremented by the gradient, so it is now 2. Then the
      # variable is subtracted by the accumulator, so the variable is subtracted
      # by 2.
      self.assertAllClose([-2., -1.], self.evaluate(var))
      self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 4)
      self.assertEqual(opt.get_slot_names(), ['momentum'])
def testIterations(self):
opt = gradient_descent.SGD(2.0)
lso = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=10.)
lso.iterations = 7
self.assertEqual(lso.iterations, 7)
self.assertEqual(opt.iterations, 7)
  @parameterized.named_parameters(*TESTCASES)
  def testIterationsIncremented(self, strategy_fn):
    """Tests `iterations` is incremented even when a step is skipped for NaNs."""
    with strategy_fn().scope() as strategy:
      # Test iterations is incremented in opt.minimize.
      opt = gradient_descent.SGD(1.0)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt)
      var = tf.Variable([5.0])
      loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
      run_fn = lambda: opt.minimize(loss, [var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      self.assertEqual(self.evaluate(var), 3.0)  # Grad is 2, so var is 5 - 2
      self.assertEqual(self.evaluate(opt.iterations), 1)
      # Test iterations is incremented in opt.minimize even if gradients aren't
      # applied to variables due to NaN gradients.
      loss = lambda: var * float('NaN')
      run_fn = lambda: opt.minimize(loss, [var])
      run_op = strategy.experimental_run(run_fn)
      self._run_if_in_graph_mode(run_op)
      self.assertEqual(self.evaluate(var), 3.0)
      self.assertEqual(self.evaluate(opt.iterations), 2)
  def testWeightMethods(self):
    """Tests weights/get_weights/set_weights/variables on the wrapper."""
    with self.test_session():
      var = tf.Variable([1.0])
      opt = gradient_descent.SGD(1.0)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2.,
                                                    dynamic_growth_steps=1)
      run_op = opt.minimize(lambda: var * 2, [var])
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      self.assertLen(opt.weights, 1)  # The 'iterations' weight
      self.assertEqual(self.evaluate(opt.weights[0]), 1)
      self.assertEqual(opt.get_weights()[0], 1)
      self.assertEqual(self.evaluate(opt.variables()[0]), 1)
      opt.set_weights([np.array(2.)])
      self.assertEqual(self.evaluate(opt.variables()[0]), 2)
  def testHyperParametersExposed(self):
    """Tests inner-optimizer hyperparameters are readable/writable on the wrapper."""
    with self.cached_session():
      opt = adam.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
      lso = loss_scale_optimizer.LossScaleOptimizer(opt)
      # Force hyperparameters to be created
      opt.lr  # pylint: disable=pointless-statement
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(self.evaluate(lso.beta_1), 0.5)
      self.assertIsInstance(lso.beta_1, tf.Variable)
      self.assertEqual(self.evaluate(lso.lr), 1.0)
      self.assertIs(lso.lr, opt.lr)
      self.assertIs(lso.lr, lso.learning_rate)
      # Setting a hyperparameter through the wrapper must affect the inner
      # optimizer, and vice versa; both continue to share the same variable.
      lso.beta_1 = 0.25
      self.assertEqual(self.evaluate(lso.beta_1), 0.25)
      self.assertEqual(self.evaluate(opt.beta_1), 0.25)
      self.assertIs(lso.beta_1, opt.beta_1)
      opt.beta_1 = 0.75
      self.assertEqual(self.evaluate(lso.beta_1), 0.75)
      self.assertEqual(self.evaluate(opt.beta_1), 0.75)
      self.assertIs(lso.beta_1, opt.beta_1)
      lso.lr = 2.0
      self.assertEqual(self.evaluate(lso.lr), 2.0)
      self.assertEqual(self.evaluate(lso.learning_rate), 2.0)
      self.assertEqual(self.evaluate(opt.lr), 2.0)
      self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
      self.assertIs(lso.lr, opt.lr)
      # Test setting attribute that is both attribute on LossScaleOptimizer and
      # hyperparameter on wrapped optimizer.
      class MyOpt(gradient_descent.SGD):

        def __init__(self):
          super().__init__()
          self._set_hyper('loss_scale', 123.)

      opt = MyOpt()
      lso = loss_scale_optimizer.LossScaleOptimizer(opt)
      with self.assertRaises(AttributeError):
        lso.loss_scale = 2.
def testArbitraryAttributesNotExposed(self):
opt = gradient_descent.SGD()
lso = loss_scale_optimizer.LossScaleOptimizer(opt)
self.assertFalse(opt.nesterov)
with self.assertRaisesRegex(
AttributeError,
"'LossScaleOptimizer' object has no attribute 'nesterov'"):
lso.nesterov # pylint: disable=pointless-statement
lso.nesterov = True
self.assertTrue(lso.nesterov)
self.assertFalse(opt.nesterov)
def testDir(self):
lso = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
dir_result = dir(lso)
self.assertIn('learning_rate', dir_result) # Hyperparameter
self.assertIn('lr', dir_result) # Hyperparameter
self.assertIn('minimize', dir_result) # Attribute
self.assertIn('loss_scale', dir_result) # Attribute
self.assertNotIn('nesterov', dir_result) # Attribute on inner optimizer
self.assertIn('nesterov', dir(lso.inner_optimizer))
  def testApplyGradientsGetsUnwrappedTensors(self):
    """Tests apply_gradients overrides receive plain Tensors, not PerReplica."""
    # Tests that gradients passed to apply_gradients are not wrapped in a
    # DistributionStrategy wrapper, such as PerReplica, but instead are raw
    # Tensors. Optimizer subclasses that override apply_gradients() expect raw
    # Tensors, even though the base Optimizer can handle PerReplica gradients.
    outer_self = self

    class MyOptimizer(gradient_descent.SGD):

      def apply_gradients(self,
                          grads_and_vars,
                          name=None,
                          experimental_aggregate_gradients=True):
        for grad, _ in grads_and_vars:
          outer_self.assertIsInstance(grad, tf.Tensor)
        return super(MyOptimizer,
                     self).apply_gradients(grads_and_vars, name,
                                           experimental_aggregate_gradients)

    with create_mirrored_strategy().scope() as strategy:
      var = tf.Variable([5.0])
      opt = MyOptimizer(learning_rate=1.0)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
                                                    initial_scale=1)
      loss = lambda: var * 2.0
      run_fn = lambda: opt.minimize(loss, [var])
      strategy.experimental_run(run_fn)
  @parameterized.named_parameters(*TESTCASES)
  def testV1Optimizer(self, strategy_fn):
    """Tests LossScaleOptimizerV1 with both fixed and dynamic loss scales."""
    strategy = strategy_fn()
    learning_rate = 2.
    with strategy.scope():
      # Test FixedLossScale
      var = tf.Variable([5.0])
      opt = gradient_descent.SGD(learning_rate)
      opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale=2)
      self.assertIsInstance(opt.loss_scale, tf.Tensor)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(self.evaluate(opt.loss_scale), 2)
      self.assertEqual(opt.initial_scale, 2)
      self.assertIsNone(opt.dynamic_growth_steps)
      run_fn = self._run_fn_with_grad_check(
          strategy, var, opt, 2 / strategy.num_replicas_in_sync)
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # The loss is the identity of the variable. Therefore the gradient is 1,
      # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
      self.assertAllClose([3.], self.evaluate(var))
      # Test DynamicLossScale
      var = tf.Variable([5.0])
      opt = gradient_descent.SGD(learning_rate)
      opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, 'dynamic')
      self.assertEqual(opt.initial_scale, 2 ** 15)
      self.assertEqual(opt.dynamic_growth_steps, 2000)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(self.evaluate(opt.loss_scale), 2 ** 15)
      for s in strategy.experimental_local_results(opt.dynamic_counter):
        self.assertEqual(self.evaluate(s), 0)
      loss = lambda: var * float('NaN')
      run_fn = lambda: opt.minimize(loss, var_list=[var])
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # NaN gradients: var unchanged, scale halved, counter reset.
      self.assertAllClose([5.], self.evaluate(var))
      self.assertEqual(self.evaluate(opt.loss_scale), 2 ** 14)
      for s in strategy.experimental_local_results(opt.dynamic_counter):
        self.assertEqual(self.evaluate(s), 0)
  @parameterized.named_parameters(*TESTCASES)
  def testPassingV1LossScale(self, strategy_fn):
    """Tests V1 FixedLossScale/DynamicLossScale objects can be passed to V1 LSO."""
    strategy = strategy_fn()
    learning_rate = 2.
    with strategy.scope():
      # Test FixedLossScale
      var = tf.Variable([5.0])
      opt = gradient_descent.SGD(learning_rate)
      loss_scale = tf.mixed_precision.experimental.FixedLossScale(2.)
      opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
      self.assertIsInstance(opt.loss_scale, tf.Tensor)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(self.evaluate(opt.loss_scale), 2)
      run_fn = self._run_fn_with_grad_check(
          strategy, var, opt, 2 / strategy.num_replicas_in_sync)
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      # The loss is the identity of the variable. Therefore the gradient is 1,
      # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
      self.assertAllClose([3.], self.evaluate(var))
      # Test DynamicLossScale
      var = tf.Variable([5.0])
      opt = gradient_descent.SGD(learning_rate)
      loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
          initial_loss_scale=4, increment_period=1, multiplier=2)
      loss_scale._current_loss_scale.assign(2)
      opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
      self.assertEqual(opt.initial_scale, 4)
      self.assertEqual(opt.dynamic_growth_steps, 1)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      # Current loss scale is not copied so loss scale is reinitialized to 4
      self.assertEqual(self.evaluate(opt.loss_scale), 4)
      for s in strategy.experimental_local_results(opt.dynamic_counter):
        self.assertEqual(self.evaluate(s), 0)
      run_fn = self._run_fn_with_grad_check(
          strategy, var, opt, 4 / strategy.num_replicas_in_sync)
      run_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self._run_if_in_graph_mode(run_op)
      self.assertAllClose([3.], self.evaluate(var))
  def testPassingV1LossScaleErrors(self):
    """Tests unsupported V1 LossScale objects raise clear errors."""
    opt = gradient_descent.SGD()
    # A DynamicLossScale with a multiplier other than 2 is rejected.
    loss_scale = tf.mixed_precision.experimental.DynamicLossScale(multiplier=4)
    with self.assertRaisesRegex(
        ValueError, 'When passing a DynamicLossScale to "loss_scale", '
                    'DynamicLossScale.multiplier must be 2. Got: '
                    'DynamicLossScale'):
      loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)

    # Custom LossScale subclasses are no longer supported.
    class MyLossScale(tf.mixed_precision.experimental.LossScale):

      def __call__(self):
        return 1.

      def update(self, grads):
        return None, True

      def get_config(self):
        return {}

    with self.assertRaisesRegex(
        TypeError, 'Passing a LossScale that is not a FixedLossScale or a '
                   'DynamicLossScale is no longer supported. Got:'):
      loss_scale_optimizer.LossScaleOptimizerV1(opt, MyLossScale())
  def testLossScaleDelegationWithWrapper(self):
    """Tests learning_rate delegation when LSO wraps another optimizer wrapper."""
    # Test learning_rate is exposed when LossScaleOptimizer wraps another
    # wrapper.

    class MyOptimizer(optimizer_v2.OptimizerV2):

      def __init__(self):
        super().__init__('MyOptimizer')
        self.inner_optimizer = adam.Adam(learning_rate=1.0)

      @property
      def learning_rate(self):
        return self.inner_optimizer.learning_rate

      @learning_rate.setter
      def learning_rate(self, value):
        self.inner_optimizer.learning_rate = value

      def get_config(self):
        return {}

    with self.cached_session():
      opt = MyOptimizer()
      opt = loss_scale_optimizer.LossScaleOptimizer(opt)
      # Force hyperparameters to be created
      opt.learning_rate  # pylint: disable=pointless-statement
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertEqual(self.evaluate(opt.learning_rate), 1.0)
      self.assertEqual(
          self.evaluate(opt.inner_optimizer.inner_optimizer.learning_rate), 1.0)
      # Setting through the LSO must propagate to the doubly-nested optimizer.
      opt.learning_rate = 2.0
      self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
      self.assertEqual(self.evaluate(
          opt.inner_optimizer.inner_optimizer.learning_rate), 2.0)
  @parameterized.named_parameters({
      'testcase_name': 'SaveAndRestoreBase',
      'strategy_fn': default_strategy_fn,
      'save_with_ls': True,
      'restore_with_ls': True,
  }, {
      'testcase_name': 'SaveAndRestoreDistribute',
      'strategy_fn': create_mirrored_strategy,
      'save_with_ls': True,
      'restore_with_ls': True,
  }, {
      'testcase_name': 'SaveBase',
      'strategy_fn': default_strategy_fn,
      'save_with_ls': True,
      'restore_with_ls': False,
  }, {
      'testcase_name': 'SaveDistribute',
      'strategy_fn': create_mirrored_strategy,
      'save_with_ls': True,
      'restore_with_ls': False,
  }, {
      'testcase_name': 'RestoreBase',
      'strategy_fn': default_strategy_fn,
      'save_with_ls': False,
      'restore_with_ls': True,
  }, {
      'testcase_name': 'RestoreDistribute',
      'strategy_fn': create_mirrored_strategy,
      'save_with_ls': False,
      'restore_with_ls': True,
  })
  def testCheckpoint(self, strategy_fn, save_with_ls, restore_with_ls):
    """Tests checkpoint save/restore, with and without loss scaling on each side."""

    class MySGD(gradient_descent.SGD):
      """A custom optimizer that tracks an extra variable."""

      def __init__(self, *args, **kwargs):
        super(MySGD, self).__init__(*args, **kwargs)
        self.my_var = tf.Variable(0.)
        self._track_trackable(self.my_var, 'my_var')

    strategy = strategy_fn()
    replicas = strategy.num_replicas_in_sync
    if (isinstance(strategy, tf.distribute.MirroredStrategy) and
        not tf.executing_eagerly()):
      # TODO(b/121381184): Enable running the test in this case.
      return
    with self.test_session(), strategy.scope():
      # Build and run a simple model.
      var = tf.Variable([2.0])
      opt = inner_opt = MySGD(1., momentum=1.)
      if save_with_ls:
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=1.,
                                                      dynamic_growth_steps=2.)
      run_fn = lambda: opt.minimize(lambda: var / replicas + 1., var_list=[var])
      opt_op = strategy.experimental_run(run_fn)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(strategy.experimental_local_results(opt_op))
      # Assert values.
      self.assertEqual(self.evaluate(var), 1.)
      if save_with_ls:
        self.assertEqual(self.evaluate(opt.loss_scale), 1.)
        self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
      slot_var = opt.get_slot(var, 'momentum')
      self.assertEqual(self.evaluate(slot_var).item(), -1)
      self.assertEqual(self.evaluate(opt.iterations), 1)
      # Set optimizer variable to check arbitrary optimizer attributes can be
      # saved/restored
      self.evaluate(inner_opt.my_var.assign(1.))
      # Save a checkpoint.
      checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
      prefix = os.path.join(self.get_temp_dir(), 'ckpt')
      save_path = checkpoint.save(prefix)
      # Create new model
      var = tf.Variable([2.0])
      opt = inner_opt = MySGD(1., momentum=1.)
      if restore_with_ls:
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=1.,
                                                      dynamic_growth_steps=2.)
      # Restore new model.
      checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
      status = checkpoint.restore(save_path)
      if save_with_ls:
        status.assert_existing_objects_matched()
      else:
        status.assert_nontrivial_match()
      # Assert restored values. We can only assert in eager mode since the
      # variables are uninitialized in graph mode
      if tf.executing_eagerly():
        self.assertEqual(self.evaluate(var), 1.)
        if save_with_ls and restore_with_ls:
          self.assertEqual(self.evaluate(opt.loss_scale), 1.)
          self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
        elif restore_with_ls:
          self.assertEqual(self.evaluate(opt.loss_scale), 1.)
          self.assertEqual(self.evaluate(opt.dynamic_counter), 0)
        self.assertEqual(self.evaluate(opt.iterations), 1)
      # Run the model again.
      run_fn = lambda: opt.minimize(lambda: var / replicas + 1., var_list=[var])
      opt_op = strategy.experimental_run(run_fn)
      # Assert new values.
      self.evaluate(tf.compat.v1.global_variables_initializer())
      status.run_restore_ops()
      self.evaluate(strategy.experimental_local_results(opt_op))
      self.assertEqual(self.evaluate(var), -1)
      slot_var = opt.get_slot(var, 'momentum')
      self.assertEqual(self.evaluate(slot_var).item(), -2)
      self.assertEqual(self.evaluate(opt.iterations), 2)
      self.assertEqual(self.evaluate(inner_opt.my_var), 1)
      # Restore model again to test restoring after slots are created
      status = checkpoint.restore(save_path)
      if save_with_ls and restore_with_ls:
        status.assert_consumed()
      elif save_with_ls:
        status.assert_existing_objects_matched()
      elif restore_with_ls:
        status.assert_nontrivial_match()
      status.run_restore_ops()
      self.assertEqual(self.evaluate(var), 1)
      self.assertEqual(self.evaluate(slot_var).item(), -1)
  @combinations.generate(combinations.combine(
      get_config=['v1', 'v2', 'tf2_3'], from_config=['v1', 'v2']))
  def testGetConfigFixed(self, get_config, from_config):
    """Tests fixed-loss-scale configs round-trip across V1/V2/TF-2.3 formats."""
    # Get a config from LossScaleOptimizerV1, LossScaleOptimizer, or the
    # LossScaleOptimizer from TF 2.3. Then restore the config into a
    # LossScaleOptimizerV1 or LossScaleOptimizer
    opt = gradient_descent.SGD(2., momentum=0.5)
    if get_config == 'v1':
      opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, 2)
      config = opt.get_config()
    elif get_config == 'v2':
      opt = loss_scale_optimizer.LossScaleOptimizer(
          opt, dynamic=False, initial_scale=2)
      config = opt.get_config()
    else:
      self.assertEqual(get_config, 'tf2_3')
      # Hand-written config in the TF 2.3 serialization format.
      config = {
          'optimizer': {
              'class_name': 'SGD',
              'config': {
                  'learning_rate': 2.0,
                  'momentum': 0.5,
                  'decay': 0.0,
                  'nesterov': False,
                  'name': 'SGD',
              }
          },
          'loss_scale': {
              'class_name': 'FixedLossScale',
              'config': {'loss_scale_value': 2.0}
          },
      }
    if from_config == 'v1':
      opt = loss_scale_optimizer.LossScaleOptimizerV1.from_config(config)
    else:
      self.assertEqual(from_config, 'v2')
      opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
    # Force hyperparameters to be created
    opt.lr  # pylint: disable=pointless-statement
    self.evaluate(tf.compat.v1.global_variables_initializer())
    # Test attributes on the optimizer
    self.assertEqual(self.evaluate(opt.lr), 2.)
    self.assertEqual(self.evaluate(opt.inner_optimizer.lr), 2.)
    self.assertEqual(self.evaluate(opt.momentum), 0.5)
    self.assertEqual(self.evaluate(opt.loss_scale), 2.)
    self.assertEqual(opt.initial_scale, 2.)
    self.assertIsNone(opt.dynamic_growth_steps)
    self.assertIsNone(opt.dynamic_counter)
    self.assertFalse(opt.dynamic)
    # Ensure the optimizer can be used
    var = tf.Variable([5.0])
    run_op = self._run_fn_with_grad_check(
        tf.distribute.get_strategy(), var, opt, 2)()
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self._run_if_in_graph_mode(run_op)
    self.assertEqual(self.evaluate(var), [3.])
  @combinations.generate(combinations.combine(
      get_config=['v1', 'v2', 'tf2_3'], from_config=['v1', 'v2']))
  def testGetConfigDynamic(self, get_config, from_config):
    """Tests dynamic-loss-scale configs round-trip across V1/V2/TF-2.3 formats."""
    # Get a config from LossScaleOptimizerV1, LossScaleOptimizer, or the
    # LossScaleOptimizer from TF 2.3. Then restore the config into a
    # LossScaleOptimizerV1 or LossScaleOptimizer
    opt = gradient_descent.SGD(2., momentum=0.5)
    if get_config == 'v1':
      loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
          initial_loss_scale=2, increment_period=3)
      opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
      config = opt.get_config()
    elif get_config == 'v2':
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
                                                    dynamic_growth_steps=3)
      config = opt.get_config()
    else:
      self.assertEqual(get_config, 'tf2_3')
      # Hand-written config in the TF 2.3 serialization format.
      config = {
          'optimizer': {
              'class_name': 'SGD',
              'config': {
                  'learning_rate': 2.0,
                  'momentum': 0.5,
                  'decay': 0.0,
                  'nesterov': False,
                  'name': 'SGD',
              }
          },
          'loss_scale': {
              'class_name': 'DynamicLossScale',
              'config': {
                  'initial_loss_scale': 2.0,
                  'increment_period': 3,
                  'multiplier': 2.0,
              }
          },
      }
    if from_config == 'v1':
      opt = loss_scale_optimizer.LossScaleOptimizerV1.from_config(config)
    else:
      self.assertEqual(from_config, 'v2')
      opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
    # Force hyperparameters to be created
    opt.lr  # pylint: disable=pointless-statement
    self.evaluate(tf.compat.v1.global_variables_initializer())
    # Test attributes on the optimizer
    self.assertEqual(self.evaluate(opt.lr), 2.)
    self.assertEqual(self.evaluate(opt.inner_optimizer.lr), 2.)
    self.assertEqual(self.evaluate(opt.momentum), 0.5)
    self.assertEqual(self.evaluate(opt.loss_scale), 2.)
    self.assertEqual(opt.initial_scale, 2.)
    self.assertEqual(opt.dynamic_growth_steps, 3.)
    self.assertTrue(opt.dynamic)
    # Ensure the optimizer can be used
    var = tf.Variable([5.0])
    run_op = self._run_fn_with_grad_check(
        tf.distribute.get_strategy(), var, opt, 2)()
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self._run_if_in_graph_mode(run_op)
    self.assertEqual(self.evaluate(var), [3.])
    self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
  def test_from_config_with_invalid_multiplier(self):
    """Tests deserializing a config with a non-2 DynamicLossScale multiplier fails."""
    config = {
        'optimizer': {
            'class_name': 'SGD',
            'config': {
                'learning_rate': 2.0,
                'momentum': 0.5,
                'decay': 0.0,
                'nesterov': False,
                'name': 'SGD',
            }
        },
        'loss_scale': {
            'class_name': 'DynamicLossScale',
            'config': {
                'initial_loss_scale': 2.0,
                'increment_period': 3,
                # Only multiplier == 2 is supported; 4.0 must be rejected.
                'multiplier': 4.0,
            }
        },
    }
    expected_error = ('Cannot deserialize LossScaleOptimizer with a '
                      'DynamicLossScale whose multiplier is not 2. Got '
                      'DynamicLossScale: DynamicLossScale\\(')
    with self.assertRaisesRegex(ValueError, expected_error):
      loss_scale_optimizer.LossScaleOptimizer.from_config(config)
    with self.assertRaisesRegex(ValueError, expected_error):
      loss_scale_optimizer.LossScaleOptimizerV1.from_config(config)
@parameterized.named_parameters({
'testcase_name': 'V2',
'use_v1': False,
}, {
'testcase_name': 'V1',
'use_v1': True,
},)
def testSerializationWithBuiltInOptimizer(self, use_v1):
opt = gradient_descent.SGD(2., momentum=0.5)
if use_v1:
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=2., increment_period=3.)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
else:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2.,
dynamic_growth_steps=3.)
config = optimizers.serialize(opt)
opt = optimizers.deserialize(config)
# Force hyperparameters to be created
opt.lr # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.lr), 2.)
self.assertEqual(self.evaluate(opt.inner_optimizer.momentum), 0.5)
self.assertEqual(self.evaluate(opt.loss_scale), 2.)
self.assertEqual(opt.dynamic_growth_steps, 3.)
self.assertTrue(opt.dynamic, 4.)
# Deserializing a LossScaleOptimizer always always results in a V2
# LossScaleOptimizer, even if serialized with a LossScaleOptimizerV1.
self.assertAllEqual(type(opt), loss_scale_optimizer.LossScaleOptimizer)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.])
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
  def testSerializationWithCustomOptimizer(self):
    """Round-trips a LossScaleOptimizer wrapping a user-defined optimizer.

    Custom attributes of the inner optimizer (here `my_attribute`) must
    survive serialize/deserialize when the custom class is supplied via
    `custom_objects`.
    """
    class MySGD(gradient_descent.SGD):

      def __init__(self, *args, **kwargs):
        super(MySGD, self).__init__(*args, **kwargs)
        self.my_attribute = 123

    opt = MySGD(2., momentum=0.5)
    opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2.,
                                                  dynamic_growth_steps=3.)
    config = optimizers.serialize(opt)
    custom_objects = {'MySGD': MySGD}
    opt = optimizers.deserialize(config, custom_objects=custom_objects)
    # Force hyperparameters to be created
    opt.lr  # pylint: disable=pointless-statement
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertEqual(self.evaluate(opt.lr), 2.)
    self.assertEqual(self.evaluate(opt.inner_optimizer.momentum), 0.5)
    self.assertEqual(self.evaluate(opt.loss_scale), 2.)
    self.assertEqual(opt.dynamic_growth_steps, 3.)
    self.assertEqual(opt.inner_optimizer.my_attribute, 123)
  def testUnsupportedStrategy(self):
    """Loss scaling raises both at construction and at run time under
    CentralStorageStrategy, which it does not support."""
    strategy = tf.distribute.experimental.CentralStorageStrategy()
    expected_error = (
        'Loss scaling is not supported with the tf.distribute.Strategy: '
        'CentralStorageStrategy. Try using a different Strategy, e.g. a '
        'MirroredStrategy')
    # Constructing inside the strategy scope fails immediately.
    with strategy.scope(), self.assertRaisesRegex(ValueError, expected_error):
      loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
    # Constructing outside the scope succeeds, but running a step under the
    # strategy fails with the same error.
    opt = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
    with strategy.scope():
      var = tf.Variable(1.0)
      loss = lambda: var * 2.0
      run_fn = lambda: opt.minimize(loss, [var])
      with self.assertRaisesRegex(ValueError, expected_error):
        strategy.experimental_run(run_fn)
def testInvalidArgsWithFixedLossScale(self):
opt = gradient_descent.SGD()
with self.assertRaisesRegex(
ValueError, '"initial_scale" must be specified if "dynamic" is False'):
loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False)
opt = gradient_descent.SGD()
with self.assertRaisesRegex(
ValueError, '"dynamic_growth_steps" must be None if "dynamic" is '
'False, but got: 2'):
loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=1, dynamic_growth_steps=2)
def testDynamicMustBeBool(self):
opt = gradient_descent.SGD()
with self.assertRaisesRegex(
TypeError, '"dynamic" argument to LossScaleOptimizer.__init__ must be '
"a bool, but got: 'dynamic'"):
loss_scale_optimizer.LossScaleOptimizer(opt, 'dynamic')
def testErrorWhenNesting(self):
opt = gradient_descent.SGD()
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
with self.assertRaisesRegex(
TypeError, 'LossScaleOptimizer cannot wrap another LossScaleOptimizer'):
loss_scale_optimizer.LossScaleOptimizer(opt)
def testErrorWrappingSameOptimizerMultipleTimes(self):
inner_opt = gradient_descent.SGD()
loss_scale_optimizer.LossScaleOptimizer(inner_opt)
with self.assertRaisesRegex(
ValueError,
'"inner_optimizer" is already wrapped by a LossScaleOptimizer.'):
loss_scale_optimizer.LossScaleOptimizer(inner_opt)
def testErrorWhenWrappingLegacyKerasOptimizers(self):
sgd = legacy_sgd.SGD()
with self.assertRaisesRegex(
TypeError, 'not an instance of `tensorflow.python.keras.optimizers`'):
loss_scale_optimizer.LossScaleOptimizer(sgd)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 46,710 | 41.775641 | 80 | py |
keras | keras-master/keras/mixed_precision/test_util.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains testing utilities related to mixed precision."""
import tensorflow.compat.v2 as tf
from keras import regularizers
from keras.engine import base_layer
def create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None):
  """Builds an identity function whose gradient is checked against a value.

  This serves as a hook to assert that intermediate gradients have a certain
  value. The returned function forwards its input unchanged; its gradient
  function is also the identity, except that it asserts the incoming gradient
  equals `expected_gradient` and, when given, has dtype `expected_dtype`.

  Args:
    expected_gradient: Value the incoming gradient is asserted to equal.
    expected_dtype: If not None, dtype the incoming gradient must have.

  Returns:
    An identity function whose gradient function asserts the gradient has a
    certain value.
  """

  @tf.custom_gradient
  def _checked_identity(x):
    """Identity op whose gradient function performs the checks."""
    x = tf.identity(x)

    def grad_fn(dx):
      """Asserts `dx` matches the expectations, then passes it through."""
      if expected_dtype:
        assert dx.dtype == expected_dtype, (
            'dx.dtype should be %s but is: %s' % (expected_dtype, dx.dtype))
      expected_tensor = tf.convert_to_tensor(
          expected_gradient, dtype=dx.dtype, name='expected_gradient')
      # Control dependency is to ensure input is available. It's possible the
      # dataset will throw a StopIteration to indicate there is no more data,
      # in which case we don't want to run the assertion.
      with tf.control_dependencies([x]):
        assert_op = tf.compat.v1.assert_equal(dx, expected_tensor)
      with tf.control_dependencies([assert_op]):
        dx = tf.identity(dx)
      return dx

    return x, grad_fn

  # Keras sometimes has trouble serializing Lambda layers with a decorated
  # function, so a plain undecorated wrapper is returned instead.
  def identity_with_grad_check(x):
    return _checked_identity(x)

  return identity_with_grad_check
def create_identity_with_nan_gradients_fn(have_nan_gradients):
  """Builds an identity function whose gradient can be forced to NaN.

  This serves as a hook to introduce NaN gradients into a model. The returned
  function forwards its input unchanged. During backprop, if the boolean
  tensor `have_nan_gradients` is True the gradient is NaN; otherwise the
  gradient is passed through unchanged.

  Args:
    have_nan_gradients: A scalar boolean tensor. If True, gradients will be
      NaN. Otherwise, the gradient function is the identity function.

  Returns:
    An identity function whose gradient function will return NaNs, if
    `have_nan_gradients` is True.
  """

  @tf.custom_gradient
  def _conditional_nan_identity(x):
    """Identity whose gradient is NaN iff `have_nan_gradients` is True."""
    x = tf.identity(x)

    def grad_fn(dx):
      return tf.cond(
          have_nan_gradients,
          lambda: dx * float('NaN'),
          lambda: dx
      )

    return x, grad_fn

  # Keras sometimes has trouble serializing Lambda layers with a decorated
  # function, so a plain undecorated wrapper is returned instead.
  def identity_with_nan_gradients(x):
    return _conditional_nan_identity(x)

  return identity_with_nan_gradients
class AssertTypeLayer(base_layer.Layer):
  """A layer which asserts it's inputs are a certain type."""

  def __init__(self, assert_type=None, **kwargs):
    # Normalize the expected type to a canonical dtype name up front, or
    # None to disable the check entirely.
    if assert_type:
      self._assert_type = tf.as_dtype(assert_type).name
    else:
      self._assert_type = None
    super(AssertTypeLayer, self).__init__(**kwargs)

  def assert_input_types(self, inputs):
    """Asserts `inputs` are of the correct type. Should be called in call()."""
    if not self._assert_type:
      return
    for inp in tf.nest.flatten(inputs):
      assert inp.dtype.base_dtype == self._assert_type, (
          'Input tensor has type %s which does not match assert type %s' %
          (inp.dtype.name, self._assert_type))
class MultiplyLayer(AssertTypeLayer):
  """A layer which multiplies its input by a scalar variable."""

  def __init__(self,
               regularizer=None,
               activity_regularizer=None,
               use_operator=False,
               var_name='v',
               **kwargs):
    """Initializes the MultiplyLayer.

    Args:
      regularizer: The weight regularizer on the scalar variable. May be a
        serialized dict, which is deserialized on construction.
      activity_regularizer: The activity regularizer. May also be a
        serialized dict.
      use_operator: If True, multiply using the * operator. If False, use
        tf.multiply.
      var_name: The name of the variable. It can be useful to pass a name
        other than 'v', to test having the attribute name (self.v) being
        different from the variable name.
      **kwargs: Passed to AssertTypeLayer constructor.
    """
    # Accept either regularizer objects or their serialized dict form.
    if isinstance(regularizer, dict):
      regularizer = regularizers.deserialize(regularizer,
                                             custom_objects=globals())
    if isinstance(activity_regularizer, dict):
      activity_regularizer = regularizers.deserialize(
          activity_regularizer, custom_objects=globals())
    self._regularizer = regularizer
    self._activity_regularizer = activity_regularizer
    self._use_operator = use_operator
    self._var_name = var_name
    super(MultiplyLayer, self).__init__(
        activity_regularizer=self._activity_regularizer, **kwargs)

  def build(self, _):
    self.v = self.add_weight(
        self._var_name, (), initializer='ones', regularizer=self._regularizer)
    self.built = True

  def call(self, inputs):
    self.assert_input_types(inputs)
    return self._multiply(inputs, self.v)

  def _multiply(self, x, y):
    return x * y if self._use_operator else tf.multiply(x, y)

  def get_config(self):
    config = super(MultiplyLayer, self).get_config()
    config.update({
        'regularizer': regularizers.serialize(self._regularizer),
        'activity_regularizer': regularizers.serialize(
            self._activity_regularizer),
        'use_operator': self._use_operator,
        'var_name': self._var_name,
        'assert_type': self._assert_type,
    })
    return config
class MultiplyLayerWithoutAutoCast(MultiplyLayer):
  """Same as MultiplyLayer, but does not use AutoCastVariables."""

  def build(self, _):
    # Keep the variable in float32 even when the layer dtype is a reduced
    # precision float, and opt out of automatic casting on reads.
    dtype = 'float32' if self.dtype in ('float16', 'bfloat16') else self.dtype
    self.v = self.add_weight(
        'v', (),
        initializer='ones',
        dtype=dtype,
        experimental_autocast=False,
        regularizer=self._regularizer)
    self.built = True

  def call(self, inputs):
    self.assert_input_types(inputs)
    # The variable is never auto-casted, so cast it manually to match inputs.
    assert self.v.dtype in (tf.float32, tf.float64)
    return self._multiply(inputs, tf.cast(self.v, inputs.dtype))
class IdentityRegularizer(regularizers.Regularizer):
  """Regularizer that returns its (float32) input unchanged."""

  def __call__(self, x):
    # The weight must not have been auto-casted to a lower precision.
    assert x.dtype == tf.float32
    return tf.identity(x)

  def get_config(self):
    # Stateless: nothing to serialize.
    return {}
class ReduceSumRegularizer(regularizers.Regularizer):
  """Regularizer whose loss is the sum of the weight's elements."""

  def __call__(self, x):
    return tf.reduce_sum(x)

  def get_config(self):
    # Stateless: nothing to serialize.
    return {}
| 7,884 | 35.336406 | 80 | py |
keras | keras-master/keras/mixed_precision/autocast_variable_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AutoCastVariable."""
import tensorflow.compat.v2 as tf
import os
import threading
from absl.testing import parameterized
import numpy as np
from keras.mixed_precision import autocast_variable
from keras.optimizer_v2 import adadelta
from keras.optimizer_v2 import adagrad
from keras.optimizer_v2 import adam
from keras.optimizer_v2 import adamax
from keras.optimizer_v2 import ftrl
from keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from keras.optimizer_v2 import nadam
from keras.optimizer_v2 import rmsprop
# Test parameterization that runs each case both without a distribution
# strategy (default) and under MirroredStrategy on two virtual CPU devices.
maybe_distribute = tf.__internal__.test.combinations.combine(distribution=[
    tf.__internal__.distribute.combinations.default_strategy,
    tf.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2
])
def get_var(val, dtype, name=None):
  """Creates a `tf.Variable` holding `val` with the given dtype and name."""
  return tf.Variable(val, dtype=dtype, name=name)
def set_cpu_logical_devices_to_at_least(num):
  """Create cpu logical devices of at least a given number.

  Args:
    num: The minimum total number of logical CPU devices to make available.

  Raises:
    RuntimeError: If no physical CPU device exists.
  """
  physical_devices = tf.config.list_physical_devices('CPU')
  if not physical_devices:
    raise RuntimeError('No CPU found')
  if len(physical_devices) >= num:
    return
  # By default each physical device corresponds to one logical device. We
  # create multiple logical devices for the last physical device so that we
  # have `num` logical devices in total.
  num_needed = num - len(physical_devices) + 1
  # Idiom fix: build the list with a comprehension rather than a manual
  # append loop.
  logical_devices = [
      tf.config.LogicalDeviceConfiguration() for _ in range(num_needed)
  ]
  # Create logical devices from the last device since sometimes the first GPU
  # is the primary graphic card and may have less memory available.
  tf.config.set_logical_device_configuration(physical_devices[-1],
                                             logical_devices)
@tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['graph', 'eager']))
class AutoCastVariableTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for AutoCastVariable, run in both graph and eager mode."""

  def setUp(self):
    # Some tests build a MirroredStrategy over '/cpu:1' and '/cpu:2', so at
    # least 3 logical CPU devices must exist.
    set_cpu_logical_devices_to_at_least(3)
    super(AutoCastVariableTest, self).setUp()
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_read(self, distribution):
    """Reads are cast to the scope dtype only inside an autocast scope."""
    with distribution.scope():
      x = get_var(1., tf.float32)
      x = autocast_variable.create_autocast_variable(x)
      self.evaluate(x.initializer)

      # outside of auto cast scope.
      self.assertEqual(x.dtype, tf.float32)
      self.assertEqual(x.value().dtype, tf.float32)
      self.assertEqual(x.read_value().dtype, tf.float32)
      self.assertEqual(tf.identity(x).dtype, tf.float32)

      # within auto cast scope of different dtype
      with autocast_variable.enable_auto_cast_variables(tf.float16):
        self.assertEqual(x.dtype, tf.float32)
        self.assertEqual(x.value().dtype, tf.float16)
        self.assertEqual(x.read_value().dtype, tf.float16)
        self.assertEqual(tf.identity(x).dtype, tf.float16)

      # within auto cast scope of same dtype
      with autocast_variable.enable_auto_cast_variables(tf.float32):
        self.assertEqual(x.dtype, tf.float32)
        self.assertEqual(x.value().dtype, tf.float32)
        self.assertEqual(x.read_value().dtype, tf.float32)
        self.assertEqual(tf.identity(x).dtype, tf.float32)
  def test_sparse_reads(self):
    """sparse_read and gather_nd are also cast inside an autocast scope."""
    x = get_var([1., 2], tf.float32)
    # DistributedVariables do not support sparse_read or gather_nd, so we pass
    # distribute=False
    x = autocast_variable.create_autocast_variable(x)
    self.evaluate(x.initializer)

    self.assertEqual(x.sparse_read([0]).dtype, tf.float32)
    self.assertEqual(x.gather_nd([0]).dtype, tf.float32)

    with autocast_variable.enable_auto_cast_variables(tf.float16):
      self.assertEqual(x.sparse_read([0]).dtype, tf.float16)
      self.assertEqual(x.gather_nd([0]).dtype, tf.float16)
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_read_nested_scopes(self, distribution):
    """In nested autocast scopes the innermost dtype wins, then is restored."""
    with distribution.scope():
      x = get_var(1., tf.float32)
      x = autocast_variable.create_autocast_variable(x)
      self.evaluate(x.initializer)

      with autocast_variable.enable_auto_cast_variables(tf.float16):
        self.assertEqual(x.read_value().dtype, tf.float16)

        with autocast_variable.enable_auto_cast_variables(tf.float32):
          self.assertEqual(x.read_value().dtype, tf.float32)

        # Exiting the inner scope restores the outer scope's cast dtype.
        self.assertEqual(x.read_value().dtype, tf.float16)
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_dtype_is_not_string(self, distribution):
    """dtype and true_dtype are tf.DType objects (not strings), in and out of
    an autocast scope."""
    with distribution.scope():
      x = get_var(1., tf.float32)
      x = autocast_variable.create_autocast_variable(x)
      self.assertEqual(x.dtype, tf.float32)
      self.assertIsInstance(x.dtype, tf.DType)
      self.assertEqual(x.true_dtype, tf.float32)
      self.assertIsInstance(x.true_dtype, tf.DType)

      dtype = tf.float16
      with autocast_variable.enable_auto_cast_variables(dtype):
        self.assertEqual(x.dtype, tf.float32)
        self.assertIsInstance(x.dtype, tf.DType)
        self.assertEqual(x.true_dtype, tf.float32)
        self.assertIsInstance(x.true_dtype, tf.DType)
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_method_delegations(self, distribution):
    """AutoCastVariable delegates the full tf.Variable API correctly."""
    # Test AutoCastVariable correctly delegates Variable methods to the
    # underlying variable.
    with self.test_session(), distribution.scope():
      for read_dtype in (tf.float32, tf.float16):
        if tf.distribute.has_strategy() and not tf.executing_eagerly():
          # MirroredVariable.assign will (incorrectly) return a Mirrored value
          # instead of a MirroredVariable in graph mode.
          # So we cannot properly wrap it in an AutoCastVariable.
          evaluate = self.evaluate
        else:
          # `evaluate` additionally checks that assign results remain
          # AutoCastVariables and read back at the scope's cast dtype.
          def evaluate(var):
            self.assertIsInstance(var, autocast_variable.AutoCastVariable)
            self.assertEqual(tf.identity(var).dtype, read_dtype)  # pylint: disable=cell-var-from-loop
            return self.evaluate(var)

        x = get_var(7., tf.float32)
        x = autocast_variable.create_autocast_variable(x)
        with autocast_variable.enable_auto_cast_variables(read_dtype):
          self.evaluate(x.initializer)
          self.assertEqual(self.evaluate(x.value()), 7)
          self.assertEqual(self.evaluate(x.read_value()), 7)
          self.assertTrue(x.trainable)
          self.assertEqual(x.synchronization, x._variable.synchronization)
          self.assertEqual(x.aggregation, x._variable.aggregation)
          self.assertEqual(self.evaluate(x.initialized_value()), 7)
          if not tf.executing_eagerly():
            if not tf.distribute.has_strategy():
              # These functions are not supported for DistributedVariables
              x.load(9)
              self.assertEqual(x.eval(), 9)
            self.assertEqual(self.evaluate(x.initial_value), 7)
            self.assertEqual(x.op, x._variable.op)
            self.assertEqual(x.graph, x._variable.graph)
          if not tf.distribute.has_strategy():
            # These attributes are not supported for DistributedVariables
            self.assertIsNone(x.constraint)
            self.assertEqual(x.initializer, x._variable.initializer)
          self.assertEqual(evaluate(x.assign(8)), 8)
          self.assertEqual(evaluate(x.assign_add(2)), 10)
          self.assertEqual(evaluate(x.assign_sub(3)), 7)
          self.assertEqual(x.name, x._variable.name)
          self.assertEqual(x.device, x._variable.device)
          self.assertEqual(x.shape, ())
          self.assertEqual(x.get_shape(), ())

        if not tf.distribute.has_strategy():
          # Test scatter_* methods. These are not supported for
          # DistributedVariables
          x = get_var([7, 8], tf.float32)
          x = autocast_variable.create_autocast_variable(x)
          with autocast_variable.enable_auto_cast_variables(read_dtype):
            self.evaluate(x.initializer)
            self.assertAllEqual(self.evaluate(x.value()), [7, 8])

            def slices(val, index):
              return tf.IndexedSlices(
                  values=tf.constant(val, dtype=tf.float32),
                  indices=tf.constant(index, dtype=tf.int32),
                  dense_shape=tf.constant([2], dtype=tf.int32))

            self.assertAllEqual(evaluate(x.scatter_sub(slices(1., 0))), [6, 8])
            self.assertAllEqual(evaluate(x.scatter_add(slices(1., 0))), [7, 8])
            self.assertAllEqual(evaluate(x.scatter_max(slices(9., 1))), [7, 9])
            self.assertAllEqual(evaluate(x.scatter_min(slices(8., 1))), [7, 8])
            self.assertAllEqual(evaluate(x.scatter_mul(slices(2., 1))), [7, 16])
            self.assertAllEqual(evaluate(x.scatter_div(slices(2., 1))), [7, 8])
            self.assertAllEqual(
                evaluate(x.scatter_update(slices(4., 1))), [7, 4])
            self.assertAllEqual(
                evaluate(x.scatter_nd_sub([[0], [1]], [1., 2.])), [6, 2])
            self.assertAllEqual(
                evaluate(x.scatter_nd_add([[0], [1]], [1., 2.])), [7, 4])
            self.assertAllEqual(
                evaluate(x.scatter_nd_update([[0], [1]], [1., 2.])), [1, 2])
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_operator_overloads(self, distribution):
    """Arithmetic, comparison, and indexing operators work on the variable."""
    with distribution.scope():
      for read_dtype in (tf.float32, tf.float16):
        x = get_var(7., tf.float32)
        x = autocast_variable.create_autocast_variable(x)
        with autocast_variable.enable_auto_cast_variables(read_dtype):
          self.evaluate(x.initializer)
          self.assertAlmostEqual(8, self.evaluate(x + 1))
          self.assertAlmostEqual(10, self.evaluate(3 + x))
          self.assertAlmostEqual(14, self.evaluate(x + x))
          self.assertAlmostEqual(5, self.evaluate(x - 2))
          self.assertAlmostEqual(6, self.evaluate(13 - x))
          self.assertAlmostEqual(0, self.evaluate(x - x))
          self.assertAlmostEqual(14, self.evaluate(x * 2))
          self.assertAlmostEqual(21, self.evaluate(3 * x))
          self.assertAlmostEqual(49, self.evaluate(x * x))
          self.assertAlmostEqual(3.5, self.evaluate(x / 2))
          self.assertAlmostEqual(1.5, self.evaluate(10.5 / x))
          self.assertAlmostEqual(3, self.evaluate(x // 2))
          self.assertAlmostEqual(2, self.evaluate(15 // x))
          if read_dtype == tf.float32:
            # The "mod" operator does not support float16
            self.assertAlmostEqual(1, self.evaluate(x % 2))
            self.assertAlmostEqual(2, self.evaluate(16 % x))
          self.assertTrue(self.evaluate(x < 12))
          self.assertTrue(self.evaluate(x <= 12))
          self.assertFalse(self.evaluate(x > 12))
          self.assertFalse(self.evaluate(x >= 12))
          self.assertFalse(self.evaluate(12 < x))
          self.assertFalse(self.evaluate(12 <= x))
          self.assertTrue(self.evaluate(12 > x))
          self.assertTrue(self.evaluate(12 >= x))
          self.assertAlmostEqual(343, self.evaluate(pow(x, 3)), places=4)
          self.assertAlmostEqual(128, self.evaluate(pow(2, x)), places=4)
          self.assertAlmostEqual(-7, self.evaluate(-x))
          self.assertAlmostEqual(7, self.evaluate(abs(x)))

        x = get_var([7, 8, 9], tf.float32)
        x = autocast_variable.create_autocast_variable(x)
        self.evaluate(x.initializer)
        self.assertEqual(self.evaluate(x[1]), 8)
        if tf.__internal__.tf2.enabled() and tf.executing_eagerly():
          # Elementwise == / != are only defined in TF2 eager mode.
          self.assertAllEqual(x == [7., 8., 10.], [True, True, False])
          self.assertAllEqual(x != [7., 8., 10.], [False, False, True])
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_assign(self, distribution):
    """assign/assign_add/assign_sub always take float32 values, even inside a
    float16 autocast scope."""
    with distribution.scope():
      x = get_var(0., tf.float32)
      x = autocast_variable.create_autocast_variable(x)
      self.evaluate(x.initializer)

      # outside of auto cast scope.
      v1 = tf.constant(3., dtype=tf.float32)
      v2 = tf.constant(3., dtype=tf.float16)

      def run_and_check():
        # Assign float32 values
        self.assertAllClose(3., self.evaluate(x.assign(v1)))
        self.assertAllClose(3. * 2, self.evaluate(x.assign_add(v1)))
        self.assertAllClose(3., self.evaluate(x.assign_sub(v1)))

        # Attempt to assign float16 values
        with self.assertRaisesRegex(
            ValueError,
            'conversion requested dtype float32 for Tensor with dtype float16'):
          self.evaluate(x.assign(v2))
        with self.assertRaisesRegex(
            ValueError,
            'conversion requested dtype float32 for Tensor with dtype float16'):
          self.evaluate(x.assign_add(v2))
        with self.assertRaisesRegex(
            ValueError,
            'conversion requested dtype float32 for Tensor with dtype float16'):
          self.evaluate(x.assign_sub(v2))

        # Assign Python floats
        self.assertAllClose(0., self.evaluate(x.assign(0.)))
        self.assertAllClose(3., self.evaluate(x.assign(3.)))
        self.assertAllClose(3. * 2, self.evaluate(x.assign_add(3.)))
        self.assertAllClose(3., self.evaluate(x.assign_sub(3.)))

        # Assign multiple times
        # This currently doesn't work in graph mode if a strategy is used
        if not tf.distribute.has_strategy() or tf.executing_eagerly():
          assign = x.assign(1.)
          self.assertAllClose(1., self.evaluate(assign))
          self.assertAllClose(0., self.evaluate(assign.assign(0.)))
          assign_add = x.assign_add(3.)
          self.assertAllClose(3., self.evaluate(assign_add))
          self.assertAllClose(3. * 3,
                              self.evaluate(x.assign_add(3.).assign_add(3.)))
          self.assertAllClose(3. * 3, x)
          assign_sub = x.assign_sub(3.)
          self.assertAllClose(3. * 2, self.evaluate(assign_sub))
          self.assertAllClose(0.,
                              self.evaluate(x.assign_sub(3.).assign_sub(3.)))

        # Assign with read_value=False
        self.assertIsNone(self.evaluate(x.assign(1., read_value=False)))
        self.assertAllClose(1., self.evaluate(x))
        self.assertIsNone(self.evaluate(x.assign_add(2., read_value=False)))
        self.assertAllClose(3., self.evaluate(x))
        self.assertIsNone(self.evaluate(x.assign_sub(3., read_value=False)))
        self.assertAllClose(0., self.evaluate(x))

        # Use the tf.assign functions instead of the var.assign methods.
        self.assertAllClose(0., self.evaluate(tf.compat.v1.assign(x, 0.)))
        self.assertAllClose(3., self.evaluate(tf.compat.v1.assign(x, 3.)))
        self.assertAllClose(3. * 2,
                            self.evaluate(tf.compat.v1.assign_add(x, 3.)))
        self.assertAllClose(3., self.evaluate(tf.compat.v1.assign_sub(x, 3.)))

      run_and_check()
      # reset x
      self.evaluate(x.assign(0.))
      # within auto cast scope.
      with autocast_variable.enable_auto_cast_variables(tf.float16):
        # assign still expect float32 value even if in float16 scope
        run_and_check()
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_assign_tf_function(self, distribution):
    """Chained assigns work inside a tf.function under a float16 scope."""
    if not tf.executing_eagerly():
      self.skipTest('Test is not compatible with graph mode')

    with distribution.scope():
      x = get_var(0., tf.float32)
      x = autocast_variable.create_autocast_variable(x)

      @tf.function
      def run_assign():
        return x.assign(1.).assign_add(3.).assign_add(3.).assign_sub(2.)

      with autocast_variable.enable_auto_cast_variables(tf.float16):
        # 1 + 3 + 3 - 2 == 5
        self.assertAllClose(5., self.evaluate(run_assign()))
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_op_attribute(self, distribution):
    """The `.op` attribute matches tf.Variable semantics in each mode."""
    with distribution.scope():
      x = get_var(0., tf.float32)
      x = autocast_variable.create_autocast_variable(x)

      # Variable.op raises an AttributeError in Eager mode and is an op in graph
      # mode. Variable.assign(...).op is None in Eager mode and an op in Graph
      # mode or a tf.function. We test this is also true of AutoCastVariable.
      if tf.executing_eagerly():
        with self.assertRaises(AttributeError):
          x.op  # pylint: disable=pointless-statement
        self.assertIsNone(x.assign(1.0).op)
        self.assertIsNone(x.assign_add(1.0).op)
        self.assertIsNone(x.assign_sub(1.0).op)
      else:
        self.assertIsNotNone(x.op)
        self.assertIsNotNone(x.assign(1.0).op)
        self.assertIsNotNone(x.assign_add(1.0).op)
        self.assertIsNotNone(x.assign_sub(1.0).op)

      @tf.function
      def func():
        self.assertIsNotNone(x.assign(1.0).op)
        self.assertIsNotNone(x.assign_add(1.0).op)
        self.assertIsNotNone(x.assign_sub(1.0).op)

      func()
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_tf_function_control_dependencies(self, distribution):
    """Assign results can be used as control dependencies in a tf.function."""
    if not tf.executing_eagerly():
      self.skipTest('Test is not compatible with graph mode')

    with distribution.scope():
      x = get_var(0., tf.float32)
      x = autocast_variable.create_autocast_variable(x)

      @tf.function
      def func():
        update = x.assign_add(1.)
        with tf.control_dependencies([update]):
          x.assign_add(1.)

      func()
      # Both increments must have run: 0 + 1 + 1 == 2.
      self.assertAllClose(2., self.evaluate(x))
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_assign_stays_in_true_dtype(self, distribution):
    """Assigning inside a float16 scope still stores full float32 precision."""
    with distribution.scope():
      x = get_var(1., tf.float32)
      x = autocast_variable.create_autocast_variable(x)
      self.evaluate(x.initializer)
      # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not
      # in fp32
      small_val = np.finfo('float16').eps / 2
      small_tensor = tf.constant(small_val, dtype=tf.float32)
      with autocast_variable.enable_auto_cast_variables(tf.float16):
        # Variable should be increased, despite it appearing to be the same
        # float16 value.
        self.evaluate(x.assign(1. + small_tensor))
        self.assertEqual(1., self.evaluate(x.value()))
      self.assertEqual(1. + small_val, self.evaluate(x))

      self.evaluate(x.assign(1.))
      with autocast_variable.enable_auto_cast_variables(tf.float16):
        self.evaluate(x.assign_add(small_tensor))
        self.assertEqual(1., self.evaluate(x.value()))
      self.assertEqual(1. + small_val, self.evaluate(x))
  def test_thread_local_autocast_dtype(self):
    """The autocast dtype is thread-local: other threads see float32."""
    x = get_var(1., tf.float32)
    x = autocast_variable.create_autocast_variable(x)
    self.evaluate(x.initializer)

    with autocast_variable.enable_auto_cast_variables(tf.float16):
      self.assertEqual(tf.identity(x).dtype, tf.float16)

      # New threads should not see the modified value of the autocast dtype.
      var_dtype = None

      def f():
        nonlocal var_dtype
        var_dtype = x._cast_dtype

      thread = threading.Thread(target=f)
      thread.start()
      thread.join()
      self.assertEqual(var_dtype, tf.float32)
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_checkpoint(self, distribution):
    """Saving and restoring a checkpoint restores the variable's value."""
    with self.test_session():
      with distribution.scope():
        x = get_var(1., tf.float32)
        x = autocast_variable.create_autocast_variable(x)
      self.evaluate(x.initializer)
      self.evaluate(x.assign(123.))

      checkpoint = tf.train.Checkpoint(x=x)
      prefix = os.path.join(self.get_temp_dir(), 'ckpt')
      save_path = checkpoint.save(prefix)
      # Overwrite, then restore must bring back the saved value.
      self.evaluate(x.assign(234.))
      checkpoint.restore(save_path).assert_consumed().run_restore_ops()
      self.assertEqual(self.evaluate(x), 123.)
  @tf.__internal__.distribute.combinations.generate(maybe_distribute)
  def test_invalid_wrapped_variable(self, distribution):
    """Only floating-point tf.Variables can be wrapped."""
    with distribution.scope():
      # Wrap a non-variable
      with self.assertRaisesRegex(ValueError, 'variable must be of type'):
        x = tf.constant([1.], dtype=tf.float32)
        autocast_variable.create_autocast_variable(x)

      # Wrap a non-floating point variable
      with self.assertRaisesRegex(ValueError,
                                  'variable must be a floating point'):
        x = get_var(1, tf.int32)
        autocast_variable.create_autocast_variable(x)
  def test_repr(self):
    """__repr__ shows the true dtype and the current cast dtype."""
    # We do not test with DistributionStrategy because we do not want to rely on
    # the exact __repr__ output of a DistributedVariable.
    x = get_var(1., tf.float32, name='x')
    x = autocast_variable.create_autocast_variable(x)
    if tf.executing_eagerly():
      # Eager repr additionally includes the numpy value, so only check the
      # prefix.
      self.assertStartsWith(
          repr(x),
          "<AutoCastVariable 'x:0' shape=() dtype=float32 "
          "dtype_to_cast_to=float32, numpy="
      )
      with autocast_variable.enable_auto_cast_variables(tf.float16):
        self.assertStartsWith(
            repr(x),
            "<AutoCastVariable 'x:0' shape=() dtype=float32 "
            "dtype_to_cast_to=float16, numpy="
        )
    else:
      self.assertEqual(
          repr(x),
          "<AutoCastVariable 'x:0' shape=() dtype=float32 "
          "dtype_to_cast_to=float32>"
      )
      with autocast_variable.enable_auto_cast_variables(tf.float16):
        self.assertEqual(
            repr(x),
            "<AutoCastVariable 'x:0' shape=() dtype=float32 "
            "dtype_to_cast_to=float16>"
        )
  def test_repr_distributed(self):
    """__repr__ of a distributed wrapper names the inner variable's class."""
    strategy = tf.distribute.MirroredStrategy(['/cpu:1', '/cpu:2'])
    with strategy.scope():
      x = get_var(1., tf.float32)
      x = autocast_variable.create_autocast_variable(x)
      # The inner class name depends on whether the strategy wraps variables
      # in per-replica policies.
      use_policy = getattr(strategy.extended, '_use_var_policy', False)
      if use_policy:
        self.assertRegex(
            repr(x).replace('\n', ' '),
            '<AutoCastDistributedVariable dtype=float32 '
            'dtype_to_cast_to=float32 '
            'inner_variable=DistributedVariable.*>')
      else:
        self.assertRegex(
            repr(x).replace('\n', ' '),
            '<AutoCastDistributedVariable dtype=float32 '
            'dtype_to_cast_to=float32 '
            'inner_variable=MirroredVariable.*>')
  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(
      optimizer_class=[
          adadelta.Adadelta,
          adagrad.Adagrad,
          adam.Adam,
          adamax.Adamax,
          ftrl.Ftrl,
          gradient_descent_v2.SGD,
          nadam.Nadam,
          rmsprop.RMSprop,
          tf.compat.v1.train.GradientDescentOptimizer
      ],
      use_tf_function=[False, True]))
  def test_optimizer(self, optimizer_class, use_tf_function):
    """Every optimizer updates an AutoCastVariable like a plain Variable."""
    if use_tf_function and not tf.executing_eagerly():
      self.skipTest('Test does not support graph mode with tf.function')
    x = get_var(1., tf.float32)
    x = autocast_variable.create_autocast_variable(x)
    y = get_var(1., tf.float32)
    opt = optimizer_class(learning_rate=1.)

    def f():
      # Minimize both the AutoCastVariable and the normal tf.Variable. Both
      # variables should be updated to the same value.
      op = opt.minimize(lambda: x + y, var_list=[x, y])
      return None if tf.compat.v1.executing_eagerly_outside_functions() else op

    if use_tf_function:
      f = tf.function(f)

    if tf.executing_eagerly():
      f()
    else:
      op = f()
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(op)
    # Assert the AutoCastVariable has changed from its initial value
    self.assertNotEqual(self.evaluate(x), 1.)
    # Assert AutoCastVariable is updated correctly by comparing it to the normal
    # variable
    self.assertAlmostEqual(self.evaluate(x), self.evaluate(y))
    if optimizer_class in (gradient_descent_v2.SGD,
                           tf.compat.v1.train.GradientDescentOptimizer):
      # With SGD, the variables decreases by exactly 1
      self.assertEqual(self.evaluate(x), 0)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 24,615 | 41.810435 | 117 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.