repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
keras | keras-master/keras/layers/merge.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers that can merge several inputs into one."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine import base_layer_utils
from keras.engine.base_layer import Layer
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
class _Merge(Layer):
  """Generic merge layer for elementwise merge functions.
  Used to implement `Sum`, `Average`, etc.
  """
  def __init__(self, **kwargs):
    """Initializes a Merge layer.
    Args:
      **kwargs: standard layer keyword arguments.
    """
    super(_Merge, self).__init__(**kwargs)
    # Merge layers pass masks through (see `compute_mask` below).
    self.supports_masking = True
  def _merge_function(self, inputs):
    # Subclasses implement the actual elementwise combination here.
    raise NotImplementedError
  def _compute_elemwise_op_output_shape(self, shape1, shape2):
    """Computes the shape of the resultant of an elementwise operation.
    Args:
      shape1: tuple or None. Shape of the first tensor
      shape2: tuple or None. Shape of the second tensor
    Returns:
      expected output shape when an element-wise operation is
      carried out on 2 tensors with shapes shape1 and shape2.
      tuple or None.
    Raises:
      ValueError: if shape1 and shape2 are not compatible for
      element-wise operations.
    """
    if None in [shape1, shape2]:
      # Unknown rank on either side: output shape is unknown too.
      return None
    elif len(shape1) < len(shape2):
      # Normalize so that `shape1` is always the higher-rank operand.
      return self._compute_elemwise_op_output_shape(shape2, shape1)
    elif not shape2:
      # `shape2` is a scalar; broadcasting leaves `shape1` unchanged.
      return shape1
    # Leading dims of `shape1` with no counterpart in `shape2` pass through.
    output_shape = list(shape1[:-len(shape2)])
    # Trailing dims follow numpy-style broadcasting: None is contagious,
    # a 1 broadcasts against anything, otherwise dims must match exactly.
    for i, j in zip(shape1[-len(shape2):], shape2):
      if i is None or j is None:
        output_shape.append(None)
      elif i == 1:
        output_shape.append(j)
      elif j == 1:
        output_shape.append(i)
      else:
        if i != j:
          raise ValueError(
              'Inputs have incompatible shapes. '
              f'Received shapes {shape1} and {shape2}')
        output_shape.append(i)
    return tuple(output_shape)
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape[0], tuple):
      raise ValueError(
          'A merge layer should be called on a list of inputs. '
          f'Received: input_shape={input_shape} (not a list of shapes)')
    if len(input_shape) < 2:
      raise ValueError('A merge layer should be called '
                       'on a list of at least 2 inputs. '
                       f'Got {len(input_shape)} inputs. '
                       f'Full input_shape received: {input_shape}')
    # Collect the statically-known batch sizes; more than one distinct
    # value means the inputs can never be merged elementwise.
    batch_sizes = {s[0] for s in input_shape if s} - {None}
    if len(batch_sizes) > 1:
      raise ValueError(
          'Cannot merge tensors with different batch sizes. '
          f'Got tensors with shapes {input_shape}')
    if input_shape[0] is None:
      output_shape = None
    else:
      output_shape = input_shape[0][1:]
    # Fold the per-sample shapes together purely to surface incompatible
    # shapes early; the result itself is discarded here.
    for i in range(1, len(input_shape)):
      if input_shape[i] is None:
        shape = None
      else:
        shape = input_shape[i][1:]
      output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
    # If the inputs have different ranks, we have to reshape them
    # to make them broadcastable.
    if None not in input_shape and len(set(map(len, input_shape))) == 1:
      self._reshape_required = False
    else:
      self._reshape_required = True
  def call(self, inputs):
    if not isinstance(inputs, (list, tuple)):
      raise ValueError(
          'A merge layer should be called on a list of inputs. '
          f'Received: inputs={inputs} (not a list of tensors)')
    # `_reshape_required` is set in `build` when input ranks differ or are
    # unknown; in that case the inputs are normalized before merging.
    if self._reshape_required:
      reshaped_inputs = []
      input_ndims = list(map(backend.ndim, inputs))
      if None not in input_ndims:
        # If ranks of all inputs are available,
        # we simply expand each of them at axis=1
        # until all of them have the same rank.
        max_ndim = max(input_ndims)
        for x in inputs:
          x_ndim = backend.ndim(x)
          for _ in range(max_ndim - x_ndim):
            x = tf.expand_dims(x, axis=1)
          reshaped_inputs.append(x)
        return self._merge_function(reshaped_inputs)
      else:
        # Transpose all inputs so that batch size is the last dimension.
        # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
        # This lets broadcasting align the (matching) batch dims from the
        # right even though the static ranks are unknown.
        transposed = False
        for x in inputs:
          x_ndim = backend.ndim(x)
          if x_ndim is None:
            # Fully dynamic rank: flatten to 2D, swap the two axes, then
            # restore the (rolled) shape computed at runtime.
            x_shape = tf.shape(x)
            batch_size = x_shape[0]
            new_shape = backend.concatenate(
                [x_shape[1:],
                 tf.expand_dims(batch_size, axis=-1)])
            x_transposed = tf.reshape(
                x,
                tf.stack(
                    [batch_size, tf.reduce_prod(x_shape[1:])], axis=0))
            x_transposed = tf.transpose(x_transposed, perm=(1, 0))
            x_transposed = tf.reshape(x_transposed, new_shape)
            reshaped_inputs.append(x_transposed)
            transposed = True
          elif x_ndim > 1:
            # Known rank > 1: a plain transpose rolling axis 0 to the end.
            dims = list(range(1, x_ndim)) + [0]
            reshaped_inputs.append(tf.transpose(x, perm=dims))
            transposed = True
          else:
            # We don't transpose inputs if they are 1D vectors or scalars.
            reshaped_inputs.append(x)
        y = self._merge_function(reshaped_inputs)
        y_ndim = backend.ndim(y)
        if transposed:
          # If inputs have been transposed, we have to transpose the output too.
          if y_ndim is None:
            # Dynamic-rank inverse of the flatten/swap/reshape dance above.
            y_shape = tf.shape(y)
            y_ndim = tf.shape(y_shape)[0]
            batch_size = y_shape[y_ndim - 1]
            new_shape = backend.concatenate([
                tf.expand_dims(batch_size, axis=-1), y_shape[:y_ndim - 1]
            ])
            y = tf.reshape(y, (-1, batch_size))
            y = tf.transpose(y, perm=(1, 0))
            y = tf.reshape(y, new_shape)
          elif y_ndim > 1:
            # Roll the batch axis back to the front.
            dims = [y_ndim - 1] + list(range(y_ndim - 1))
            y = tf.transpose(y, perm=dims)
        return y
    else:
      # All inputs share the same known rank: merge directly.
      return self._merge_function(inputs)
  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Per-sample shape: broadcast all input shapes (minus batch) together.
    if input_shape[0] is None:
      output_shape = None
    else:
      output_shape = input_shape[0][1:]
    for i in range(1, len(input_shape)):
      if input_shape[i] is None:
        shape = None
      else:
        shape = input_shape[i][1:]
      output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
    # Prepend the batch size if it is statically known (and unique).
    batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
    if len(batch_sizes) == 1:
      output_shape = (list(batch_sizes)[0],) + output_shape
    else:
      output_shape = (None,) + output_shape
    return output_shape
  def compute_mask(self, inputs, mask=None):
    """Combines per-input masks with a logical AND; returns None if unmasked."""
    if mask is None:
      return None
    if not isinstance(mask, (tuple, list)):
      raise ValueError(f'`mask` should be a list. Received: mask={mask}')
    if not isinstance(inputs, (tuple, list)):
      raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}')
    if len(mask) != len(inputs):
      raise ValueError(
          'The lists `inputs` and `mask` should have the same length. '
          f'Received: inputs={inputs} of length {len(inputs)}, and '
          f'mask={mask} of length {len(mask)}')
    if all(m is None for m in mask):
      return None
    # Stack the non-None masks along a new leading axis, then AND-reduce it.
    masks = [tf.expand_dims(m, axis=0) for m in mask if m is not None]
    return backend.all(
        backend.concatenate(masks, axis=0), axis=0, keepdims=False)
@keras_export('keras.layers.Add')
class Add(_Merge):
  """Layer that computes the element-wise sum of its inputs.

  Expects a list of tensors that all share the same shape and produces a
  single tensor of that shape containing their sum.

  Example:

  ```python
  x1 = tf.random.normal((2, 3, 4))
  x2 = tf.random.normal((2, 3, 4))
  y = tf.keras.layers.Add()([x1, x2])  # y.shape == (2, 3, 4)
  ```

  In a functional model, `Add()([x1, x2])` is equivalent to
  `tf.keras.layers.add([x1, x2])`.
  """

  def _merge_function(self, inputs):
    # Fold a running sum over every remaining input tensor.
    total = inputs[0]
    for tensor in inputs[1:]:
      total = total + tensor
    return total
@keras_export('keras.layers.Subtract')
class Subtract(_Merge):
  """Layer that subtracts two inputs.

  Expects a list of exactly two tensors of identical shape and returns
  `inputs[0] - inputs[1]`, also of that shape.

  Example:

  ```python
  import keras
  input1 = keras.layers.Input(shape=(16,))
  x1 = keras.layers.Dense(8, activation='relu')(input1)
  input2 = keras.layers.Input(shape=(32,))
  x2 = keras.layers.Dense(8, activation='relu')(input2)
  subtracted = keras.layers.Subtract()([x1, x2])  # same as layers.subtract
  out = keras.layers.Dense(4)(subtracted)
  model = keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    super(Subtract, self).build(input_shape)
    # `_Merge.build` only enforces "at least 2"; subtraction needs exactly 2.
    if len(input_shape) != 2:
      raise ValueError(
          'A `Subtract` layer should be called on exactly 2 inputs. '
          f'Received: input_shape={input_shape}')

  def _merge_function(self, inputs):
    if len(inputs) != 2:
      raise ValueError(
          'A `Subtract` layer should be called on exactly 2 inputs. '
          f'Received: inputs={inputs}')
    minuend, subtrahend = inputs
    return minuend - subtrahend
@keras_export('keras.layers.Multiply')
class Multiply(_Merge):
  """Layer that computes the element-wise product of its inputs.

  Expects a list of tensors that all share the same shape and produces a
  single tensor of that shape containing their product.

  Example:

  ```python
  x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  multiplied = tf.keras.layers.Multiply()([x1, x2])  # shape (5, 8)
  ```
  """

  def _merge_function(self, inputs):
    # Fold a running product over every remaining input tensor.
    product = inputs[0]
    for tensor in inputs[1:]:
      product = product * tensor
    return product
@keras_export('keras.layers.Average')
class Average(_Merge):
  """Layer that computes the element-wise average of its inputs.

  Expects a list of tensors that all share the same shape and produces a
  single tensor of that shape holding their arithmetic mean.

  Example:

  ```python
  x1 = np.ones((2, 2))
  x2 = np.zeros((2, 2))
  y = tf.keras.layers.Average()([x1, x2])  # [[0.5, 0.5], [0.5, 0.5]]
  ```

  Raises:
    ValueError: If there is a shape mismatch between the inputs and the shapes
      cannot be broadcasted to match.
  """

  def _merge_function(self, inputs):
    # Sum everything first, then divide once by the input count.
    count = len(inputs)
    total = inputs[0]
    for tensor in inputs[1:]:
      total = total + tensor
    return total / count
@keras_export('keras.layers.Maximum')
class Maximum(_Merge):
  """Layer that computes the element-wise maximum of its inputs.

  Expects a list of tensors that all share the same shape and produces a
  single tensor of that shape whose entries are the largest value seen at
  each position across the inputs.

  Example:

  ```python
  x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  maxed = tf.keras.layers.Maximum()([x1, x2])  # shape (5, 8)
  ```
  """

  def _merge_function(self, inputs):
    # Fold a running element-wise maximum over the remaining inputs.
    result = inputs[0]
    for tensor in inputs[1:]:
      result = tf.maximum(result, tensor)
    return result
@keras_export('keras.layers.Minimum')
class Minimum(_Merge):
  """Layer that computes the element-wise minimum of its inputs.

  Expects a list of tensors that all share the same shape and produces a
  single tensor of that shape whose entries are the smallest value seen at
  each position across the inputs.

  Example:

  ```python
  x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  minned = tf.keras.layers.Minimum()([x1, x2])  # shape (5, 8)
  ```
  """

  def _merge_function(self, inputs):
    # Fold a running element-wise minimum over the remaining inputs.
    result = inputs[0]
    for tensor in inputs[1:]:
      result = tf.minimum(result, tensor)
    return result
@keras_export('keras.layers.Concatenate')
class Concatenate(_Merge):
  """Layer that concatenates a list of inputs.
  It takes as input a list of tensors, all of the same shape except
  for the concatenation axis, and returns a single tensor that is the
  concatenation of all inputs.
  >>> x = np.arange(20).reshape(2, 2, 5)
  >>> print(x)
  [[[ 0  1  2  3  4]
    [ 5  6  7  8  9]]
   [[10 11 12 13 14]
    [15 16 17 18 19]]]
  >>> y = np.arange(20, 30).reshape(2, 1, 5)
  >>> print(y)
  [[[20 21 22 23 24]]
   [[25 26 27 28 29]]]
  >>> tf.keras.layers.Concatenate(axis=1)([x, y])
  <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
  array([[[ 0,  1,  2,  3,  4],
          [ 5,  6,  7,  8,  9],
          [20, 21, 22, 23, 24]],
         [[10, 11, 12, 13, 14],
          [15, 16, 17, 18, 19],
          [25, 26, 27, 28, 29]]])>
  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> concatted = tf.keras.layers.Concatenate()([x1, x2])
  >>> concatted.shape
  TensorShape([5, 16])
  """
  def __init__(self, axis=-1, **kwargs):
    """Instantiates a Concatenate layer.
    >>> x = np.arange(20).reshape(2, 2, 5)
    >>> print(x)
    [[[ 0  1  2  3  4]
      [ 5  6  7  8  9]]
     [[10 11 12 13 14]
      [15 16 17 18 19]]]
    >>> y = np.arange(20, 30).reshape(2, 1, 5)
    >>> print(y)
    [[[20 21 22 23 24]]
     [[25 26 27 28 29]]]
    >>> tf.keras.layers.Concatenate(axis=1)([x, y])
    <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
    array([[[ 0,  1,  2,  3,  4],
            [ 5,  6,  7,  8,  9],
            [20, 21, 22, 23, 24]],
           [[10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [25, 26, 27, 28, 29]]])>
    Args:
      axis: Axis along which to concatenate.
      **kwargs: standard layer keyword arguments.
    """
    super(Concatenate, self).__init__(**kwargs)
    self.axis = axis
    self.supports_masking = True
    # Concatenation never needs the rank-normalizing reshape path taken in
    # `_Merge.call`, so the flag is pinned here instead of set in `build`.
    self._reshape_required = False
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape[0], tuple) or len(input_shape) < 1:
      raise ValueError(
          'A `Concatenate` layer should be called on a list of '
          f'at least 1 input. Received: input_shape={input_shape}')
    if all(shape is None for shape in input_shape):
      # Nothing statically known to validate.
      return
    # Drop the concatenation axis from every shape; the remaining dims must
    # all agree for the concat to be valid.
    reduced_inputs_shapes = [list(shape) for shape in input_shape]
    shape_set = set()
    for i in range(len(reduced_inputs_shapes)):
      del reduced_inputs_shapes[i][self.axis]
      shape_set.add(tuple(reduced_inputs_shapes[i]))
    if len(shape_set) != 1:
      err_msg = ('A `Concatenate` layer requires inputs with matching shapes '
                 'except for the concatenation axis. '
                 f'Received: input_shape={input_shape}')
      # Make sure all the shapes have same ranks.
      ranks = set(len(shape) for shape in shape_set)
      if len(ranks) != 1:
        raise ValueError(err_msg)
      # Get the only rank for the set.
      (rank,) = ranks
      for axis in range(rank):
        # Skip the Nones in the shape since they are dynamic, also the axis for
        # concat has been removed above.
        unique_dims = set(
            shape[axis] for shape in shape_set if shape[axis] is not None)
        if len(unique_dims) > 1:
          raise ValueError(err_msg)
  def _merge_function(self, inputs):
    return backend.concatenate(inputs, axis=self.axis)
  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if ((not isinstance(input_shape, (tuple, list))) or
        (not isinstance(input_shape[0], (tuple, list)))):
      # The tf_utils.shape_type_conversion decorator turns tensorshapes
      # into tuples, so we need to verify that `input_shape` is a list/tuple,
      # *and* that the individual elements are themselves shape tuples.
      raise ValueError(
          'A `Concatenate` layer should be called on a list of inputs. '
          f'Received: input_shape={input_shape}')
    input_shapes = input_shape
    output_shape = list(input_shapes[0])
    # Sum the sizes along the concat axis; a single unknown (None) size
    # makes the whole axis unknown, so stop accumulating immediately.
    for shape in input_shapes[1:]:
      if output_shape[self.axis] is None or shape[self.axis] is None:
        output_shape[self.axis] = None
        break
      output_shape[self.axis] += shape[self.axis]
    return tuple(output_shape)
  def compute_mask(self, inputs, mask=None):
    """Concatenates per-input masks along `self.axis`, then AND-reduces."""
    if mask is None:
      return None
    if not isinstance(mask, (tuple, list)):
      raise ValueError(f'`mask` should be a list. Received mask={mask}')
    if not isinstance(inputs, (tuple, list)):
      raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}')
    if len(mask) != len(inputs):
      raise ValueError(
          'The lists `inputs` and `mask` should have the same length. '
          f'Received: inputs={inputs} of length {len(inputs)}, and '
          f'mask={mask} of length {len(mask)}')
    if all(m is None for m in mask):
      return None
    # Make a list of masks while making sure
    # the dimensionality of each mask
    # is the same as the corresponding input.
    masks = []
    for input_i, mask_i in zip(inputs, mask):
      if mask_i is None:
        # Input is unmasked. Append all 1s to masks,
        masks.append(tf.ones_like(input_i, dtype='bool'))
      elif backend.ndim(mask_i) < backend.ndim(input_i):
        # Mask is smaller than the input, expand it
        masks.append(tf.expand_dims(mask_i, axis=-1))
      else:
        masks.append(mask_i)
    concatenated = backend.concatenate(masks, axis=self.axis)
    return backend.all(concatenated, axis=-1, keepdims=False)
  def get_config(self):
    # `axis` is the only extra state to serialize on top of the base layer.
    config = {
        'axis': self.axis,
    }
    base_config = super(Concatenate, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Dot')
class Dot(_Merge):
  """Layer that computes a dot product between samples in two tensors.
  E.g. if applied to a list of two tensors `a` and `b` of shape
  `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`
  where each entry `i` will be the dot product between
  `a[i]` and `b[i]`.
  >>> x = np.arange(10).reshape(1, 5, 2)
  >>> print(x)
  [[[0 1]
    [2 3]
    [4 5]
    [6 7]
    [8 9]]]
  >>> y = np.arange(10, 20).reshape(1, 2, 5)
  >>> print(y)
  [[[10 11 12 13 14]
    [15 16 17 18 19]]]
  >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])
  <tf.Tensor: shape=(1, 2, 2), dtype=int64, numpy=
  array([[[260, 360],
          [320, 445]]])>
  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2])
  >>> dotted.shape
  TensorShape([5, 1])
  """
  def __init__(self, axes, normalize=False, **kwargs):
    """Initializes a layer that computes the element-wise dot product.
    >>> x = np.arange(10).reshape(1, 5, 2)
    >>> print(x)
    [[[0 1]
      [2 3]
      [4 5]
      [6 7]
      [8 9]]]
    >>> y = np.arange(10, 20).reshape(1, 2, 5)
    >>> print(y)
    [[[10 11 12 13 14]
      [15 16 17 18 19]]]
    >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])
    <tf.Tensor: shape=(1, 2, 2), dtype=int64, numpy=
    array([[[260, 360],
            [320, 445]]])>
    Args:
      axes: Integer or tuple of integers,
        axis or axes along which to take the dot product. If a tuple, should
        be two integers corresponding to the desired axis from the first input
        and the desired axis from the second input, respectively. Note that the
        size of the two selected axes must match.
      normalize: Whether to L2-normalize samples along the
        dot product axis before taking the dot product.
        If set to True, then the output of the dot product
        is the cosine proximity between the two samples.
      **kwargs: Standard layer keyword arguments.
    """
    super(Dot, self).__init__(**kwargs)
    # Validate `axes` eagerly so misuse fails at construction time.
    if not isinstance(axes, int):
      if not isinstance(axes, (list, tuple)):
        raise TypeError(
            'Invalid type for argument `axes`: it should be '
            f'a list or an int. Received: axes={axes}')
      if len(axes) != 2:
        raise ValueError(
            'Invalid format for argument `axes`: it should contain two '
            f'elements. Received: axes={axes}')
      if not isinstance(axes[0], int) or not isinstance(axes[1], int):
        raise ValueError(
            'Invalid format for argument `axes`: list elements should be '
            f'integers. Received: axes={axes}')
    self.axes = axes
    self.normalize = normalize
    self.supports_masking = True
    # Dot never needs the rank-normalizing reshape path in `_Merge.call`.
    self._reshape_required = False
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape[0], tuple) or len(input_shape) != 2:
      raise ValueError(
          'A `Dot` layer should be called on a list of 2 inputs. '
          f'Received: input_shape={input_shape}')
    shape1 = input_shape[0]
    shape2 = input_shape[1]
    if shape1 is None or shape2 is None:
      # Unknown shapes: nothing to validate statically.
      return
    # Normalize `self.axes` into a per-input pair of non-negative axes;
    # a negative int is resolved modulo each input's own rank.
    if isinstance(self.axes, int):
      if self.axes < 0:
        axes = [self.axes % len(shape1), self.axes % len(shape2)]
      else:
        axes = [self.axes] * 2
    else:
      axes = self.axes
    if shape1[axes[0]] != shape2[axes[1]]:
      raise ValueError(
          'Incompatible input shapes: '
          f'axis values {shape1[axes[0]]} (at axis {axes[0]}) != '
          f'{shape2[axes[1]]} (at axis {axes[1]}). '
          f'Full input shapes: {shape1}, {shape2}')
  def _merge_function(self, inputs):
    # Ragged tensors are not supported by batch_dot.
    base_layer_utils.no_ragged_support(inputs, self.name)
    if len(inputs) != 2:
      raise ValueError(
          'A `Dot` layer should be called on exactly 2 inputs. '
          f'Received: inputs={inputs}')
    x1 = inputs[0]
    x2 = inputs[1]
    # Same axes normalization as in `build`, but against the runtime ranks.
    if isinstance(self.axes, int):
      if self.axes < 0:
        axes = [self.axes % backend.ndim(x1), self.axes % backend.ndim(x2)]
      else:
        axes = [self.axes] * 2
    else:
      axes = []
      for i in range(len(self.axes)):
        if self.axes[i] < 0:
          axes.append(self.axes[i] % backend.ndim(inputs[i]))
        else:
          axes.append(self.axes[i])
    if self.normalize:
      # L2-normalizing first turns the dot product into cosine proximity.
      x1 = tf.linalg.l2_normalize(x1, axis=axes[0])
      x2 = tf.linalg.l2_normalize(x2, axis=axes[1])
    output = backend.batch_dot(x1, x2, axes)
    return output
  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:
      raise ValueError(
          'A `Dot` layer should be called on a list of 2 inputs. '
          f'Received: input_shape={input_shape}')
    shape1 = list(input_shape[0])
    shape2 = list(input_shape[1])
    if isinstance(self.axes, int):
      if self.axes < 0:
        axes = [self.axes % len(shape1), self.axes % len(shape2)]
      else:
        axes = [self.axes] * 2
    else:
      axes = self.axes
    # Mirror batch_dot's shape rule: drop the contracted axis from each
    # input, drop the second input's batch dim, then concatenate.
    shape1.pop(axes[0])
    shape2.pop(axes[1])
    shape2.pop(0)
    output_shape = shape1 + shape2
    if len(output_shape) == 1:
      # batch_dot never returns a rank-1 tensor; pad with a trailing 1.
      output_shape += [1]
    return tuple(output_shape)
  def compute_mask(self, inputs, mask=None):
    # Dot products do not propagate masks.
    return None
  def get_config(self):
    # Serialize the two constructor arguments on top of the base config.
    config = {
        'axes': self.axes,
        'normalize': self.normalize,
    }
    base_config = super(Dot, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.add')
def add(inputs, **kwargs):
  """Functional interface to the `tf.keras.layers.Add` layer.

  Args:
    inputs: A list of input tensors (at least 2) with the same shape.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor containing the element-wise sum of the inputs, with the same
    shape as each input.

  Example:

  ```python
  x1 = tf.random.normal((2, 3, 4))
  x2 = tf.random.normal((2, 3, 4))
  y = tf.keras.layers.add([x1, x2])  # y.shape == (2, 3, 4)
  ```
  """
  layer = Add(**kwargs)
  return layer(inputs)
@keras_export('keras.layers.subtract')
def subtract(inputs, **kwargs):
  """Functional interface to the `Subtract` layer.

  Args:
    inputs: A list of input tensors (exactly 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor equal to `inputs[0] - inputs[1]`.

  Example:

  ```python
  import keras
  input1 = keras.layers.Input(shape=(16,))
  x1 = keras.layers.Dense(8, activation='relu')(input1)
  input2 = keras.layers.Input(shape=(32,))
  x2 = keras.layers.Dense(8, activation='relu')(input2)
  subtracted = keras.layers.subtract([x1, x2])
  out = keras.layers.Dense(4)(subtracted)
  model = keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """
  layer = Subtract(**kwargs)
  return layer(inputs)
@keras_export('keras.layers.multiply')
def multiply(inputs, **kwargs):
  """Functional interface to the `Multiply` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor containing the element-wise product of the inputs.

  Example:

  ```python
  x1 = np.arange(3.0)
  x2 = np.arange(3.0)
  tf.keras.layers.multiply([x1, x2])  # [0., 1., 4.]
  ```
  """
  layer = Multiply(**kwargs)
  return layer(inputs)
@keras_export('keras.layers.average')
def average(inputs, **kwargs):
  """Functional interface to the `tf.keras.layers.Average` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor containing the element-wise average of the inputs.

  Raises:
    ValueError: If there is a shape mismatch between the inputs and the shapes
      cannot be broadcasted to match.

  Example:

  ```python
  x1 = np.ones((2, 2))
  x2 = np.zeros((2, 2))
  y = tf.keras.layers.average([x1, x2])  # [[0.5, 0.5], [0.5, 0.5]]
  ```
  """
  layer = Average(**kwargs)
  return layer(inputs)
@keras_export('keras.layers.maximum')
def maximum(inputs, **kwargs):
  """Functional interface to compute maximum (element-wise) list of `inputs`.

  This is equivalent to applying the `tf.keras.layers.Maximum` layer.

  Args:
    inputs: A list of input tensors (at least 2) of same shape.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor (of same shape as input tensor) with the element-wise
    maximum of the inputs.

  Raises:
    ValueError: If input tensors are of different shape.

  Example:

  ```python
  input1 = tf.keras.layers.Input(shape=(16,))
  x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
  input2 = tf.keras.layers.Input(shape=(32,))
  x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
  max_inp = tf.keras.layers.maximum([x1, x2])  # shape (None, 8)
  out = tf.keras.layers.Dense(4)(max_inp)
  model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """
  layer = Maximum(**kwargs)
  return layer(inputs)
@keras_export('keras.layers.minimum')
def minimum(inputs, **kwargs):
  """Functional interface to the `Minimum` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor containing the element-wise minimum of the inputs.
  """
  layer = Minimum(**kwargs)
  return layer(inputs)
@keras_export('keras.layers.concatenate')
def concatenate(inputs, axis=-1, **kwargs):
  """Functional interface to the `Concatenate` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    axis: Concatenation axis.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the concatenation of the inputs alongside axis `axis`.

  Example:

  ```python
  x = np.arange(20).reshape(2, 2, 5)
  y = np.arange(20, 30).reshape(2, 1, 5)
  tf.keras.layers.concatenate([x, y], axis=1)  # shape (2, 3, 5)
  ```
  """
  layer = Concatenate(axis=axis, **kwargs)
  return layer(inputs)
@keras_export('keras.layers.dot')
def dot(inputs, axes, normalize=False, **kwargs):
  """Functional interface to the `Dot` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    axes: Integer or tuple of integers,
      axis or axes along which to take the dot product.
    normalize: Whether to L2-normalize samples along the
      dot product axis before taking the dot product.
      If set to True, then the output of the dot product
      is the cosine proximity between the two samples.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the dot product of the samples from the inputs.
  """
  layer = Dot(axes=axes, normalize=normalize, **kwargs)
  return layer(inputs)
| 32,808 | 32.274848 | 80 | py |
keras | keras-master/keras/layers/local_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for locally-connected layers."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from tensorflow.python.framework import test_util as tf_test_util
from keras import combinations
from keras import testing_utils
from keras.optimizer_v2 import rmsprop
from tensorflow.python.training.rmsprop import RMSPropOptimizer
_DATA_FORMAT_PADDING_IMPLEMENTATION = [{
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 1
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 1
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 1
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 1
}, {
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 2
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 2
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 2
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 2
}, {
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 3
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 3
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 3
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 3
}]
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnected1DLayersTest(tf.test.TestCase, parameterized.TestCase):
  """Shape and regularization tests for `keras.layers.LocallyConnected1D`.

  Each test is run in both graph and eager mode (via `combinations`) and is
  parameterized over every (data_format, padding, implementation) combo.
  """
  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_1d(self, data_format, padding, implementation):
    with self.cached_session():
      num_samples = 2
      num_steps = 8
      input_dim = 5
      filter_length = 3
      filters = 4
      for strides in [1]:
        # 'same' padding is only valid with stride 1; skip other combos.
        # (With the current single-element strides list this never fires,
        # but it keeps the loop safe if more strides are added.)
        if padding == 'same' and strides != 1:
          continue
        kwargs = {
            'filters': filters,
            'kernel_size': filter_length,
            'padding': padding,
            'strides': strides,
            'data_format': data_format,
            'implementation': implementation
        }
        if padding == 'same' and implementation == 1:
          # Implementation 1 rejects 'same' padding at construction time.
          self.assertRaises(ValueError, keras.layers.LocallyConnected1D,
                            **kwargs)
        else:
          # `layer_test` builds, runs, serializes and deserializes the layer.
          testing_utils.layer_test(
              keras.layers.LocallyConnected1D,
              kwargs=kwargs,
              input_shape=(num_samples, num_steps, input_dim))
  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_1d_regularization(self, data_format, padding,
                                              implementation):
    num_samples = 2
    num_steps = 8
    input_dim = 5
    filter_length = 3
    filters = 4
    kwargs = {
        'filters': filters,
        'kernel_size': filter_length,
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'data_format': data_format,
        'implementation': implementation,
        'padding': padding
    }
    if padding == 'same' and implementation == 1:
      # Implementation 1 rejects 'same' padding at construction time.
      self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs)
    else:
      with self.cached_session():
        layer = keras.layers.LocallyConnected1D(**kwargs)
        layer.build((num_samples, num_steps, input_dim))
        # kernel + bias regularizers attach at build time...
        self.assertEqual(len(layer.losses), 2)
        layer(
            keras.backend.variable(
                np.ones((num_samples, num_steps, input_dim))))
        # ...while the activity regularizer only attaches after a call.
        self.assertEqual(len(layer.losses), 3)
    # Separate check: constraints passed at construction are stored on the
    # corresponding weights (note `kwargs` is intentionally rebuilt here).
    k_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    kwargs = {
        'filters': filters,
        'kernel_size': filter_length,
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
    }
    with self.cached_session():
      layer = keras.layers.LocallyConnected1D(**kwargs)
      layer.build((num_samples, num_steps, input_dim))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnected2DLayersTest(tf.test.TestCase, parameterized.TestCase):
  """Shape, regularization and constraint tests for `LocallyConnected2D`."""

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d(self, data_format, padding, implementation):
    with self.cached_session():
      batch = 8
      out_filters = 3
      channels = 4
      rows = 6
      cols = 10
      for stride in [(1, 1), (2, 2)]:
        if padding == 'same' and stride != (1, 1):
          # 'same' padding is only exercised with unit strides.
          continue
        layer_kwargs = {
            'filters': out_filters,
            'kernel_size': 3,
            'padding': padding,
            'kernel_regularizer': 'l2',
            'bias_regularizer': 'l2',
            'strides': stride,
            'data_format': data_format,
            'implementation': implementation,
        }
        if padding == 'same' and implementation == 1:
          # `padding='same'` is rejected by implementation 1.
          self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
                            **layer_kwargs)
        else:
          testing_utils.layer_test(
              keras.layers.LocallyConnected2D,
              kwargs=layer_kwargs,
              input_shape=(batch, rows, cols, channels))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d_channels_first(self, data_format, padding,
                                              implementation):
    with self.cached_session():
      batch = 8
      out_filters = 3
      channels = 4
      rows = 6
      cols = 10
      layer_kwargs = {
          'filters': out_filters,
          'kernel_size': 3,
          'data_format': data_format,
          'implementation': implementation,
          'padding': padding,
      }
      if padding == 'same' and implementation == 1:
        self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
                          **layer_kwargs)
      else:
        testing_utils.layer_test(
            keras.layers.LocallyConnected2D,
            kwargs=layer_kwargs,
            input_shape=(batch, rows, cols, channels))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d_regularization(self, data_format, padding,
                                              implementation):
    batch = 2
    out_filters = 3
    channels = 4
    rows = 6
    cols = 7
    reg_kwargs = {
        'filters': out_filters,
        'kernel_size': 3,
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'implementation': implementation,
        'padding': padding,
        'data_format': data_format,
    }
    if padding == 'same' and implementation == 1:
      self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
                        **reg_kwargs)
    else:
      with self.cached_session():
        layer = keras.layers.LocallyConnected2D(**reg_kwargs)
        layer.build((batch, rows, cols, channels))
        # Weight regularizers register at build time...
        self.assertEqual(len(layer.losses), 2)
        layer(
            keras.backend.variable(np.ones((batch, rows, cols, channels))))
        # ...the activity regularizer only after the layer is called.
        self.assertEqual(len(layer.losses), 3)

    k_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    constraint_kwargs = {
        'filters': out_filters,
        'kernel_size': 3,
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
    }
    with self.cached_session():
      layer = keras.layers.LocallyConnected2D(**constraint_kwargs)
      layer.build((batch, rows, cols, channels))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnectedImplementationModeTest(tf.test.TestCase,
                                             parameterized.TestCase):
  """Checks that the three LocallyConnected implementations agree.

  Implementation modes 1, 2 and 3 are different computational strategies
  for the same layer; after copying weights from an implementation-2 model
  into the others, outputs must match both before and after training.
  """

  @parameterized.parameters([
      {'width': 1, 'data_format': 'channels_first'},
      {'width': 1, 'data_format': 'channels_last'},
      {'width': 6, 'data_format': 'channels_first'},
      {'width': 6, 'data_format': 'channels_last'},
  ])
  def test_locallyconnected_implementation(self, width, data_format):
    """Trains parallel models in each mode and compares their outputs.

    `width == 1` exercises LocallyConnected1D, `width > 1` the 2D variant
    (see `get_model`, which dispatches on the kernel rank).
    """
    with self.cached_session():
      num_samples = 4
      num_classes = 3
      num_epochs = 2
      # Fix both the NumPy and TF seeds so the three models see identical
      # data and initialization noise.
      np.random.seed(1)
      tf_test_util.random_seed.set_seed(1)
      targets = np.random.randint(0, num_classes, (num_samples,))
      height = 7
      filters = 2
      inputs = get_inputs(data_format, filters, height, num_samples, width)
      # With width == 1 the extra spatial dimension collapses, yielding a
      # 1D kernel/stride; otherwise a 2D one.
      kernel_x = (3,)
      kernel_y = () if width == 1 else (2,)
      stride_x = (1,)
      stride_y = () if width == 1 else (3,)
      layers = 2
      kwargs = {
          'layers': layers,
          'filters': filters,
          'kernel_size': kernel_x + kernel_y,
          'strides': stride_x + stride_y,
          'data_format': data_format,
          'num_classes': num_classes
      }
      model_1 = get_model(implementation=1, **kwargs)
      model_2 = get_model(implementation=2, **kwargs)
      model_3 = get_model(implementation=3, **kwargs)
      # Build models.
      model_1.train_on_batch(inputs, targets)
      model_2.train_on_batch(inputs, targets)
      model_3.train_on_batch(inputs, targets)
      # Copy weights.
      copy_model_weights(model_from=model_2, model_to=model_1)
      copy_model_weights(model_from=model_2, model_to=model_3)
      # Compare outputs at initialization.
      out_1 = model_1(inputs)
      out_2 = model_2(inputs)
      out_3 = model_3(inputs)
      self.assertAllCloseAccordingToType(
          out_2, out_1, rtol=1e-5, atol=1e-5)
      self.assertAllCloseAccordingToType(
          out_2, out_3, rtol=1e-5, atol=1e-5)
      self.assertAllCloseAccordingToType(
          out_1, out_3, rtol=1e-5, atol=1e-5)
      # Train.
      model_1.fit(
          x=inputs,
          y=targets,
          epochs=num_epochs,
          batch_size=num_samples,
          shuffle=False)
      model_2.fit(
          x=inputs,
          y=targets,
          epochs=num_epochs,
          batch_size=num_samples,
          shuffle=False)
      model_3.fit(
          x=inputs,
          y=targets,
          epochs=num_epochs,
          batch_size=num_samples,
          shuffle=False)
      # Compare outputs after a few training steps.
      out_1 = model_1(inputs)
      out_2 = model_2(inputs)
      out_3 = model_3(inputs)
      self.assertAllCloseAccordingToType(
          out_2, out_1, atol=2e-4)
      self.assertAllCloseAccordingToType(
          out_2, out_3, atol=2e-4)
      self.assertAllCloseAccordingToType(
          out_1, out_3, atol=2e-4)

  @parameterized.parameters([
      {
          'width': 1,
          'data_format': 'channels_first'
      },
      {
          'width': 1,
          'data_format': 'channels_last'
      },
      {
          'width': 6,
          'data_format': 'channels_first'
      },
      {
          'width': 6,
          'data_format': 'channels_last'
      },
  ])
  def test_locallyconnected_save(self, width, data_format):
    """Trains, saves and reloads a model per mode; outputs must round-trip."""
    with self.cached_session():
      num_samples = 4
      num_classes = 3
      num_epochs = 2
      np.random.seed(1)
      tf_test_util.random_seed.set_seed(1)
      targets = np.random.randint(0, num_classes, (num_samples,))
      height = 7
      filters = 2
      inputs = get_inputs(data_format, filters, height, num_samples, width)
      kernel_x = (3,)
      kernel_y = () if width == 1 else (2,)
      stride_x = (1,)
      stride_y = () if width == 1 else (3,)
      layers = 2
      kwargs = {
          'layers': layers,
          'filters': filters,
          'kernel_size': kernel_x + kernel_y,
          'strides': stride_x + stride_y,
          'data_format': data_format,
          'num_classes': num_classes
      }
      # `get_model_saveable` uses an optimizer that survives save/load.
      model_1 = get_model_saveable(implementation=1, **kwargs)
      model_2 = get_model_saveable(implementation=2, **kwargs)
      model_3 = get_model_saveable(implementation=3, **kwargs)
      # Train.
      model_1.fit(
          x=inputs,
          y=targets,
          epochs=num_epochs,
          batch_size=num_samples,
          shuffle=False)
      model_2.fit(
          x=inputs,
          y=targets,
          epochs=num_epochs,
          batch_size=num_samples,
          shuffle=False)
      model_3.fit(
          x=inputs,
          y=targets,
          epochs=num_epochs,
          batch_size=num_samples,
          shuffle=False)
      out_1_before = model_1(inputs)
      out_2_before = model_2(inputs)
      out_3_before = model_3(inputs)
      # `xent` is a custom loss, so it must be passed on reload.
      path_1 = os.path.join(self.get_temp_dir(), 'model_1_path')
      model_1.save(path_1)
      model_1 = keras.models.load_model(path_1, custom_objects={'xent': xent})
      path_2 = os.path.join(self.get_temp_dir(), 'model_2_path')
      model_2.save(path_2)
      model_2 = keras.models.load_model(path_2, custom_objects={'xent': xent})
      path_3 = os.path.join(self.get_temp_dir(), 'model_3_path')
      model_3.save(path_3)
      model_3 = keras.models.load_model(path_3, custom_objects={'xent': xent})
      out_1_after = model_1(inputs)
      out_2_after = model_2(inputs)
      out_3_after = model_3(inputs)
      self.assertAllCloseAccordingToType(out_1_before, out_1_after, atol=2e-4)
      self.assertAllCloseAccordingToType(out_2_before, out_2_after, atol=2e-4)
      self.assertAllCloseAccordingToType(out_3_before, out_3_after, atol=2e-4)

  def test_make_2d(self):
    """`make_2d` must agree with a NumPy reshape for arbitrary split dims."""
    input_shapes = [
        (0,),
        (0, 0),
        (1,),
        (2,),
        (3,),
        (1, 0),
        (0, 3),
        (1, 1),
        (1, 2),
        (3, 1),
        (2, 2),
        (3, 3),
        (1, 0, 1),
        (5, 2, 3),
        (3, 5, 6, 7, 0),
        (3, 2, 2, 4, 4),
        (1, 2, 3, 4, 7, 2),
    ]
    np.random.seed(1)
    for input_shape in input_shapes:
      inputs = np.random.normal(0, 1, input_shape)
      inputs_tf = keras.backend.variable(inputs)
      # Expected result: flatten all axes before `split_dim` into one and
      # all remaining axes into the other.
      split_dim = np.random.randint(0, inputs.ndim + 1)
      shape_2d = (int(np.prod(inputs.shape[:split_dim])),
                  int(np.prod(inputs.shape[split_dim:])))
      inputs_2d = np.reshape(inputs, shape_2d)
      inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim)
      inputs_2d_tf = keras.backend.get_value(inputs_2d_tf)
      self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf)
def get_inputs(data_format, filters, height, num_samples, width):
  """Builds a random float32 batch shaped according to `data_format`.

  A `width` of 1 yields a single spatial dimension (1D layers); any other
  width yields two spatial dimensions (2D layers). Raises
  `NotImplementedError` for an unknown `data_format`.
  """
  spatial = (height,) if width == 1 else (height, width)
  if data_format == 'channels_first':
    input_shape = (filters,) + spatial
  elif data_format == 'channels_last':
    input_shape = spatial + (filters,)
  else:
    raise NotImplementedError(data_format)
  return np.random.normal(
      0, 1, (num_samples,) + input_shape).astype(np.float32)
def xent(y_true, y_pred):
  """Sparse softmax cross-entropy on flattened integer labels."""
  flat_labels = keras.backend.reshape(y_true, (-1,))
  flat_labels = keras.backend.cast(flat_labels, tf.int32)
  return tf.compat.v1.nn.sparse_softmax_cross_entropy_with_logits(
      labels=flat_labels, logits=y_pred)
def get_model(implementation,
              filters,
              kernel_size,
              strides,
              layers,
              num_classes,
              data_format):
  """Builds a stacked locally-connected classifier.

  The kernel rank selects the layer type: rank 1 -> LocallyConnected1D,
  rank 2 -> LocallyConnected2D. Compiled with the v1 RMSPropOptimizer and
  the custom `xent` loss.
  """
  rank = len(kernel_size)
  if rank == 1:
    lc_layer = keras.layers.LocallyConnected1D
  elif rank == 2:
    lc_layer = keras.layers.LocallyConnected2D
  else:
    raise NotImplementedError(kernel_size)

  model = keras.Sequential()
  for _ in range(layers):
    model.add(
        lc_layer(
            padding='valid',
            kernel_initializer=keras.initializers.random_normal(),
            bias_initializer=keras.initializers.random_normal(),
            filters=filters,
            strides=strides,
            kernel_size=kernel_size,
            activation=keras.activations.relu,
            data_format=data_format,
            implementation=implementation))
  model.add(keras.layers.Flatten())
  model.add(keras.layers.Dense(num_classes))
  model.compile(
      optimizer=RMSPropOptimizer(0.01),
      metrics=[keras.metrics.categorical_accuracy],
      loss=xent)
  return model
def get_model_saveable(implementation, filters, kernel_size, strides, layers,
                       num_classes, data_format):
  """Variant of `get_model` compiled with the Keras-native RMSprop.

  Used by the save/reload test (`test_locallyconnected_save`), which needs
  an optimizer that serializes with the model.
  """
  rank = len(kernel_size)
  if rank == 1:
    lc_layer = keras.layers.LocallyConnected1D
  elif rank == 2:
    lc_layer = keras.layers.LocallyConnected2D
  else:
    raise NotImplementedError(kernel_size)

  model = keras.Sequential()
  for _ in range(layers):
    model.add(
        lc_layer(
            padding='valid',
            kernel_initializer=keras.initializers.random_normal(),
            bias_initializer=keras.initializers.random_normal(),
            filters=filters,
            strides=strides,
            kernel_size=kernel_size,
            activation=keras.activations.relu,
            data_format=data_format,
            implementation=implementation))
  model.add(keras.layers.Flatten())
  model.add(keras.layers.Dense(num_classes))
  model.compile(
      optimizer=rmsprop.RMSProp(learning_rate=0.01),
      metrics=[keras.metrics.categorical_accuracy],
      loss=xent)
  return model
def copy_lc_weights_2_to_1(lc_layer_2_from, lc_layer_1_to):
  """Ports implementation-2 weights into an implementation-1 layer.

  The source layer's kernel is multiplied by its `kernel_mask`, the axes
  are permuted into the implementation-1 layout, the zero (masked-out)
  entries are stripped, and the surviving values are reshaped into the
  target kernel's shape. The bias is copied as-is.
  """
  lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
  lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask
  data_format = lc_layer_2_from.data_format
  # Axis permutation depends on layer rank (1D vs 2D) and data format.
  if data_format == 'channels_first':
    if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
      permutation = (3, 0, 1, 2)
    elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
      permutation = (4, 5, 0, 1, 2, 3)
    else:
      raise NotImplementedError(lc_layer_2_from)
  elif data_format == 'channels_last':
    if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
      permutation = (2, 0, 1, 3)
    elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
      permutation = (3, 4, 0, 1, 2, 5)
    else:
      raise NotImplementedError(lc_layer_2_from)
  else:
    raise NotImplementedError(data_format)
  lc_2_kernel_masked = keras.backend.permute_dimensions(
      lc_2_kernel_masked, permutation)
  # Keep only the entries the mask left non-zero; these are exactly the
  # weights implementation 1 stores densely.
  lc_2_kernel_mask = tf.not_equal(
      lc_2_kernel_masked, 0)
  lc_2_kernel_flat = tf.compat.v1.boolean_mask(
      lc_2_kernel_masked, lc_2_kernel_mask)
  lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,
                                               lc_layer_1_to.kernel.shape)
  lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped)
  lc_2_bias = keras.backend.get_value(lc_2_bias)
  lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias])
def copy_lc_weights_2_to_3(lc_layer_2_from, lc_layer_3_to):
  """Ports implementation-2 weights into an implementation-3 layer.

  The masked kernel is flattened to 2D, transposed, and its non-zero
  entries are extracted in order; the bias is copied unchanged.
  """
  kernel, bias = lc_layer_2_from.weights
  masked = kernel * lc_layer_2_from.kernel_mask
  masked = keras.layers.local.make_2d(
      masked, split_dim=keras.backend.ndim(masked) // 2)
  masked = keras.backend.transpose(masked)
  nonzero = tf.not_equal(masked, 0)
  flat = tf.compat.v1.boolean_mask(masked, nonzero)
  lc_layer_3_to.set_weights([
      keras.backend.get_value(flat),
      keras.backend.get_value(bias),
  ])
def copy_model_weights(model_from, model_to):
  """Copies weights layer-by-layer from `model_from` into `model_to`.

  Locally-connected layers must use implementation 2 in `model_from`; they
  are converted to the target layer's implementation (1 or 3). Dense layers
  are copied verbatim. All other layer types are skipped.

  Raises:
    NotImplementedError: if a locally-connected source layer is not
      implementation 2, or the target implementation is not 1 or 3.
  """
  lc_types = (keras.layers.LocallyConnected2D, keras.layers.LocallyConnected1D)
  # Pair corresponding layers directly instead of indexing both models by
  # position.
  for layer_from, layer_to in zip(model_from.layers, model_to.layers):
    if isinstance(layer_from, lc_types) and isinstance(layer_to, lc_types):
      if layer_from.implementation != 2:
        # Only implementation 2 exposes the dense masked kernel the
        # conversion helpers read from.
        raise NotImplementedError
      if layer_to.implementation == 1:
        copy_lc_weights_2_to_1(layer_from, layer_to)
      elif layer_to.implementation == 3:
        copy_lc_weights_2_to_3(layer_from, layer_to)
      else:
        raise NotImplementedError
    elif isinstance(layer_from, keras.layers.Dense):
      weights, bias = layer_from.weights
      layer_to.set_weights([
          keras.backend.get_value(weights),
          keras.backend.get_value(bias),
      ])
if __name__ == '__main__':
  # Run every test case in this module via the TensorFlow test runner.
  tf.test.main()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional recurrent layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
@keras_parameterized.run_all_keras_modes
class ConvLSTM1DTest(keras_parameterized.TestCase):
  """State and output-shape tests for `keras.layers.ConvLSTM1D`."""

  @parameterized.named_parameters(
      *testing_utils.generate_combinations_with_testcase_name(
          data_format=['channels_first', 'channels_last'],
          return_sequences=[True, False]))
  def test_conv_lstm(self, data_format, return_sequences):
    kernel_len = 3
    filters = 3
    batch = 1
    channels = 2
    rows = 5
    timesteps = 2
    if data_format == 'channels_first':
      shape = (batch, timesteps, channels, rows)
    else:
      shape = (batch, timesteps, rows, channels)
    inputs = np.random.rand(*shape)

    # Return-state path: the layer should hand back two states, and the
    # stateful layer's first state must match the model output built on it.
    x = keras.Input(batch_shape=inputs.shape)
    layer = keras.layers.ConvLSTM1D(
        data_format=data_format,
        return_sequences=return_sequences,
        return_state=True,
        stateful=True,
        filters=filters,
        kernel_size=kernel_len,
        padding='valid')
    layer.build(inputs.shape)
    outputs = layer(x)
    states = outputs[1:]
    self.assertEqual(len(states), 2)
    model = keras.models.Model(x, states[0])
    state = model.predict(inputs)
    self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)

    # Output-shape check via the generic layer test harness.
    testing_utils.layer_test(
        keras.layers.ConvLSTM1D,
        kwargs={
            'data_format': data_format,
            'return_sequences': return_sequences,
            'filters': filters,
            'kernel_size': kernel_len,
            'padding': 'valid',
        },
        input_shape=inputs.shape)
@keras_parameterized.run_all_keras_modes
class ConvLSTM2DTest(keras_parameterized.TestCase):
  """State, regularization, cloning and save-related tests for ConvLSTM2D."""

  @parameterized.named_parameters(
      *testing_utils.generate_combinations_with_testcase_name(
          data_format=['channels_first', 'channels_last'],
          return_sequences=[True, False]))
  def test_conv_lstm(self, data_format, return_sequences):
    """Checks returned states and output shapes for both data formats."""
    num_row = 3
    num_col = 3
    filters = 2
    num_samples = 1
    input_channel = 2
    input_num_row = 5
    input_num_col = 5
    sequence_len = 2
    if data_format == 'channels_first':
      inputs = np.random.rand(num_samples, sequence_len,
                              input_channel,
                              input_num_row, input_num_col)
    else:
      inputs = np.random.rand(num_samples, sequence_len,
                              input_num_row, input_num_col,
                              input_channel)
    # test for return state:
    x = keras.Input(batch_shape=inputs.shape)
    kwargs = {'data_format': data_format,
              'return_sequences': return_sequences,
              'return_state': True,
              'stateful': True,
              'filters': filters,
              'kernel_size': (num_row, num_col),
              'padding': 'valid'}
    layer = keras.layers.ConvLSTM2D(**kwargs)
    layer.build(inputs.shape)
    outputs = layer(x)
    _, states = outputs[0], outputs[1:]
    # ConvLSTM carries two states (h, c).
    self.assertEqual(len(states), 2)
    model = keras.models.Model(x, states[0])
    state = model.predict(inputs)
    self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
    # test for output shape:
    testing_utils.layer_test(
        keras.layers.ConvLSTM2D,
        kwargs={'data_format': data_format,
                'return_sequences': return_sequences,
                'filters': filters,
                'kernel_size': (num_row, num_col),
                'padding': 'valid'},
        input_shape=inputs.shape)

  def test_conv_lstm_statefulness(self):
    """Checks predict/train/reset interactions of a stateful ConvLSTM2D."""
    # Tests for statefulness
    num_row = 3
    num_col = 3
    filters = 2
    num_samples = 1
    input_channel = 2
    input_num_row = 5
    input_num_col = 5
    sequence_len = 2
    inputs = np.random.rand(num_samples, sequence_len,
                            input_num_row, input_num_col,
                            input_channel)
    with self.cached_session():
      model = keras.models.Sequential()
      kwargs = {'data_format': 'channels_last',
                'return_sequences': False,
                'filters': filters,
                'kernel_size': (num_row, num_col),
                'stateful': True,
                'batch_input_shape': inputs.shape,
                'padding': 'same'}
      layer = keras.layers.ConvLSTM2D(**kwargs)
      model.add(layer)
      model.compile(optimizer='sgd', loss='mse')
      out1 = model.predict(np.ones_like(inputs))
      # train once so that the states change
      model.train_on_batch(np.ones_like(inputs), np.random.random(out1.shape))
      out2 = model.predict(np.ones_like(inputs))
      # if the state is not reset, output should be different
      self.assertNotEqual(out1.max(), out2.max())
      # check that output changes after states are reset
      # (even though the model itself didn't change)
      layer.reset_states()
      out3 = model.predict(np.ones_like(inputs))
      self.assertNotEqual(out3.max(), out2.max())
      # check that container-level reset_states() works
      model.reset_states()
      out4 = model.predict(np.ones_like(inputs))
      self.assertAllClose(out3, out4, atol=1e-5)
      # check that the call to `predict` updated the states
      out5 = model.predict(np.ones_like(inputs))
      self.assertNotEqual(out4.max(), out5.max())

  def test_conv_lstm_regularizers(self):
    """Regularizer losses: 3 at build time, 4 after the layer is called."""
    # check regularizers
    num_row = 3
    num_col = 3
    filters = 2
    num_samples = 1
    input_channel = 2
    input_num_row = 5
    input_num_col = 5
    sequence_len = 2
    inputs = np.random.rand(num_samples, sequence_len,
                            input_num_row, input_num_col,
                            input_channel)
    with self.cached_session():
      kwargs = {'data_format': 'channels_last',
                'return_sequences': False,
                'kernel_size': (num_row, num_col),
                'stateful': True,
                'filters': filters,
                'batch_input_shape': inputs.shape,
                'kernel_regularizer': keras.regularizers.L1L2(l1=0.01),
                'recurrent_regularizer': keras.regularizers.L1L2(l1=0.01),
                'activity_regularizer': 'l2',
                'bias_regularizer': 'l2',
                'kernel_constraint': 'max_norm',
                'recurrent_constraint': 'max_norm',
                'bias_constraint': 'max_norm',
                'padding': 'same'}
      layer = keras.layers.ConvLSTM2D(**kwargs)
      layer.build(inputs.shape)
      self.assertEqual(len(layer.losses), 3)
      layer(keras.backend.variable(np.ones(inputs.shape)))
      self.assertEqual(len(layer.losses), 4)

  def test_conv_lstm_dropout(self):
    """Smoke test for input and recurrent dropout."""
    # check dropout
    with self.cached_session():
      testing_utils.layer_test(
          keras.layers.ConvLSTM2D,
          kwargs={'data_format': 'channels_last',
                  'return_sequences': False,
                  'filters': 2,
                  'kernel_size': (3, 3),
                  'padding': 'same',
                  'dropout': 0.1,
                  'recurrent_dropout': 0.1},
          input_shape=(1, 2, 5, 5, 2))

  def test_conv_lstm_cloning(self):
    """A cloned model with copied weights must reproduce the outputs."""
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3)))
      test_inputs = np.random.random((2, 4, 5, 5, 3))
      reference_outputs = model.predict(test_inputs)
      weights = model.get_weights()
    # Use a new graph to clone the model
    with self.cached_session():
      clone = keras.models.clone_model(model)
      clone.set_weights(weights)
      outputs = clone.predict(test_inputs)
      self.assertAllClose(reference_outputs, outputs, atol=1e-5)

  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping the test as OOM occurred with 1 GB budget.')
  def test_conv_lstm_with_initial_state(self):
    """Encoder-decoder wiring: decoder seeded with the encoder's states."""
    num_samples = 32
    sequence_len = 5
    encoder_inputs = keras.layers.Input((None, 32, 32, 3))
    encoder = keras.layers.ConvLSTM2D(
        filters=32, kernel_size=(3, 3), padding='same',
        return_sequences=False, return_state=True)
    _, state_h, state_c = encoder(encoder_inputs)
    encoder_states = [state_h, state_c]
    decoder_inputs = keras.layers.Input((None, 32, 32, 4))
    decoder_lstm = keras.layers.ConvLSTM2D(
        filters=32, kernel_size=(3, 3), padding='same',
        return_sequences=False, return_state=False)
    decoder_outputs = decoder_lstm(decoder_inputs, initial_state=encoder_states)
    output = keras.layers.Conv2D(
        1, (3, 3), padding='same', activation='relu')(decoder_outputs)
    model = keras.Model([encoder_inputs, decoder_inputs], output)
    model.compile(
        optimizer='sgd', loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    x_1 = np.random.rand(num_samples, sequence_len, 32, 32, 3)
    x_2 = np.random.rand(num_samples, sequence_len, 32, 32, 4)
    y = np.random.rand(num_samples, 32, 32, 1)
    model.fit([x_1, x_2], y)
    model.predict([x_1, x_2])
@keras_parameterized.run_all_keras_modes
class ConvLSTM3DTest(keras_parameterized.TestCase):
  """State and output-shape tests for `keras.layers.ConvLSTM3D`."""

  @parameterized.named_parameters(
      *testing_utils.generate_combinations_with_testcase_name(
          data_format=['channels_first', 'channels_last'],
          return_sequences=[True, False]))
  def test_conv_lstm(self, data_format, return_sequences):
    kernel = (3, 3, 3)
    filters = 3
    batch = 1
    channels = 2
    volume = (5, 5, 5)
    timesteps = 2
    if data_format == 'channels_first':
      shape = (batch, timesteps, channels) + volume
    else:
      shape = (batch, timesteps) + volume + (channels,)
    inputs = np.random.rand(*shape)

    # Return-state path: the layer should hand back two states, and the
    # stateful layer's first state must match the model output built on it.
    x = keras.Input(batch_shape=inputs.shape)
    layer = keras.layers.ConvLSTM3D(
        data_format=data_format,
        return_sequences=return_sequences,
        return_state=True,
        stateful=True,
        filters=filters,
        kernel_size=kernel,
        padding='same')
    layer.build(inputs.shape)
    outputs = layer(x)
    states = outputs[1:]
    self.assertEqual(len(states), 2)
    model = keras.models.Model(x, states[0])
    state = model.predict(inputs)
    self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)

    # Output-shape check via the generic layer test harness.
    testing_utils.layer_test(
        keras.layers.ConvLSTM3D,
        kwargs={
            'data_format': data_format,
            'return_sequences': return_sequences,
            'filters': filters,
            'kernel_size': kernel,
            'padding': 'valid',
        },
        input_shape=inputs.shape)
if __name__ == '__main__':
  # Run every test case in this module via the TensorFlow test runner.
  tf.test.main()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that operate regularization via the addition of noise."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import backend
from keras.engine.base_layer import Layer
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.GaussianNoise')
class GaussianNoise(Layer):
  """Additive zero-centered Gaussian noise regularization layer.

  During training, noise sampled from N(0, stddev^2) is added to the
  input; at inference time the layer is the identity. This can be seen as
  a form of random data augmentation that mitigates overfitting, and is a
  natural corruption process for real-valued inputs.

  Args:
    stddev: Float, standard deviation of the noise distribution.

  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode (adding noise) or in inference mode (doing nothing).

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, stddev, **kwargs):
    super(GaussianNoise, self).__init__(**kwargs)
    self.supports_masking = True
    self.stddev = stddev

  def call(self, inputs, training=None):
    def noised():
      noise = backend.random_normal(
          shape=tf.shape(inputs),
          mean=0.,
          stddev=self.stddev,
          dtype=inputs.dtype)
      return inputs + noise

    # Only inject noise in the training phase.
    return backend.in_train_phase(noised, inputs, training=training)

  def get_config(self):
    base_config = super(GaussianNoise, self).get_config()
    base_config.update({'stddev': self.stddev})
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
@keras_export('keras.layers.GaussianDropout')
class GaussianDropout(Layer):
  """Multiplicative 1-centered Gaussian noise regularization layer.

  During training, the input is multiplied by noise drawn from a normal
  distribution with mean 1 and standard deviation
  `sqrt(rate / (1 - rate))`; at inference time the layer is the identity.

  Args:
    rate: Float, drop probability (as with `Dropout`).
      The multiplicative noise will have
      standard deviation `sqrt(rate / (1 - rate))`.

  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, rate, **kwargs):
    super(GaussianDropout, self).__init__(**kwargs)
    self.supports_masking = True
    self.rate = rate

  def call(self, inputs, training=None):
    if not 0 < self.rate < 1:
      # Rates outside (0, 1) make the layer a no-op.
      return inputs

    def noised():
      stddev = np.sqrt(self.rate / (1.0 - self.rate))
      noise = backend.random_normal(
          shape=tf.shape(inputs),
          mean=1.0,
          stddev=stddev,
          dtype=inputs.dtype)
      return inputs * noise

    return backend.in_train_phase(noised, inputs, training=training)

  def get_config(self):
    base_config = super(GaussianDropout, self).get_config()
    base_config.update({'rate': self.rate})
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
@keras_export('keras.layers.AlphaDropout')
class AlphaDropout(Layer):
  """Applies Alpha Dropout to the input.

  Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
  to their original values, in order to ensure the self-normalizing property
  even after this dropout.
  Alpha Dropout fits well to Scaled Exponential Linear Units
  by randomly setting activations to the negative saturation value.

  Args:
    rate: float, drop probability (as with `Dropout`).
      The multiplicative noise will have
      standard deviation `sqrt(rate / (1 - rate))`.
    noise_shape: Optional shape of the dropout mask. Defaults to the shape
      of `inputs`.
    seed: A Python integer to use as random seed.

  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super(AlphaDropout, self).__init__(**kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed
    self.supports_masking = True

  def _get_noise_shape(self, inputs):
    # Fall back to the dynamic input shape when no explicit mask shape
    # was provided.
    return self.noise_shape if self.noise_shape else tf.shape(inputs)

  def call(self, inputs, training=None):
    if 0. < self.rate < 1.:
      noise_shape = self._get_noise_shape(inputs)

      def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):  # pylint: disable=missing-docstring
        # SELU constants; alpha_p is the negative saturation value dropped
        # units are set to.
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        alpha_p = -alpha * scale

        kept_idx = tf.greater_equal(
            backend.random_uniform(noise_shape, seed=seed), rate)
        kept_idx = tf.cast(kept_idx, inputs.dtype)

        # Get affine transformation params
        a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
        b = -a * alpha_p * rate

        # Apply mask
        x = inputs * kept_idx + alpha_p * (1 - kept_idx)

        # Do affine transformation
        return a * x + b

      return backend.in_train_phase(dropped_inputs, inputs, training=training)
    return inputs

  def get_config(self):
    # Bug fix: `noise_shape` and `seed` are accepted by `__init__` but were
    # previously omitted here, so they were silently lost on
    # serialization/deserialization round trips.
    config = {
        'rate': self.rate,
        'noise_shape': self.noise_shape,
        'seed': self.seed,
    }
    base_config = super(AlphaDropout, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that act as activation functions."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
def get_globals():
  """Returns this module's global namespace dictionary."""
  return globals()
@keras_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
  """Leaky version of a Rectified Linear Unit.

  Allows a small, non-zero gradient when the unit is not active:

  ```
    f(x) = alpha * x if x < 0
    f(x) = x if x >= 0
  ```

  Usage:

  >>> layer = tf.keras.layers.LeakyReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.9, -0.3, 0.0, 2.0]
  >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.3, -0.1, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Float >= 0. Negative slope coefficient. Default to 0.3.
  """

  def __init__(self, alpha=0.3, **kwargs):
    super(LeakyReLU, self).__init__(**kwargs)
    if alpha is None:
      # `None` would silently break the backend op; fail loudly instead.
      raise ValueError(
          'The alpha value of a Leaky ReLU layer cannot be None, '
          f'Expecting a float. Received: {alpha}')
    self.supports_masking = True
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    return backend.relu(inputs, alpha=self.alpha)

  def get_config(self):
    base_config = super(LeakyReLU, self).get_config()
    base_config.update({'alpha': float(self.alpha)})
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
@keras_export('keras.layers.PReLU')
class PReLU(Layer):
  """Parametric Rectified Linear Unit.

  It follows:

  ```
    f(x) = alpha * x for x < 0
    f(x) = x for x >= 0
  ```

  where `alpha` is a learned array with the same shape as x.

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha_initializer: Initializer function for the weights.
    alpha_regularizer: Regularizer for the weights.
    alpha_constraint: Constraint for the weights.
    shared_axes: The axes along which to share learnable
      parameters for the activation function.
      For example, if the incoming feature maps
      are from a 2D convolution
      with output shape `(batch, height, width, channels)`,
      and you wish to share parameters across space
      so that each filter only has one set of parameters,
      set `shared_axes=[1, 2]`.
  """

  def __init__(self,
               alpha_initializer='zeros',
               alpha_regularizer=None,
               alpha_constraint=None,
               shared_axes=None,
               **kwargs):
    super(PReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    # Normalize `shared_axes` to either None or a list of axis indices.
    if shared_axes is None:
      self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
      self.shared_axes = [shared_axes]
    else:
      self.shared_axes = list(shared_axes)

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # One learnable alpha per input unit; the batch axis is excluded.
    param_shape = list(input_shape[1:])
    if self.shared_axes is not None:
      # Collapse shared axes to size 1 so their parameters broadcast.
      # (shared_axes indices are offset by 1 relative to param_shape
      # because the batch axis was dropped above.)
      for i in self.shared_axes:
        param_shape[i - 1] = 1
    self.alpha = self.add_weight(
        shape=param_shape,
        name='alpha',
        initializer=self.alpha_initializer,
        regularizer=self.alpha_regularizer,
        constraint=self.alpha_constraint)
    # Set input spec: fix every non-shared axis to the size seen at build
    # time, leaving shared axes free.
    axes = {}
    if self.shared_axes:
      for i in range(1, len(input_shape)):
        if i not in self.shared_axes:
          axes[i] = input_shape[i]
    self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    self.built = True

  def call(self, inputs):
    # Positive part passes through; negative part is scaled by alpha.
    pos = backend.relu(inputs)
    neg = -self.alpha * backend.relu(-inputs)
    return pos + neg

  def get_config(self):
    config = {
        'alpha_initializer': initializers.serialize(self.alpha_initializer),
        'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
        'alpha_constraint': constraints.serialize(self.alpha_constraint),
        'shared_axes': self.shared_axes
    }
    base_config = super(PReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Elementwise activation: the shape is unchanged.
    return input_shape
@keras_export('keras.layers.ELU')
class ELU(Layer):
  """Exponential Linear Unit.

  The activation is:

  ```
    f(x) = alpha * (exp(x) - 1.) for x < 0
    f(x) = x for x >= 0
  ```

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Scale for the negative factor.
  """

  def __init__(self, alpha=1.0, **kwargs):
    super(ELU, self).__init__(**kwargs)
    if alpha is None:
      raise ValueError(
          'Alpha of an ELU layer cannot be None, expecting a float. '
          f'Received: {alpha}')
    self.supports_masking = True
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    # Delegate the elementwise ELU computation to the backend.
    return backend.elu(inputs, self.alpha)

  def get_config(self):
    base_config = super(ELU, self).get_config()
    base_config.update({'alpha': float(self.alpha)})
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Elementwise activation: the shape is unchanged.
    return input_shape
@keras_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
  """Thresholded Rectified Linear Unit.

  The activation is:

  ```
    f(x) = x for x > theta
    f(x) = 0 otherwise
  ```

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    theta: Float >= 0. Threshold location of activation.
  """

  def __init__(self, theta=1.0, **kwargs):
    super(ThresholdedReLU, self).__init__(**kwargs)
    if theta is None:
      raise ValueError(
          'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'
          f' Received: {theta}')
    if theta < 0:
      raise ValueError('The theta value of a Thresholded ReLU layer '
                       f'should be >=0. Received: {theta}')
    self.supports_masking = True
    self.theta = backend.cast_to_floatx(theta)

  def call(self, inputs):
    # Keep values strictly above theta; zero out everything else.
    dtype = inputs.dtype
    keep = tf.cast(tf.greater(inputs, tf.cast(self.theta, dtype)), dtype)
    return inputs * keep

  def get_config(self):
    base_config = super(ThresholdedReLU, self).get_config()
    base_config.update({'theta': float(self.theta)})
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Elementwise activation: the shape is unchanged.
    return input_shape
def _large_compatible_negative(tensor_type):
  """Large negative number representable in the given dtype.

  The standard masking value used in this module (-1e9) overflows
  tf.float16, so for that dtype we fall back to the most negative
  finite float16 value instead.

  Args:
    tensor_type: a dtype to determine the type.

  Returns:
    a large negative number.
  """
  return tf.float16.min if tensor_type == tf.float16 else -1e9
@keras_export('keras.layers.Softmax')
class Softmax(Layer):
  """Softmax activation function.

  Example without mask:

  >>> inp = np.asarray([1., 2., 1.])
  >>> layer = tf.keras.layers.Softmax()
  >>> layer(inp).numpy()
  array([0.21194157, 0.5761169 , 0.21194157], dtype=float32)
  >>> mask = np.asarray([True, False, True], dtype=bool)
  >>> layer(inp, mask).numpy()
  array([0.5, 0. , 0.5], dtype=float32)

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    axis: Integer, or list of Integers, axis along which the softmax
      normalization is applied.

  Call arguments:
    inputs: The inputs, or logits to the softmax layer.
    mask: A boolean mask of the same shape as `inputs`. Defaults to `None`. The
      mask specifies 1 to keep and 0 to mask.

  Returns:
    softmaxed output with the same shape as `inputs`.
  """

  def __init__(self, axis=-1, **kwargs):
    super(Softmax, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis

  def call(self, inputs, mask=None):
    if mask is not None:
      # mask is 1.0 where we keep and 0.0 where we mask, so this adder is
      # 0.0 at kept positions and a very large negative number at masked
      # ones; adding it to the logits drives masked entries to ~0 after
      # normalization, effectively removing them.
      inputs += (1.0 - tf.cast(mask, inputs.dtype)) * (
          _large_compatible_negative(inputs.dtype))
    axis = self.axis
    if isinstance(axis, (tuple, list)):
      if len(axis) == 1:
        return backend.softmax(inputs, axis=axis[0])
      # Multi-axis softmax via the logsumexp identity for numerical
      # stability: softmax(x) = exp(x - logsumexp(x)).
      return tf.exp(inputs - tf.reduce_logsumexp(
          inputs, axis=axis, keepdims=True))
    return backend.softmax(inputs, axis=axis)

  def get_config(self):
    base_config = super(Softmax, self).get_config()
    base_config.update({'axis': self.axis})
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Normalization is elementwise over the given axes: shape is unchanged.
    return input_shape
@keras_export('keras.layers.ReLU')
class ReLU(Layer):
  """Rectified Linear Unit activation function.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:

  ```
    f(x) = max_value if x >= max_value
    f(x) = x if threshold <= x < max_value
    f(x) = negative_slope * (x - threshold) otherwise
  ```

  Usage:

  >>> layer = tf.keras.layers.ReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.ReLU(max_value=1.0)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 1.0]
  >>> layer = tf.keras.layers.ReLU(negative_slope=1.0)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-3.0, -1.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.ReLU(threshold=1.5)
  >>> output = layer([-3.0, -1.0, 1.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    max_value: Float >= 0. Maximum activation value. Default to None, which
      means unlimited.
    negative_slope: Float >= 0. Negative slope coefficient. Default to 0.
    threshold: Float >= 0. Threshold value for thresholded activation. Default
      to 0.
  """

  def __init__(self, max_value=None, negative_slope=0., threshold=0., **kwargs):
    super(ReLU, self).__init__(**kwargs)
    # Validate eagerly so a bad configuration fails at construction time.
    if max_value is not None and max_value < 0.:
      raise ValueError('max_value of a ReLU layer cannot be a negative '
                       f'value. Received: {max_value}')
    if negative_slope is None or negative_slope < 0.:
      raise ValueError('negative_slope of a ReLU layer cannot be a negative '
                       f'value. Received: {negative_slope}')
    if threshold is None or threshold < 0.:
      raise ValueError('threshold of a ReLU layer cannot be a negative '
                       f'value. Received: {threshold}')
    self.supports_masking = True
    self.max_value = (
        None if max_value is None else backend.cast_to_floatx(max_value))
    self.negative_slope = backend.cast_to_floatx(negative_slope)
    self.threshold = backend.cast_to_floatx(threshold)

  def call(self, inputs):
    # alpha is used for leaky relu slope in activations instead of
    # negative_slope.
    return backend.relu(
        inputs,
        alpha=self.negative_slope,
        max_value=self.max_value,
        threshold=self.threshold)

  def get_config(self):
    base_config = super(ReLU, self).get_config()
    base_config.update({
        'max_value': self.max_value,
        'negative_slope': self.negative_slope,
        'threshold': self.threshold,
    })
    return base_config

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Elementwise activation: the shape is unchanged.
    return input_shape
| 14,008 | 30.131111 | 80 | py |
keras | keras-master/keras/layers/wrappers.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-classes-have-attributes
"""Wrapper layers: layers that augment the functionality of another layer."""
import tensorflow.compat.v2 as tf
import copy
from keras import backend
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.layers.recurrent import _standardize_args
from keras.utils import generic_utils
from keras.utils import layer_utils
from keras.utils import tf_inspect
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Wrapper')
class Wrapper(Layer):
  """Abstract wrapper base class.

  Wrappers take another layer and augment it in various ways.
  Do not use this class as a layer, it is only an abstract base class.
  Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.

  Args:
    layer: The layer to be wrapped.
  """

  def __init__(self, layer, **kwargs):
    assert isinstance(layer, Layer)
    self.layer = layer
    super(Wrapper, self).__init__(**kwargs)

  def build(self, input_shape=None):
    # Build the wrapped layer first (if needed), then mark the wrapper built.
    if not self.layer.built:
      self.layer.build(input_shape)
      self.layer.built = True
    self.built = True

  @property
  def activity_regularizer(self):
    # Surface the wrapped layer's activity regularizer, if it has one.
    return getattr(self.layer, 'activity_regularizer', None)

  def get_config(self):
    base_config = super(Wrapper, self).get_config()
    base_config.update(
        {'layer': generic_utils.serialize_keras_object(self.layer)})
    return base_config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    # Work on a deep copy so the caller's config dict is not mutated.
    config = copy.deepcopy(config)
    inner_layer = deserialize_layer(
        config.pop('layer'), custom_objects=custom_objects)
    return cls(inner_layer, **config)
@keras_export('keras.layers.TimeDistributed')
class TimeDistributed(Wrapper):
  """This wrapper allows to apply a layer to every temporal slice of an input.

  Every input should be at least 3D, and the dimension of index one of the
  first input will be considered to be the temporal dimension.

  Consider a batch of 32 video samples, where each sample is a 128x128 RGB image
  with `channels_last` data format, across 10 timesteps.
  The batch input shape is `(32, 10, 128, 128, 3)`.

  You can then use `TimeDistributed` to apply the same `Conv2D` layer to each
  of the 10 timesteps, independently:

  >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3))
  >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3))
  >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs)
  >>> outputs.shape
  TensorShape([None, 10, 126, 126, 64])

  Because `TimeDistributed` applies the same instance of `Conv2D` to each of the
  timestamps, the same set of weights are used at each timestamp.

  Args:
    layer: a `tf.keras.layers.Layer` instance.

  Call arguments:
    inputs: Input tensor of shape (batch, time, ...) or nested tensors,
      and each of which has shape (batch, time, ...).
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the
      wrapped layer (only if the layer supports this argument).
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked. This argument is passed to the
      wrapped layer (only if the layer supports this argument).

  Raises:
    ValueError: If not initialized with a `tf.keras.layers.Layer` instance.
  """

  def __init__(self, layer, **kwargs):
    if not isinstance(layer, Layer):
      raise ValueError(
          'Please initialize `TimeDistributed` layer with a '
          f'`tf.keras.layers.Layer` instance. Received: {layer}')
    super(TimeDistributed, self).__init__(layer, **kwargs)
    self.supports_masking = True

    # It is safe to use the fast, reshape-based approach with all of our
    # built-in Layers.
    self._always_use_reshape = (
        layer_utils.is_builtin_layer(layer) and
        not getattr(layer, 'stateful', False))

  def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
    """Finds non-specific dimensions in the static shapes.

    The static shapes are replaced with the corresponding dynamic shapes of the
    tensor.

    Args:
      init_tuple: a tuple, the first part of the output shape
      tensor: the tensor from which to get the (static and dynamic) shapes
        as the last part of the output shape
      start_idx: int, which indicate the first dimension to take from
        the static shape of the tensor
      int_shape: an alternative static shape to take as the last part
        of the output shape

    Returns:
      The new int_shape with the first part from init_tuple
      and the last part from either `int_shape` (if provided)
      or `tensor.shape`, where every `None` is replaced by
      the corresponding dimension from `tf.shape(tensor)`.
    """
    # replace all None in int_shape by backend.shape
    if int_shape is None:
      int_shape = backend.int_shape(tensor)[start_idx:]
    if isinstance(int_shape, tf.TensorShape):
      int_shape = int_shape.as_list()
    if not any(not s for s in int_shape):
      # Fully static shape: no dynamic lookups needed.
      return init_tuple + tuple(int_shape)
    shape = backend.shape(tensor)
    int_shape = list(int_shape)
    for i, s in enumerate(int_shape):
      if not s:
        int_shape[i] = shape[start_idx + i]
    return init_tuple + tuple(int_shape)

  def _remove_timesteps(self, dims):
    # Drop axis 1 (the time axis) from a TensorShape, keeping batch and
    # feature dimensions.
    dims = dims.as_list()
    return tf.TensorShape([dims[0]] + dims[2:])

  def build(self, input_shape):
    input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
    input_dims = tf.nest.flatten(
        tf.nest.map_structure(lambda x: x.ndims, input_shape))
    if any(dim < 3 for dim in input_dims):
      raise ValueError(
          '`TimeDistributed` Layer should be passed an `input_shape ` '
          f'with at least 3 dimensions, received: {input_shape}')
    # Don't enforce the batch or time dimension.
    self.input_spec = tf.nest.map_structure(
        lambda x: InputSpec(shape=[None, None] + x.as_list()[2:]), input_shape)
    # Build the wrapped layer against the per-timestep (time-stripped) shape.
    child_input_shape = tf.nest.map_structure(self._remove_timesteps, input_shape)
    child_input_shape = tf_utils.convert_shapes(child_input_shape)
    super(TimeDistributed, self).build(tuple(child_input_shape))
    self.built = True

  def compute_output_shape(self, input_shape):
    input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

    # Ask the wrapped layer for its per-timestep output shape, then
    # re-insert the time dimension at axis 1.
    child_input_shape = tf.nest.map_structure(self._remove_timesteps, input_shape)
    child_output_shape = self.layer.compute_output_shape(child_input_shape)
    child_output_shape = tf_utils.convert_shapes(
        child_output_shape, to_tuples=False)
    timesteps = tf_utils.convert_shapes(input_shape)
    timesteps = tf.nest.flatten(timesteps)[1]

    def insert_timesteps(dims):
      # Re-insert the time dimension after the batch dimension.
      dims = dims.as_list()
      return tf.TensorShape([dims[0], timesteps] + dims[1:])

    return tf.nest.map_structure(insert_timesteps, child_output_shape)

  def call(self, inputs, training=None, mask=None):
    kwargs = {}
    if generic_utils.has_arg(self.layer.call, 'training'):
      kwargs['training'] = training

    input_shape = tf.nest.map_structure(
        lambda x: tf.TensorShape(backend.int_shape(x)), inputs)
    batch_size = tf_utils.convert_shapes(input_shape)
    batch_size = tf.nest.flatten(batch_size)[0]
    if batch_size and not self._always_use_reshape:
      inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
      is_ragged_input = row_lengths is not None
      input_length = tf_utils.convert_shapes(input_shape)
      input_length = tf.nest.flatten(input_length)[1]

      # batch size matters, use rnn-based implementation
      def step(x, _):
        # Apply the wrapped layer to one timestep slice; no recurrent state.
        output = self.layer(x, **kwargs)
        return output, []

      _, outputs, _ = backend.rnn(
          step,
          inputs,
          initial_states=[],
          input_length=row_lengths[0] if is_ragged_input else input_length,
          mask=mask,
          unroll=False)
      # pylint: disable=g-long-lambda
      y = tf.nest.map_structure(
          lambda output: backend.maybe_convert_to_ragged(
              is_ragged_input, output, row_lengths), outputs)
    else:
      # No batch size specified, therefore the layer will be able
      # to process batches of any size.
      # We can go with reshape-based implementation for performance.
      is_ragged_input = tf.nest.map_structure(
          lambda x: isinstance(x, tf.RaggedTensor), inputs)
      is_ragged_input = tf.nest.flatten(is_ragged_input)
      if all(is_ragged_input):
        # Ragged path: run the layer on the flat values and re-wrap with the
        # original row lengths.
        input_values = tf.nest.map_structure(lambda x: x.values, inputs)
        input_row_lenghts = tf.nest.map_structure(
            lambda x: x.nested_row_lengths()[0], inputs)
        y = self.layer(input_values, **kwargs)
        y = tf.nest.map_structure(tf.RaggedTensor.from_row_lengths, y,
                                  input_row_lenghts)
      elif any(is_ragged_input):
        raise ValueError('All inputs has to be either ragged or not, '
                         f'but not mixed. Received: {inputs}')
      else:
        # Dense reshape path: fold time into the batch axis, run the layer
        # once, then unfold back to (batch, time, ...).
        input_length = tf_utils.convert_shapes(input_shape)
        input_length = tf.nest.flatten(input_length)[1]
        if not input_length:
          input_length = tf.nest.map_structure(lambda x: tf.shape(x)[1], inputs)
          input_length = generic_utils.to_list(tf.nest.flatten(input_length))[0]

        inner_input_shape = tf.nest.map_structure(
            lambda x: self._get_shape_tuple((-1,), x, 2), inputs)
        # Shape: (num_samples * timesteps, ...). And track the
        # transformation in self._input_map.
        inputs = tf.__internal__.nest.map_structure_up_to(inputs, tf.reshape, inputs,
                                                          inner_input_shape)
        # (num_samples * timesteps, ...)
        if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None:
          inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
          kwargs['mask'] = backend.reshape(mask, inner_mask_shape)

        y = self.layer(inputs, **kwargs)

        # Shape: (num_samples, timesteps, ...)
        output_shape = self.compute_output_shape(input_shape)
        # pylint: disable=g-long-lambda
        output_shape = tf.nest.map_structure(
            lambda tensor, int_shape: self._get_shape_tuple(
                (-1, input_length), tensor, 1, int_shape[2:]), y, output_shape)
        y = tf.__internal__.nest.map_structure_up_to(y, tf.reshape, y, output_shape)
        if not tf.executing_eagerly():
          # Set the static shape for the result since it might be lost during
          # array_ops reshape, eg, some `None` dim in the result could be
          # inferred.
          tf.__internal__.nest.map_structure_up_to(
              y, lambda tensor, shape: tensor.set_shape(shape), y,
              self.compute_output_shape(input_shape))

    return y

  def compute_mask(self, inputs, mask=None):
    """Computes an output mask tensor for Embedding layer.

    This is based on the inputs, mask, and the inner layer.
    If batch size is specified:
    Simply return the input `mask`. (An rnn-based implementation with
    more than one rnn inputs is required but not supported in tf.keras yet.)
    Otherwise we call `compute_mask` of the inner layer at each time step.
    If the output mask at each time step is not `None`:
    (E.g., inner layer is Masking or RNN)
    Concatenate all of them and return the concatenation.
    If the output mask at each time step is `None` and the input mask is not
    `None`:(E.g., inner layer is Dense)
    Reduce the input_mask to 2 dimensions and return it.
    Otherwise (both the output mask and the input mask are `None`):
    (E.g., `mask` is not used at all)
    Return `None`.

    Args:
      inputs: Tensor with shape [batch size, timesteps, ...] indicating the
        input to TimeDistributed. If static shape information is available for
        "batch size", `mask` is returned unmodified.
      mask: Either None (indicating no masking) or a Tensor indicating the
        input mask for TimeDistributed. The shape can be static or dynamic.

    Returns:
      Either None (no masking), or a [batch size, timesteps, ...] Tensor with
      an output mask for the TimeDistributed layer with the shape beyond the
      second dimension being the value of the input mask shape(if the computed
      output mask is none), an output mask with the shape beyond the first
      dimension being the value of the mask shape(if mask is not None) or
      output mask with the shape beyond the first dimension being the
      value of the computed output shape.
    """
    # cases need to call the layer.compute_mask when input_mask is None:
    # Masking layer and Embedding layer with mask_zero
    input_shape = tf.nest.map_structure(
        lambda x: tf.TensorShape(backend.int_shape(x)), inputs)
    input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
    batch_size = tf_utils.convert_shapes(input_shape)
    batch_size = tf.nest.flatten(batch_size)[0]
    is_ragged_input = tf.nest.map_structure(
        lambda x: isinstance(x, tf.RaggedTensor), inputs)
    is_ragged_input = generic_utils.to_list(tf.nest.flatten(is_ragged_input))
    if batch_size and not self._always_use_reshape or any(is_ragged_input):
      # batch size matters, we currently do not handle mask explicitly, or if
      # the layer always uses reshape approach, or the input is a ragged tensor.
      return mask
    inner_mask = mask
    if inner_mask is not None:
      # Fold time into the batch axis to match the reshaped inner inputs.
      inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
      inner_mask = backend.reshape(inner_mask, inner_mask_shape)
    inner_input_shape = tf.nest.map_structure(
        lambda tensor: self._get_shape_tuple((-1,), tensor, 2), inputs)
    inner_inputs = tf.__internal__.nest.map_structure_up_to(inputs, tf.reshape, inputs,
                                                            inner_input_shape)
    output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
    if output_mask is None:
      if mask is None:
        return None
      # input_mask is not None, and output_mask is None:
      # we should return a not-None mask
      output_mask = mask
      for _ in range(2, len(backend.int_shape(mask))):
        output_mask = backend.any(output_mask, axis=-1)
    else:
      # output_mask is not None. We need to reshape it
      input_length = tf_utils.convert_shapes(input_shape)
      input_length = tf.nest.flatten(input_length)[1]
      if not input_length:
        input_length = tf.nest.map_structure(lambda x: backend.shape(x)[1], inputs)
        input_length = tf.nest.flatten(input_length)[0]
      output_mask_int_shape = backend.int_shape(output_mask)
      if output_mask_int_shape is None:
        # if the output_mask does not have a static shape,
        # its shape must be the same as mask's
        if mask is not None:
          output_mask_int_shape = backend.int_shape(mask)
        else:
          input_shape = generic_utils.to_list(tf.nest.flatten(input_shape))[0]
          output_mask_int_shape = backend.compute_output_shape(input_shape)[:-1]
      output_mask_shape = self._get_shape_tuple(
          (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
      output_mask = backend.reshape(output_mask, output_mask_shape)
    return output_mask
@keras_export('keras.layers.Bidirectional')
class Bidirectional(Wrapper):
"""Bidirectional wrapper for RNNs.
Args:
layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or
`keras.layers.GRU`. It could also be a `keras.layers.Layer` instance
that meets the following criteria:
1. Be a sequence-processing layer (accepts 3D+ inputs).
2. Have a `go_backwards`, `return_sequences` and `return_state`
attribute (with the same semantics as for the `RNN` class).
3. Have an `input_spec` attribute.
4. Implement serialization via `get_config()` and `from_config()`.
Note that the recommended way to create new RNN layers is to write a
custom RNN cell and use it with `keras.layers.RNN`, instead of
subclassing `keras.layers.Layer` directly.
- When the `returns_sequences` is true, the output of the masked timestep
will be zero regardless of the layer's original `zero_output_for_mask`
value.
merge_mode: Mode by which outputs of the forward and backward RNNs will be
combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the
outputs will not be combined, they will be returned as a list. Default
value is 'concat'.
backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer`
instance to be used to handle backwards input processing.
If `backward_layer` is not provided, the layer instance passed as the
`layer` argument will be used to generate the backward layer
automatically.
Note that the provided `backward_layer` layer should have properties
matching those of the `layer` argument, in particular it should have the
same values for `stateful`, `return_states`, `return_sequences`, etc.
In addition, `backward_layer` and `layer` should have different
`go_backwards` argument values.
A `ValueError` will be raised if these requirements are not met.
Call arguments:
The call arguments for this layer are the same as those of the wrapped RNN
layer.
Beware that when passing the `initial_state` argument during the call of
this layer, the first half in the list of elements in the `initial_state`
list will be passed to the forward RNN call and the last half in the list
of elements will be passed to the backward RNN call.
Raises:
ValueError:
1. If `layer` or `backward_layer` is not a `Layer` instance.
2. In case of invalid `merge_mode` argument.
3. If `backward_layer` has mismatched properties compared to `layer`.
Examples:
```python
model = Sequential()
model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))
model.add(Bidirectional(LSTM(10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# With custom backward layer
model = Sequential()
forward_layer = LSTM(10, return_sequences=True)
backward_layer = LSTM(10, activation='relu', return_sequences=True,
go_backwards=True)
model.add(Bidirectional(forward_layer, backward_layer=backward_layer,
input_shape=(5, 10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
"""
def __init__(self,
             layer,
             merge_mode='concat',
             weights=None,
             backward_layer=None,
             **kwargs):
  """Creates forward and backward copies of `layer` and wires them up."""
  if not isinstance(layer, Layer):
    raise ValueError(
        'Please initialize `Bidirectional` layer with a '
        f'`tf.keras.layers.Layer` instance. Received: {layer}')
  if backward_layer is not None and not isinstance(backward_layer, Layer):
    raise ValueError(
        '`backward_layer` need to be a `tf.keras.layers.Layer` instance. '
        f'Received: {backward_layer}')
  if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
    raise ValueError(f'Invalid merge mode. Received: {merge_mode}. '
                     'Merge mode should be one of '
                     '{"sum", "mul", "ave", "concat", None}')
  # We don't want to track `layer` since we're already tracking the two copies
  # of it we actually run.
  self._setattr_tracking = False
  super(Bidirectional, self).__init__(layer, **kwargs)
  self._setattr_tracking = True

  # Recreate the forward layer from the original layer config, so that it will
  # not carry over any state from the layer.
  self.forward_layer = self._recreate_layer_from_config(layer)

  if backward_layer is None:
    self.backward_layer = self._recreate_layer_from_config(
        layer, go_backwards=True)
  else:
    self.backward_layer = backward_layer
    # Keep the custom backward layer config, so that we can save it later. The
    # layer's name might be updated below with prefix 'backward_', and we want
    # to preserve the original config.
    self._backward_layer_config = generic_utils.serialize_keras_object(
        backward_layer)

  self.forward_layer._name = 'forward_' + self.forward_layer.name
  self.backward_layer._name = 'backward_' + self.backward_layer.name

  self._verify_layer_config()

  def force_zero_output_for_mask(layer):
    # Force the zero_output_for_mask to be True if returning sequences.
    if getattr(layer, 'zero_output_for_mask', None) is not None:
      layer.zero_output_for_mask = layer.return_sequences

  force_zero_output_for_mask(self.forward_layer)
  force_zero_output_for_mask(self.backward_layer)

  self.merge_mode = merge_mode
  if weights:
    # Split the provided flat weight list evenly between the two copies.
    nw = len(weights)
    self.forward_layer.initial_weights = weights[:nw // 2]
    self.backward_layer.initial_weights = weights[nw // 2:]
  self.stateful = layer.stateful
  self.return_sequences = layer.return_sequences
  self.return_state = layer.return_state
  self.supports_masking = True
  self._trainable = True
  self._num_constants = 0
  self.input_spec = layer.input_spec
def _verify_layer_config(self):
"""Ensure the forward and backward layers have valid common property."""
if self.forward_layer.go_backwards == self.backward_layer.go_backwards:
raise ValueError(
'Forward layer and backward layer should have different '
'`go_backwards` value.'
f'forward_layer.go_backwards = {self.forward_layer.go_backwards},'
f'backward_layer.go_backwards = {self.backward_layer.go_backwards}')
common_attributes = ('stateful', 'return_sequences', 'return_state')
for a in common_attributes:
forward_value = getattr(self.forward_layer, a)
backward_value = getattr(self.backward_layer, a)
if forward_value != backward_value:
raise ValueError(
'Forward layer and backward layer are expected to have the same '
f'value for attribute "{a}", got "{forward_value}" for forward '
f'layer and "{backward_value}" for backward layer')
def _recreate_layer_from_config(self, layer, go_backwards=False):
# When recreating the layer from its config, it is possible that the layer
# is a RNN layer that contains custom cells. In this case we inspect the
# layer and pass the custom cell class as part of the `custom_objects`
# argument when calling `from_config`.
# See https://github.com/tensorflow/tensorflow/issues/26581 for more detail.
config = layer.get_config()
if go_backwards:
config['go_backwards'] = not config['go_backwards']
if 'custom_objects' in tf_inspect.getfullargspec(
layer.__class__.from_config).args:
custom_objects = {}
cell = getattr(layer, 'cell', None)
if cell is not None:
custom_objects[cell.__class__.__name__] = cell.__class__
# For StackedRNNCells
stacked_cells = getattr(cell, 'cells', [])
for c in stacked_cells:
custom_objects[c.__class__.__name__] = c.__class__
return layer.__class__.from_config(config, custom_objects=custom_objects)
else:
return layer.__class__.from_config(config)
  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    """Computes the output shape(s) from the forward layer and `merge_mode`.

    The forward layer's output shape stands in for both directions; 'concat'
    doubles the last dimension, and `merge_mode=None` yields one shape per
    direction. With `return_state`, the state shapes (duplicated, one set per
    direction) are appended.
    """
    output_shape = self.forward_layer.compute_output_shape(input_shape)
    if self.return_state:
      # First element is the output shape; the remainder are state shapes.
      state_shape = tf_utils.convert_shapes(output_shape[1:], to_tuples=False)
      output_shape = tf_utils.convert_shapes(output_shape[0], to_tuples=False)
    else:
      output_shape = tf_utils.convert_shapes(output_shape, to_tuples=False)
    if self.merge_mode == 'concat':
      # Forward and backward outputs are concatenated along the last axis.
      output_shape = output_shape.as_list()
      output_shape[-1] *= 2
      output_shape = tf.TensorShape(output_shape)
    elif self.merge_mode is None:
      # No merging: one output per direction.
      output_shape = [output_shape, copy.copy(output_shape)]
    if self.return_state:
      # States are returned for both directions, hence the duplicated shapes.
      if self.merge_mode is None:
        return output_shape + state_shape + copy.copy(state_shape)
      return [output_shape] + state_shape + copy.copy(state_shape)
    return output_shape
  def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
    """`Bidirectional.__call__` implements the same API as the wrapped `RNN`.

    When `initial_state`/`constants` are Keras tensors they are appended to
    the input list (with matching input specs) so Keras can trace the graph;
    otherwise they are forwarded through kwargs.
    """
    inputs, initial_state, constants = _standardize_args(
        inputs, initial_state, constants, self._num_constants)
    if isinstance(inputs, list):
      if len(inputs) > 1:
        initial_state = inputs[1:]
      inputs = inputs[0]
    if initial_state is None and constants is None:
      # Plain path: no extra tensors to thread through the call.
      return super(Bidirectional, self).__call__(inputs, **kwargs)
    # Applies the same workaround as in `RNN.__call__`
    additional_inputs = []
    additional_specs = []
    if initial_state is not None:
      # Check if `initial_state` can be splitted into half
      num_states = len(initial_state)
      if num_states % 2 > 0:
        raise ValueError(
            'When passing `initial_state` to a Bidirectional RNN, '
            'the state should be a list containing the states of '
            'the underlying RNNs. '
            f'Received: {initial_state}')
      kwargs['initial_state'] = initial_state
      additional_inputs += initial_state
      state_specs = tf.nest.map_structure(
          lambda state: InputSpec(shape=backend.int_shape(state)),
          initial_state)
      # First half of the states feeds the forward layer, second half the
      # backward layer.
      self.forward_layer.state_spec = state_specs[:num_states // 2]
      self.backward_layer.state_spec = state_specs[num_states // 2:]
      additional_specs += state_specs
    if constants is not None:
      kwargs['constants'] = constants
      additional_inputs += constants
      constants_spec = [InputSpec(shape=backend.int_shape(constant))
                        for constant in constants]
      self.forward_layer.constants_spec = constants_spec
      self.backward_layer.constants_spec = constants_spec
      additional_specs += constants_spec
      self._num_constants = len(constants)
      self.forward_layer._num_constants = self._num_constants
      self.backward_layer._num_constants = self._num_constants
    # All additional inputs must be either Keras tensors or plain tensors;
    # mixing the two cannot be represented in a functional graph.
    is_keras_tensor = backend.is_keras_tensor(
        tf.nest.flatten(additional_inputs)[0])
    for tensor in tf.nest.flatten(additional_inputs):
      if backend.is_keras_tensor(tensor) != is_keras_tensor:
        raise ValueError('The initial state of a Bidirectional'
                         ' layer cannot be specified with a mix of'
                         ' Keras tensors and non-Keras tensors'
                         ' (a "Keras tensor" is a tensor that was'
                         ' returned by a Keras layer, or by `Input`)')
    if is_keras_tensor:
      # Compute the full input spec, including state
      full_input = [inputs] + additional_inputs
      # The original input_spec is None since there could be a nested tensor
      # input. Update the input_spec to match the inputs.
      full_input_spec = [None for _ in range(len(tf.nest.flatten(inputs)))
                        ] + additional_specs
      # Removing kwargs since the value are passed with input list.
      kwargs['initial_state'] = None
      kwargs['constants'] = None
      # Perform the call with temporarily replaced input_spec
      original_input_spec = self.input_spec
      self.input_spec = full_input_spec
      output = super(Bidirectional, self).__call__(full_input, **kwargs)
      self.input_spec = original_input_spec
      return output
    else:
      return super(Bidirectional, self).__call__(inputs, **kwargs)
def call(self,
inputs,
training=None,
mask=None,
initial_state=None,
constants=None):
"""`Bidirectional.call` implements the same API as the wrapped `RNN`."""
kwargs = {}
if generic_utils.has_arg(self.layer.call, 'training'):
kwargs['training'] = training
if generic_utils.has_arg(self.layer.call, 'mask'):
kwargs['mask'] = mask
if generic_utils.has_arg(self.layer.call, 'constants'):
kwargs['constants'] = constants
if generic_utils.has_arg(self.layer.call, 'initial_state'):
if isinstance(inputs, list) and len(inputs) > 1:
# initial_states are keras tensors, which means they are passed in
# together with inputs as list. The initial_states need to be split into
# forward and backward section, and be feed to layers accordingly.
forward_inputs = [inputs[0]]
backward_inputs = [inputs[0]]
pivot = (len(inputs) - self._num_constants) // 2 + 1
# add forward initial state
forward_inputs += inputs[1:pivot]
if not self._num_constants:
# add backward initial state
backward_inputs += inputs[pivot:]
else:
# add backward initial state
backward_inputs += inputs[pivot:-self._num_constants]
# add constants for forward and backward layers
forward_inputs += inputs[-self._num_constants:]
backward_inputs += inputs[-self._num_constants:]
forward_state, backward_state = None, None
if 'constants' in kwargs:
kwargs['constants'] = None
elif initial_state is not None:
# initial_states are not keras tensors, eg eager tensor from np array.
# They are only passed in from kwarg initial_state, and should be passed
# to forward/backward layer via kwarg initial_state as well.
forward_inputs, backward_inputs = inputs, inputs
half = len(initial_state) // 2
forward_state = initial_state[:half]
backward_state = initial_state[half:]
else:
forward_inputs, backward_inputs = inputs, inputs
forward_state, backward_state = None, None
y = self.forward_layer(forward_inputs,
initial_state=forward_state, **kwargs)
y_rev = self.backward_layer(backward_inputs,
initial_state=backward_state, **kwargs)
else:
y = self.forward_layer(inputs, **kwargs)
y_rev = self.backward_layer(inputs, **kwargs)
if self.return_state:
states = y[1:] + y_rev[1:]
y = y[0]
y_rev = y_rev[0]
if self.return_sequences:
time_dim = 0 if getattr(self.forward_layer, 'time_major', False) else 1
y_rev = backend.reverse(y_rev, time_dim)
if self.merge_mode == 'concat':
output = backend.concatenate([y, y_rev])
elif self.merge_mode == 'sum':
output = y + y_rev
elif self.merge_mode == 'ave':
output = (y + y_rev) / 2
elif self.merge_mode == 'mul':
output = y * y_rev
elif self.merge_mode is None:
output = [y, y_rev]
else:
raise ValueError(
f'Unrecognized value for `merge_mode`. Received: {self.merge_mode}'
'Expected values are ["concat", "sum", "ave", "mul"]')
if self.return_state:
if self.merge_mode is None:
return output + states
return [output] + states
return output
def reset_states(self):
self.forward_layer.reset_states()
self.backward_layer.reset_states()
def build(self, input_shape):
with backend.name_scope(self.forward_layer.name):
self.forward_layer.build(input_shape)
with backend.name_scope(self.backward_layer.name):
self.backward_layer.build(input_shape)
self.built = True
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
if self.return_sequences:
if not self.merge_mode:
output_mask = [mask, mask]
else:
output_mask = mask
else:
output_mask = [None, None] if not self.merge_mode else None
if self.return_state:
states = self.forward_layer.states
state_mask = [None for _ in states]
if isinstance(output_mask, list):
return output_mask + state_mask * 2
return [output_mask] + state_mask * 2
return output_mask
@property
def constraints(self):
constraints = {}
if hasattr(self.forward_layer, 'constraints'):
constraints.update(self.forward_layer.constraints)
constraints.update(self.backward_layer.constraints)
return constraints
def get_config(self):
config = {'merge_mode': self.merge_mode}
if self._num_constants:
config['num_constants'] = self._num_constants
if hasattr(self, '_backward_layer_config'):
config['backward_layer'] = self._backward_layer_config
base_config = super(Bidirectional, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Instead of updating the input, create a copy and use that.
config = copy.deepcopy(config)
num_constants = config.pop('num_constants', 0)
# Handle forward layer instantiation (as would parent class).
from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
config['layer'] = deserialize_layer(
config['layer'], custom_objects=custom_objects)
# Handle (optional) backward layer instantiation.
backward_layer_config = config.pop('backward_layer', None)
if backward_layer_config is not None:
backward_layer = deserialize_layer(
backward_layer_config, custom_objects=custom_objects)
config['backward_layer'] = backward_layer
# Instantiate the wrapper, adjust it and return it.
layer = cls(**config)
layer._num_constants = num_constants
return layer
| 34,674 | 42.074534 | 100 | py |
keras | keras-master/keras/layers/noise_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for noise layers."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
@keras_parameterized.run_all_keras_modes
class NoiseLayersTest(keras_parameterized.TestCase):
  """Smoke tests for GaussianNoise, GaussianDropout and AlphaDropout."""

  def test_GaussianNoise(self):
    testing_utils.layer_test(
        keras.layers.GaussianNoise,
        kwargs={'stddev': 1.},
        input_shape=(3, 2, 3))

  def test_GaussianDropout(self):
    testing_utils.layer_test(
        keras.layers.GaussianDropout,
        kwargs={'rate': 0.5},
        input_shape=(3, 2, 3))

  def test_AlphaDropout(self):
    testing_utils.layer_test(
        keras.layers.AlphaDropout, kwargs={'rate': 0.2}, input_shape=(3, 2, 3))

  @staticmethod
  def _make_model(dtype, class_type):
    """Returns a tiny Dense model followed by the requested noise layer."""
    assert dtype in (tf.float32, tf.float64)
    assert class_type in ('gaussian_noise', 'gaussian_dropout', 'alpha_noise')
    model = keras.Sequential()
    model.add(keras.layers.Dense(8, input_shape=(32,), dtype=dtype))
    if class_type == 'gaussian_noise':
      noise_layer = keras.layers.GaussianNoise(0.0003, dtype=dtype)
    elif class_type == 'gaussian_dropout':
      noise_layer = keras.layers.GaussianDropout(0.1, dtype=dtype)
    else:
      noise_layer = keras.layers.AlphaDropout(0.5, dtype=dtype)
    model.add(noise_layer)
    return model

  def _train_model(self, dtype, gtype):
    """Compiles the model and runs a single train step on zero data."""
    model = self._make_model(dtype, gtype)
    model.compile(
        optimizer='sgd',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((8, 32)), np.zeros((8, 8)))

  def test_noise_float32(self):
    self._train_model(tf.float32, 'gaussian_noise')

  def test_noise_float64(self):
    self._train_model(tf.float64, 'gaussian_noise')

  def test_dropout_float32(self):
    self._train_model(tf.float32, 'gaussian_dropout')

  def test_dropout_float64(self):
    self._train_model(tf.float64, 'gaussian_dropout')

  def test_alpha_dropout_float32(self):
    self._train_model(tf.float32, 'alpha_noise')

  def test_alpha_dropout_float64(self):
    self._train_model(tf.float64, 'alpha_noise')
# Standard TF test entry point: run the suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 2,851 | 31.044944 | 80 | py |
keras | keras-master/keras/layers/recurrent.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-classes-have-attributes
"""Recurrent layers and their base classes."""
import tensorflow.compat.v2 as tf
import collections
import functools
import warnings
import numpy as np
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.saving.saved_model import layer_serialization
from keras.utils import control_flow_util
from keras.utils import generic_utils
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# Warning text used when `implementation=2` is requested together with
# `recurrent_dropout`; presumably emitted where the layer falls back to
# implementation 1 — see the usage sites in the RNN layer classes.
RECURRENT_DROPOUT_WARNING_MSG = (
    'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
    'Using `implementation=1`.')
@keras_export('keras.layers.StackedRNNCells')
class StackedRNNCells(Layer):
  """Wrapper allowing a stack of RNN cells to behave as a single cell.
  Used to implement efficient stacked RNNs.
  Args:
    cells: List of RNN cell instances.
  Examples:
  ```python
  batch_size = 3
  sentence_max_length = 5
  n_features = 2
  new_shape = (batch_size, sentence_max_length, n_features)
  x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)
  rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]
  stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
  lstm_layer = tf.keras.layers.RNN(stacked_lstm)
  result = lstm_layer(x)
  ```
  """
  def __init__(self, cells, **kwargs):
    """Validates that each cell exposes `call`/`state_size`, then stores them.
    Raises:
      ValueError: If any cell lacks a `call` method or `state_size` attribute.
    """
    for cell in cells:
      if 'call' not in dir(cell):
        raise ValueError('All cells must have a `call` method. '
                         f'Received cell without a `call` method: {cell}')
      if 'state_size' not in dir(cell):
        raise ValueError('All cells must have a `state_size` attribute. '
                         f'Received cell without a `state_size`: {cell}')
    self.cells = cells
    # reverse_state_order determines whether the state size will be in a reverse
    # order of the cells' state. User might want to set this to True to keep the
    # existing behavior. This is only useful when use RNN(return_state=True)
    # since the state will be returned as the same order of state_size.
    self.reverse_state_order = kwargs.pop('reverse_state_order', False)
    if self.reverse_state_order:
      logging.warning('reverse_state_order=True in StackedRNNCells will soon '
                      'be deprecated. Please update the code to work with the '
                      'natural order of states if you rely on the RNN states, '
                      'eg RNN(return_state=True).')
    super(StackedRNNCells, self).__init__(**kwargs)
  @property
  def state_size(self):
    # Per-cell state sizes, optionally in reverse cell order (legacy flag).
    return tuple(c.state_size for c in
                 (self.cells[::-1] if self.reverse_state_order else self.cells))
  @property
  def output_size(self):
    # The stack's output is the last cell's output; fall back to the first
    # entry of its state_size for cells that don't define `output_size`.
    if getattr(self.cells[-1], 'output_size', None) is not None:
      return self.cells[-1].output_size
    elif _is_multiple_state(self.cells[-1].state_size):
      return self.cells[-1].state_size[0]
    else:
      return self.cells[-1].state_size
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Returns a tuple with one initial-state entry per cell."""
    initial_states = []
    for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
      get_initial_state_fn = getattr(cell, 'get_initial_state', None)
      if get_initial_state_fn:
        initial_states.append(get_initial_state_fn(
            inputs=inputs, batch_size=batch_size, dtype=dtype))
      else:
        # Cell doesn't implement `get_initial_state`; use zero-filled states.
        initial_states.append(_generate_zero_filled_state_for_cell(
            cell, inputs, batch_size, dtype))
    return tuple(initial_states)
  def call(self, inputs, states, constants=None, training=None, **kwargs):
    """Runs one step through every cell, feeding each cell's output to the next.
    Returns:
      `(output, new_states)` where `output` is the last cell's output and
      `new_states` is packed with the same structure as `state_size`.
    """
    # Recover per-cell states.
    state_size = (self.state_size[::-1]
                  if self.reverse_state_order else self.state_size)
    nested_states = tf.nest.pack_sequence_as(state_size, tf.nest.flatten(states))
    # Call the cells in order and store the returned states.
    new_nested_states = []
    for cell, states in zip(self.cells, nested_states):
      states = states if tf.nest.is_nested(states) else [states]
      # TF cell does not wrap the state into list when there is only one state.
      is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None
      states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
      if generic_utils.has_arg(cell.call, 'training'):
        kwargs['training'] = training
      else:
        kwargs.pop('training', None)
      # Use the __call__ function for callable objects, eg layers, so that it
      # will have the proper name scopes for the ops, etc.
      cell_call_fn = cell.__call__ if callable(cell) else cell.call
      if generic_utils.has_arg(cell.call, 'constants'):
        inputs, states = cell_call_fn(inputs, states,
                                      constants=constants, **kwargs)
      else:
        inputs, states = cell_call_fn(inputs, states, **kwargs)
      new_nested_states.append(states)
    return inputs, tf.nest.pack_sequence_as(state_size,
                                            tf.nest.flatten(new_nested_states))
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    """Builds each cell in turn; each cell sees the previous cell's output shape."""
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    def get_batch_input_shape(batch_size, dim):
      # Prepend the batch dimension to a per-feature shape.
      shape = tf.TensorShape(dim).as_list()
      return tuple([batch_size] + shape)
    for cell in self.cells:
      if isinstance(cell, Layer) and not cell.built:
        with backend.name_scope(cell.name):
          cell.build(input_shape)
          cell.built = True
      if getattr(cell, 'output_size', None) is not None:
        output_dim = cell.output_size
      elif _is_multiple_state(cell.state_size):
        output_dim = cell.state_size[0]
      else:
        output_dim = cell.state_size
      # The next cell's input shape is this cell's output shape.
      batch_size = tf.nest.flatten(input_shape)[0]
      if tf.nest.is_nested(output_dim):
        input_shape = tf.nest.map_structure(
            functools.partial(get_batch_input_shape, batch_size), output_dim)
        input_shape = tuple(input_shape)
      else:
        input_shape = tuple([batch_size] + tf.TensorShape(output_dim).as_list())
    self.built = True
  def get_config(self):
    """Returns the config with every cell serialized under 'cells'."""
    cells = []
    for cell in self.cells:
      cells.append(generic_utils.serialize_keras_object(cell))
    config = {'cells': cells}
    base_config = super(StackedRNNCells, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Reconstructs the stack by deserializing each cell config."""
    from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    cells = []
    for cell_config in config.pop('cells'):
      cells.append(
          deserialize_layer(cell_config, custom_objects=custom_objects))
    return cls(cells, **config)
@keras_export('keras.layers.RNN')
class RNN(Layer):
"""Base class for recurrent layers.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Args:
cell: A RNN cell instance or a list of RNN cell instances.
A RNN cell is a class that has:
- A `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- A `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers (one size per state).
The `state_size` can also be TensorShape or tuple/list of
TensorShape, to represent high dimension state.
- A `output_size` attribute. This can be a single integer or a
TensorShape, which represent the shape of the output. For backward
compatible reason, if this attribute is not available for the
cell, the value will be inferred by the first element of the
`state_size`.
- A `get_initial_state(inputs=None, batch_size=None, dtype=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if the user didn't specify any initial state via other
means. The returned initial state should have a shape of
[batch_size, cell.state_size]. The cell might choose to create a
tensor full of zeros, or full of other values based on the cell's
implementation.
`inputs` is the input tensor to the RNN layer, which should
contain the batch size as its shape[0], and also dtype. Note that
the shape[0] might be `None` during the graph construction. Either
the `inputs` or the pair of `batch_size` and `dtype` are provided.
`batch_size` is a scalar tensor that represents the batch size
of the inputs. `dtype` is `tf.DType` that represents the dtype of
the inputs.
For backward compatibility, if this method is not implemented
by the cell, the RNN layer will create a zero filled tensor with the
size of [batch_size, cell.state_size].
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on top of each other in the RNN, resulting in an
efficient stacked RNN.
return_sequences: Boolean (default `False`). Whether to return the last
output in the output sequence, or the full sequence.
return_state: Boolean (default `False`). Whether to return the last state
in addition to the output.
go_backwards: Boolean (default `False`).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default `False`). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default `False`).
If True, the network will be unrolled, else a symbolic loop will be used.
Unrolling can speed-up a RNN, although it tends to be more
memory-intensive. Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean (default `False`).
Whether the output should use zeros for the masked timesteps. Note that
this field is only used when `return_sequences` is True and mask is
provided. It can useful if you want to reuse the raw output sequence of
the RNN without interference from the masked timesteps, eg, merging
bidirectional RNNs.
Call arguments:
inputs: Input tensor.
mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
N-D tensor with shape `[batch_size, timesteps, ...]` or
`[timesteps, batch_size, ...]` when time_major is True.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `[batch_size, state_size]`, where `state_size` could
be a high dimension tensor shape.
- If `return_sequences`: N-D tensor with shape
`[batch_size, timesteps, output_size]`, where `output_size` could
be a high dimension tensor shape, or
`[timesteps, batch_size, output_size]` when `time_major` is True.
- Else, N-D tensor with shape `[batch_size, output_size]`, where
`output_size` could be a high dimension tensor shape.
Masking:
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [tf.keras.layers.Embedding] layer with the `mask_zero` parameter
set to `True`.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
Else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- Specify `shuffle=False` when calling `fit()`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
Examples:
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = backend.dot(inputs, self.kernel)
output = h + backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
  def __init__(self,
               cell,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               time_major=False,
               **kwargs):
    """Validates `cell` and initializes the RNN layer's bookkeeping state.
    See the class docstring for argument semantics.
    Raises:
      ValueError: If `cell` lacks a `call` method or a `state_size`
        attribute, or if `stateful=True` is requested under a
        `tf.distribute.Strategy`.
    """
    if isinstance(cell, (list, tuple)):
      cell = StackedRNNCells(cell)
    if 'call' not in dir(cell):
      raise ValueError('Argument `cell` should have a `call` method. '
                       f'The RNN was passed: cell={cell}')
    if 'state_size' not in dir(cell):
      raise ValueError('The RNN cell should have a `state_size` attribute '
                       '(tuple of integers, one integer per RNN state). '
                       f'Received: cell={cell}')
    # If True, the output for masked timestep will be zeros, whereas in the
    # False case, output from previous timestep is returned for masked timestep.
    self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False)
    # Support the legacy `input_dim`/`input_length` kwargs by converting
    # them into an equivalent `input_shape`.
    if 'input_shape' not in kwargs and (
        'input_dim' in kwargs or 'input_length' in kwargs):
      input_shape = (kwargs.pop('input_length', None),
                     kwargs.pop('input_dim', None))
      kwargs['input_shape'] = input_shape
    super(RNN, self).__init__(**kwargs)
    self.cell = cell
    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.time_major = time_major
    self.supports_masking = True
    # The input shape is unknown yet, it could have nested tensor inputs, and
    # the input spec will be the list of specs for nested inputs, the structure
    # of the input_spec will be the same as the input.
    self.input_spec = None
    self.state_spec = None
    self._states = None
    self.constants_spec = None
    self._num_constants = 0
    if stateful:
      if tf.distribute.has_strategy():
        raise ValueError('Stateful RNNs (created with `stateful=True`) '
                         'are not yet supported with tf.distribute.Strategy.')
@property
def _use_input_spec_as_call_signature(self):
if self.unroll:
# When the RNN layer is unrolled, the time step shape cannot be unknown.
# The input spec does not define the time step (because this layer can be
# called with any time step value, as long as it is not None), so it
# cannot be used as the call function signature when saving to SavedModel.
return False
return super(RNN, self)._use_input_spec_as_call_signature
@property
def states(self):
if self._states is None:
state = tf.nest.map_structure(lambda _: None, self.cell.state_size)
return state if tf.nest.is_nested(self.cell.state_size) else [state]
return self._states
  @states.setter
  # Automatic tracking catches "self._states" which adds an extra weight and
  # breaks HDF5 checkpoints.
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def states(self, states):
    # Store the states directly, bypassing Keras dependency tracking (see
    # the note on the decorator above).
    self._states = states
  def compute_output_shape(self, input_shape):
    """Computes the output (and optional state) shapes for this layer.
    The output dimension comes from `cell.output_size` when available,
    otherwise from the first entry of `cell.state_size`. With
    `return_state`, per-state shapes `[batch] + state_dim` are appended.
    """
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    # Check whether the input shape contains any nested shapes. It could be
    # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
    # inputs.
    try:
      input_shape = tf.TensorShape(input_shape)
    except (ValueError, TypeError):
      # A nested tensor input
      input_shape = tf.nest.flatten(input_shape)[0]
    batch = input_shape[0]
    time_step = input_shape[1]
    if self.time_major:
      batch, time_step = time_step, batch
    if _is_multiple_state(self.cell.state_size):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]
    def _get_output_shape(flat_output_size):
      # Builds the output shape for a single (flat) output dimension,
      # honoring `return_sequences` and `time_major`.
      output_dim = tf.TensorShape(flat_output_size).as_list()
      if self.return_sequences:
        if self.time_major:
          output_shape = tf.TensorShape(
              [time_step, batch] + output_dim)
        else:
          output_shape = tf.TensorShape(
              [batch, time_step] + output_dim)
      else:
        output_shape = tf.TensorShape([batch] + output_dim)
      return output_shape
    if getattr(self.cell, 'output_size', None) is not None:
      # cell.output_size could be nested structure.
      output_shape = tf.nest.flatten(tf.nest.map_structure(
          _get_output_shape, self.cell.output_size))
      output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
    else:
      # Note that state_size[0] could be a tensor_shape or int.
      output_shape = _get_output_shape(state_size[0])
    if self.return_state:
      def _get_state_shape(flat_state):
        # States keep the batch dimension but have no time dimension.
        state_shape = [batch] + tf.TensorShape(flat_state).as_list()
        return tf.TensorShape(state_shape)
      state_shape = tf.nest.map_structure(_get_state_shape, state_size)
      return generic_utils.to_list(output_shape) + tf.nest.flatten(state_shape)
    else:
      return output_shape
def compute_mask(self, inputs, mask):
  """Compute the output mask(s) from the input mask.

  Time step masks must be the same for each input, because the RNN mask has
  shape `[batch, time_steps, 1]` and a skipped step is skipped for every
  input; only the first flat mask entry is therefore consulted.
  """
  # TODO(scottzhu): Should we accept multiple different masks?
  first_mask = tf.nest.flatten(mask)[0]
  if not self.return_sequences:
    # A single last-step output carries no time mask.
    first_mask = None
  if not self.return_state:
    return first_mask
  # One `None` mask per returned state tensor.
  return [first_mask] + [None] * len(self.states)
def build(self, input_shape):
  """Build the wrapped cell and set up input/state specs.

  Args:
    input_shape: Shape of the inputs. A list is interpreted as
      `[inputs, initial_state..., constants...]` and only the first entry is
      used. May be a nested structure of shapes for nested inputs.
  """
  if isinstance(input_shape, list):
    input_shape = input_shape[0]
  # The input_shape here could be a nest structure.

  # do the tensor_shape to shapes here. The input could be single tensor, or a
  # nested structure of tensors.
  def get_input_spec(shape):
    """Convert input shape to InputSpec."""
    if isinstance(shape, tf.TensorShape):
      input_spec_shape = shape.as_list()
    else:
      input_spec_shape = list(shape)
    batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
    if not self.stateful:
      # Stateful layers must pin the batch size; others leave it free.
      input_spec_shape[batch_index] = None
    input_spec_shape[time_step_index] = None
    return InputSpec(shape=tuple(input_spec_shape))

  def get_step_input_shape(shape):
    # Shape of a single time step, as seen by the cell.
    if isinstance(shape, tf.TensorShape):
      shape = tuple(shape.as_list())
    # remove the timestep from the input_shape
    return shape[1:] if self.time_major else (shape[0],) + shape[2:]

  def get_state_spec(shape):
    # Spec for one state tensor: cell state shape plus a free batch axis.
    state_spec_shape = tf.TensorShape(shape).as_list()
    # append batch dim
    state_spec_shape = [None] + state_spec_shape
    return InputSpec(shape=tuple(state_spec_shape))

  # Check whether the input shape contains any nested shapes. It could be
  # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
  # inputs.
  try:
    input_shape = tf.TensorShape(input_shape)
  except (ValueError, TypeError):
    # A nested tensor input
    pass

  if not tf.nest.is_nested(input_shape):
    # This indicates the there is only one input.
    if self.input_spec is not None:
      self.input_spec[0] = get_input_spec(input_shape)
    else:
      self.input_spec = [get_input_spec(input_shape)]
    step_input_shape = get_step_input_shape(input_shape)
  else:
    if self.input_spec is not None:
      self.input_spec[0] = tf.nest.map_structure(get_input_spec, input_shape)
    else:
      self.input_spec = generic_utils.to_list(
          tf.nest.map_structure(get_input_spec, input_shape))
    step_input_shape = tf.nest.map_structure(get_step_input_shape, input_shape)

  # allow cell (if layer) to build before we set or validate state_spec.
  if isinstance(self.cell, Layer) and not self.cell.built:
    with backend.name_scope(self.cell.name):
      self.cell.build(step_input_shape)
      self.cell.built = True

  # set or validate state_spec
  if _is_multiple_state(self.cell.state_size):
    state_size = list(self.cell.state_size)
  else:
    state_size = [self.cell.state_size]

  if self.state_spec is not None:
    # initial_state was passed in call, check compatibility
    self._validate_state_spec(state_size, self.state_spec)
  else:
    if tf.nest.is_nested(state_size):
      self.state_spec = tf.nest.map_structure(get_state_spec, state_size)
    else:
      self.state_spec = [
          InputSpec(shape=[None] + tf.TensorShape(dim).as_list())
          for dim in state_size
      ]
    # ensure the generated state_spec is correct.
    self._validate_state_spec(state_size, self.state_spec)
  if self.stateful:
    # Stateful layers allocate their state variables at build time.
    self.reset_states()
  self.built = True
@staticmethod
def _validate_state_spec(cell_state_sizes, init_state_specs):
  """Validate the state spec between the initial_state and the state_size.

  Args:
    cell_state_sizes: list, the `state_size` attribute from the cell.
    init_state_specs: list, the `state_spec` from the initial_state that is
      passed in `call()`.

  Raises:
    ValueError: When initial state spec is not compatible with the state size.
  """
  def _incompatible_error():
    return ValueError(
        'An `initial_state` was passed that is not compatible with '
        '`cell.state_size`. Received `state_spec`={}; '
        'however `cell.state_size` is '
        '{}'.format(init_state_specs, cell_state_sizes))

  flat_sizes = tf.nest.flatten(cell_state_sizes)
  flat_specs = tf.nest.flatten(init_state_specs)
  if len(flat_specs) != len(flat_sizes):
    raise _incompatible_error()
  for spec, size in zip(flat_specs, flat_sizes):
    # The leading axis of the spec is the batch dimension; only the
    # remaining axes have to agree with the cell's declared state size.
    if not tf.TensorShape(spec.shape[1:]).is_compatible_with(
        tf.TensorShape(size)):
      raise _incompatible_error()
@doc_controls.do_not_doc_inheritable
def get_initial_state(self, inputs):
  """Return the initial state list for this layer, derived from `inputs`.

  Uses the cell's own `get_initial_state` when available, otherwise falls
  back to zero-filled state tensors.
  """
  if tf.nest.is_nested(inputs):
    # The input are nested sequences. Use the first element in the seq to
    # get batch size and dtype.
    inputs = tf.nest.flatten(inputs)[0]
  shape = tf.shape(inputs)
  if self.time_major:
    batch_size = shape[1]
  else:
    batch_size = shape[0]

  cell_init_fn = getattr(self.cell, 'get_initial_state', None)
  if cell_init_fn:
    initial = cell_init_fn(
        inputs=None, batch_size=batch_size, dtype=inputs.dtype)
  else:
    initial = _generate_zero_filled_state(
        batch_size, self.cell.state_size, inputs.dtype)
  # Keras RNN expects the states in a list, even for a single state tensor;
  # list() also flattens namedtuples such as LSTMStateTuple.
  return list(initial) if tf.nest.is_nested(initial) else [initial]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
  """Wrap `Layer.__call__` to route `initial_state` and `constants`.

  When either is a Keras tensor, they are appended to the inputs and the
  `input_spec` is temporarily extended so the base layer accepts them.
  """
  inputs, initial_state, constants = _standardize_args(inputs,
                                                       initial_state,
                                                       constants,
                                                       self._num_constants)

  if initial_state is None and constants is None:
    # Fast path: plain layer call with no extra tensors to thread through.
    return super(RNN, self).__call__(inputs, **kwargs)

  # If any of `initial_state` or `constants` are specified and are Keras
  # tensors, then add them to the inputs and temporarily modify the
  # input_spec to include them.

  additional_inputs = []
  additional_specs = []
  if initial_state is not None:
    additional_inputs += initial_state
    self.state_spec = tf.nest.map_structure(
        lambda s: InputSpec(shape=backend.int_shape(s)), initial_state)
    additional_specs += self.state_spec
  if constants is not None:
    additional_inputs += constants
    self.constants_spec = [
        InputSpec(shape=backend.int_shape(constant)) for constant in constants
    ]
    self._num_constants = len(constants)
    additional_specs += self.constants_spec
  # additional_inputs can be empty if initial_state or constants are provided
  # but empty (e.g. the cell is stateless).
  flat_additional_inputs = tf.nest.flatten(additional_inputs)
  is_keras_tensor = backend.is_keras_tensor(
      flat_additional_inputs[0]) if flat_additional_inputs else True
  for tensor in flat_additional_inputs:
    # All extra tensors must agree: either all Keras tensors or none.
    if backend.is_keras_tensor(tensor) != is_keras_tensor:
      raise ValueError(
          'The initial state or constants of an RNN layer cannot be '
          'specified via a mix of Keras tensors and non-Keras tensors '
          '(a "Keras tensor" is a tensor that was returned by a Keras layer '
          ' or by `Input` during Functional model construction). '
          f'Received: initial_state={initial_state}, constants={constants}')

  if is_keras_tensor:
    # Compute the full input spec, including state and constants
    full_input = [inputs] + additional_inputs
    if self.built:
      # Keep the input_spec since it has been populated in build() method.
      full_input_spec = self.input_spec + additional_specs
    else:
      # The original input_spec is None since there could be a nested tensor
      # input. Update the input_spec to match the inputs.
      full_input_spec = generic_utils.to_list(
          tf.nest.map_structure(lambda _: None, inputs)) + additional_specs
    # Perform the call with temporarily replaced input_spec
    self.input_spec = full_input_spec
    output = super(RNN, self).__call__(full_input, **kwargs)
    # Remove the additional_specs from input spec and keep the rest. It is
    # important to keep since the input spec was populated by build(), and
    # will be reused in the stateful=True.
    self.input_spec = self.input_spec[:-len(additional_specs)]
    return output
  else:
    # Non-Keras tensors are forwarded as plain kwargs to call().
    if initial_state is not None:
      kwargs['initial_state'] = initial_state
    if constants is not None:
      kwargs['constants'] = constants
    return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
         inputs,
         mask=None,
         training=None,
         initial_state=None,
         constants=None):
  """Run the recurrent computation over the time dimension.

  Delegates the per-step computation to the wrapped cell via `backend.rnn`,
  handles ragged inputs, masks, stateful updates, and the
  `return_sequences`/`return_state` output variants.
  """
  # The input should be dense, padded with zeros. If a ragged input is fed
  # into the layer, it is padded and the row lengths are used for masking.
  inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
  is_ragged_input = (row_lengths is not None)
  self._validate_args_if_ragged(is_ragged_input, mask)

  inputs, initial_state, constants = self._process_inputs(
      inputs, initial_state, constants)

  # Dropout masks must not leak across batches; clear any cached masks.
  self._maybe_reset_cell_dropout_mask(self.cell)
  if isinstance(self.cell, StackedRNNCells):
    for cell in self.cell.cells:
      self._maybe_reset_cell_dropout_mask(cell)

  if mask is not None:
    # Time step masks must be the same for each input.
    # TODO(scottzhu): Should we accept multiple different masks?
    mask = tf.nest.flatten(mask)[0]

  if tf.nest.is_nested(inputs):
    # In the case of nested input, use the first element for shape check.
    input_shape = backend.int_shape(tf.nest.flatten(inputs)[0])
  else:
    input_shape = backend.int_shape(inputs)
  timesteps = input_shape[0] if self.time_major else input_shape[1]
  if self.unroll and timesteps is None:
    raise ValueError('Cannot unroll a RNN if the '
                     'time dimension is undefined. \n'
                     '- If using a Sequential model, '
                     'specify the time dimension by passing '
                     'an `input_shape` or `batch_input_shape` '
                     'argument to your first layer. If your '
                     'first layer is an Embedding, you can '
                     'also use the `input_length` argument.\n'
                     '- If using the functional API, specify '
                     'the time dimension by passing a `shape` '
                     'or `batch_shape` argument to your Input layer.')

  kwargs = {}
  if generic_utils.has_arg(self.cell.call, 'training'):
    kwargs['training'] = training

  # TF RNN cells expect single tensor as state instead of list wrapped tensor.
  is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None
  # Use the __call__ function for callable objects, eg layers, so that it
  # will have the proper name scopes for the ops, etc.
  cell_call_fn = self.cell.__call__ if callable(self.cell) else self.cell.call
  if constants:
    if not generic_utils.has_arg(self.cell.call, 'constants'):
      raise ValueError(
          f'RNN cell {self.cell} does not support constants. '
          f'Received: constants={constants}')

    def step(inputs, states):
      # Constants ride along at the tail of `states`; split them back out.
      constants = states[-self._num_constants:]  # pylint: disable=invalid-unary-operand-type
      states = states[:-self._num_constants]  # pylint: disable=invalid-unary-operand-type

      states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
      output, new_states = cell_call_fn(
          inputs, states, constants=constants, **kwargs)
      if not tf.nest.is_nested(new_states):
        new_states = [new_states]
      return output, new_states
  else:

    def step(inputs, states):
      states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
      output, new_states = cell_call_fn(inputs, states, **kwargs)
      if not tf.nest.is_nested(new_states):
        new_states = [new_states]
      return output, new_states

  last_output, outputs, states = backend.rnn(
      step,
      inputs,
      initial_state,
      constants=constants,
      go_backwards=self.go_backwards,
      mask=mask,
      unroll=self.unroll,
      input_length=row_lengths if row_lengths is not None else timesteps,
      time_major=self.time_major,
      zero_output_for_mask=self.zero_output_for_mask)

  if self.stateful:
    # Carry the final states over to the next batch.
    updates = [
        tf.compat.v1.assign(self_state, state) for self_state, state in zip(
            tf.nest.flatten(self.states), tf.nest.flatten(states))
    ]
    self.add_update(updates)

  if self.return_sequences:
    output = backend.maybe_convert_to_ragged(
        is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards)
  else:
    output = last_output

  if self.return_state:
    if not isinstance(states, (list, tuple)):
      states = [states]
    else:
      states = list(states)
    return generic_utils.to_list(output) + states
  else:
    return output
def _process_inputs(self, inputs, initial_state, constants):
  """Unpack `inputs` and resolve the initial state to use for this call.

  When `inputs` is a list, it is interpreted as
  `[inputs, initial_state..., constants...]`. For stateful layers the
  recorded states take precedence over provided `initial_state` once they
  are non-zero.
  """
  # input shape: `(samples, time (padded with zeros), input_dim)`
  # note that the .build() method of subclasses MUST define
  # self.input_spec and self.state_spec with complete input shapes.
  if (isinstance(inputs, collections.abc.Sequence)
      and not isinstance(inputs, tuple)):
    # get initial_state from full input spec
    # as they could be copied to multiple GPU.
    if not self._num_constants:
      initial_state = inputs[1:]
    else:
      initial_state = inputs[1:-self._num_constants]
      constants = inputs[-self._num_constants:]
    if len(initial_state) == 0:
      initial_state = None
    inputs = inputs[0]

  if self.stateful:
    if initial_state is not None:
      # When layer is stateful and initial_state is provided, check if the
      # recorded state is same as the default value (zeros). Use the recorded
      # state if it is not same as the default.
      non_zero_count = tf.add_n([tf.math.count_nonzero(s)
                                 for s in tf.nest.flatten(self.states)])
      # Set strict = True to keep the original structure of the state.
      initial_state = tf.compat.v1.cond(non_zero_count > 0,
                                        true_fn=lambda: self.states,
                                        false_fn=lambda: initial_state,
                                        strict=True)
    else:
      initial_state = self.states
  elif initial_state is None:
    initial_state = self.get_initial_state(inputs)

  if len(initial_state) != len(self.states):
    raise ValueError(f'Layer has {len(self.states)} '
                     f'states but was passed {len(initial_state)} initial '
                     f'states. Received: initial_state={initial_state}')
  return inputs, initial_state, constants
def _validate_args_if_ragged(self, is_ragged_input, mask):
if not is_ragged_input:
return
if mask is not None:
raise ValueError(f'The mask that was passed in was {mask}, which '
'cannot be applied to RaggedTensor inputs. Please '
'make sure that there is no mask injected by upstream '
'layers.')
if self.unroll:
raise ValueError('The input received contains RaggedTensors and does '
'not support unrolling. Disable unrolling by passing '
'`unroll=False` in the RNN Layer constructor.')
def _maybe_reset_cell_dropout_mask(self, cell):
  """Clear cached dropout masks on `cell` if it caches any."""
  if not isinstance(cell, DropoutRNNCellMixin):
    return
  cell.reset_dropout_mask()
  cell.reset_recurrent_dropout_mask()
def reset_states(self, states=None):
  """Reset the recorded states for the stateful RNN layer.

  Can only be used when RNN layer is constructed with `stateful` = `True`.
  Args:
    states: Numpy arrays that contains the value for the initial state, which
      will be feed to cell at the first time step. When the value is None,
      zero filled numpy array will be created based on the cell state size.

  Raises:
    AttributeError: When the RNN layer is not stateful.
    ValueError: When the batch size of the RNN layer is unknown.
    ValueError: When the input numpy array is not compatible with the RNN
      layer state, either size wise or dtype wise.
  """
  if not self.stateful:
    raise AttributeError('Layer must be stateful.')
  spec_shape = None
  if self.input_spec is not None:
    spec_shape = tf.nest.flatten(self.input_spec[0])[0].shape
  if spec_shape is None:
    # It is possible to have spec shape to be None, eg when construct a RNN
    # with a custom cell, or standard RNN layers (LSTM/GRU) which we only know
    # it has 3 dim input, but not its full shape spec before build().
    batch_size = None
  else:
    batch_size = spec_shape[1] if self.time_major else spec_shape[0]
  if not batch_size:
    raise ValueError('If a RNN is stateful, it needs to know '
                     'its batch size. Specify the batch size '
                     'of your input tensors: \n'
                     '- If using a Sequential model, '
                     'specify the batch size by passing '
                     'a `batch_input_shape` '
                     'argument to your first layer.\n'
                     '- If using the functional API, specify '
                     'the batch size by passing a '
                     '`batch_shape` argument to your Input layer.')
  # initialize state if None
  if tf.nest.flatten(self.states)[0] is None:
    # State variables have not been created yet; build them from the cell's
    # initial state (or zeros) and store them as backend variables.
    if getattr(self.cell, 'get_initial_state', None):
      flat_init_state_values = tf.nest.flatten(self.cell.get_initial_state(
          inputs=None, batch_size=batch_size,
          dtype=self.dtype or backend.floatx()))
    else:
      flat_init_state_values = tf.nest.flatten(_generate_zero_filled_state(
          batch_size, self.cell.state_size, self.dtype or backend.floatx()))
    flat_states_variables = tf.nest.map_structure(
        backend.variable, flat_init_state_values)
    self.states = tf.nest.pack_sequence_as(self.cell.state_size,
                                           flat_states_variables)
    if not tf.nest.is_nested(self.states):
      self.states = [self.states]
  elif states is None:
    # Reset the existing state variables to zeros.
    for state, size in zip(tf.nest.flatten(self.states),
                           tf.nest.flatten(self.cell.state_size)):
      backend.set_value(
          state,
          np.zeros([batch_size] + tf.TensorShape(size).as_list()))
  else:
    # Set the state variables to the user-provided values, after checking
    # structure and per-state shape compatibility.
    flat_states = tf.nest.flatten(self.states)
    flat_input_states = tf.nest.flatten(states)
    if len(flat_input_states) != len(flat_states):
      raise ValueError(f'Layer {self.name} expects {len(flat_states)} '
                       f'states, but it received {len(flat_input_states)} '
                       f'state values. States received: {states}')
    set_value_tuples = []
    for i, (value, state) in enumerate(zip(flat_input_states,
                                           flat_states)):
      if value.shape != state.shape:
        # Bug fix: report the expected state *shape* rather than
        # interpolating the state variable object itself into the message.
        raise ValueError(
            f'State {i} is incompatible with layer {self.name}: '
            f'expected shape={state.shape} '
            f'but found shape={value.shape}')
      set_value_tuples.append((state, value))
    backend.batch_set_value(set_value_tuples)
def get_config(self):
  """Return the serializable configuration of this RNN layer."""
  config = {
      'return_sequences': self.return_sequences,
      'return_state': self.return_state,
      'go_backwards': self.go_backwards,
      'stateful': self.stateful,
      'unroll': self.unroll,
      'time_major': self.time_major,
  }
  # Optional fields are only recorded when they deviate from the defaults.
  if self._num_constants:
    config['num_constants'] = self._num_constants
  if self.zero_output_for_mask:
    config['zero_output_for_mask'] = self.zero_output_for_mask
  config['cell'] = generic_utils.serialize_keras_object(self.cell)
  # Layer-specific keys override any base-layer keys of the same name.
  merged = dict(super(RNN, self).get_config())
  merged.update(config)
  return merged
@classmethod
def from_config(cls, config, custom_objects=None):
  """Reconstruct an RNN layer (and its wrapped cell) from a config dict."""
  from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
  # Pop the cell/constant entries so the remaining keys are plain layer
  # constructor kwargs. Note: `config` is intentionally mutated in place.
  cell_config = config.pop('cell')
  n_constants = config.pop('num_constants', 0)
  instance = cls(
      deserialize_layer(cell_config, custom_objects=custom_objects), **config)
  instance._num_constants = n_constants
  return instance
@property
def _trackable_saved_model_saver(self):
  # SavedModel serialization hook: delegates RNN-specific saving (e.g. the
  # wrapped cell) to the dedicated RNN saver.
  return layer_serialization.RNNSavedModelSaver(self)
@keras_export('keras.layers.AbstractRNNCell')
class AbstractRNNCell(Layer):
  """Abstract object representing an RNN cell.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  This is the base class for implementing RNN cells with custom behavior.

  Every `RNNCell` must have the properties below and implement `call` with
  the signature `(output, next_state) = call(input, state)`.

  Examples:

  ```python
    class MinimalRNNCell(AbstractRNNCell):

      def __init__(self, units, **kwargs):
        self.units = units
        super(MinimalRNNCell, self).__init__(**kwargs)

      @property
      def state_size(self):
        return self.units

      def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

      def call(self, inputs, states):
        prev_output = states[0]
        h = backend.dot(inputs, self.kernel)
        output = h + backend.dot(prev_output, self.recurrent_kernel)
        return output, output
  ```

  This definition of cell differs from the definition used in the literature.
  In the literature, 'cell' refers to an object with a single scalar output.
  This definition refers to a horizontal array of such units.

  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  (possibly nested tuple of) TensorShape object(s), then it should return a
  matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
  """

  def call(self, inputs, states):
    """The function that contains the logic for one RNN step calculation.

    Args:
      inputs: the input tensor, which is a slide from the overall RNN input by
        the time dimension (usually the second dimension).
      states: the state tensor from previous step, which has the same shape
        as `(batch, state_size)`. In the case of timestep 0, it will be the
        initial state user specified, or zero filled tensor otherwise.

    Returns:
      A tuple of two tensors:
        1. output tensor for the current timestep, with size `output_size`.
        2. state tensor for next step, which has the shape of `state_size`.
    """
    raise NotImplementedError

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError

  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Return a zero-filled initial state matching this cell's `state_size`."""
    return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@doc_controls.do_not_generate_docs
class DropoutRNNCellMixin:
  """Object that holds dropout-related fields for an RNN cell.

  This class is not a standalone RNN cell. It is meant to be used with an RNN
  cell via multiple inheritance. Any cell that mixes in this class should have
  the following fields:
    dropout: a float number within range [0, 1). The ratio that the input
      tensor need to dropout.
    recurrent_dropout: a float number within range [0, 1). The ratio that the
      recurrent state weights need to dropout.
  This object will create and cache created dropout masks, and reuse them for
  the incoming data, so that the same mask is used for every batch input.
  """

  def __init__(self, *args, **kwargs):
    # Create the caches before the cell's own __init__ so that they exist
    # even if a subclass touches them during construction.
    self._create_non_trackable_mask_cache()
    super(DropoutRNNCellMixin, self).__init__(*args, **kwargs)

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _create_non_trackable_mask_cache(self):
    """Create the cache for dropout and recurrent dropout mask.

    Note that the following two masks will be used in "graph function" mode,
    e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
    tensors will be generated differently than in the "graph function" case,
    and they will be cached.

    Also note that in graph mode, we still cache those masks only because the
    RNN could be created with `unroll=True`. In that case, the `cell.call()`
    function will be invoked multiple times, and we want to ensure same mask
    is used every time.

    Also the caches are created without tracking. Since they are not picklable
    by python when deepcopy, we don't want `layer._obj_reference_counts_dict`
    to track it by default.
    """
    self._dropout_mask_cache = backend.ContextValueCache(
        self._create_dropout_mask)
    self._recurrent_dropout_mask_cache = backend.ContextValueCache(
        self._create_recurrent_dropout_mask)

  def reset_dropout_mask(self):
    """Reset the cached dropout masks, if any.

    The RNN layer invokes this in its `call()` method so that the cached mask
    is cleared before calling `cell.call()`. The mask should be cached across
    all timesteps within the same batch, but shouldn't be cached between
    batches. Otherwise it will introduce unreasonable bias against certain
    indices of data within the batch.
    """
    self._dropout_mask_cache.clear()

  def reset_recurrent_dropout_mask(self):
    """Reset the cached recurrent dropout masks, if any.

    The RNN layer invokes this in its `call()` method so that the cached mask
    is cleared before calling `cell.call()`. The mask should be cached across
    all timesteps within the same batch, but shouldn't be cached between
    batches. Otherwise it will introduce unreasonable bias against certain
    indices of data within the batch.
    """
    self._recurrent_dropout_mask_cache.clear()

  def _create_dropout_mask(self, inputs, training, count=1):
    # Factory used by the cache: builds `count` masks shaped like `inputs`.
    return _generate_dropout_mask(
        tf.ones_like(inputs),
        self.dropout,
        training=training,
        count=count)

  def _create_recurrent_dropout_mask(self, inputs, training, count=1):
    # Factory used by the cache: builds `count` recurrent masks.
    return _generate_dropout_mask(
        tf.ones_like(inputs),
        self.recurrent_dropout,
        training=training,
        count=count)

  def get_dropout_mask_for_cell(self, inputs, training, count=1):
    """Get the dropout mask for RNN cell's input.

    It will create mask based on context if there isn't any existing cached
    mask. If a new mask is generated, it will update the cache in the cell.

    Args:
      inputs: The input tensor whose shape will be used to generate dropout
        mask.
      training: Boolean tensor, whether its in training mode, dropout will be
        ignored in non-training mode.
      count: Int, how many dropout mask will be generated. It is useful for cell
        that has internal weights fused together.
    Returns:
      List of mask tensor, generated or cached mask based on context.
    """
    if self.dropout == 0:
      return None
    init_kwargs = dict(inputs=inputs, training=training, count=count)
    return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)

  def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
    """Get the recurrent dropout mask for RNN cell.

    It will create mask based on context if there isn't any existing cached
    mask. If a new mask is generated, it will update the cache in the cell.

    Args:
      inputs: The input tensor whose shape will be used to generate dropout
        mask.
      training: Boolean tensor, whether its in training mode, dropout will be
        ignored in non-training mode.
      count: Int, how many dropout mask will be generated. It is useful for cell
        that has internal weights fused together.
    Returns:
      List of mask tensor, generated or cached mask based on context.
    """
    if self.recurrent_dropout == 0:
      return None
    init_kwargs = dict(inputs=inputs, training=training, count=count)
    return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)

  def __getstate__(self):
    # Used for deepcopy. The caching can't be pickled by python, since it will
    # contain tensor and graph.
    state = super(DropoutRNNCellMixin, self).__getstate__()
    state.pop('_dropout_mask_cache', None)
    state.pop('_recurrent_dropout_mask_cache', None)
    return state

  def __setstate__(self, state):
    # Rebuild the (unpicklable) caches that __getstate__ dropped.
    state['_dropout_mask_cache'] = backend.ContextValueCache(
        self._create_dropout_mask)
    state['_recurrent_dropout_mask_cache'] = backend.ContextValueCache(
        self._create_recurrent_dropout_mask)
    super(DropoutRNNCellMixin, self).__setstate__(state)
@keras_export('keras.layers.SimpleRNNCell')
class SimpleRNNCell(DropoutRNNCellMixin, Layer):
"""Cell class for SimpleRNN.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
This class processes one step within the whole time sequence input, whereas
`tf.keras.layer.SimpleRNN` processes the whole sequence.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
states: A 2D tensor with shape of `[batch, units]`, which is the state from
the previous time step. For timestep 0, the initial state provided by user
will be feed to cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
Examples:
```python
inputs = np.random.random([32, 10, 8]).astype(np.float32)
rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))
output = rnn(inputs) # The output has shape `[32, 4]`.
rnn = tf.keras.layers.RNN(
tf.keras.layers.SimpleRNNCell(4),
return_sequences=True,
return_state=True)
# whole_sequence_output has shape `[32, 10, 4]`.
# final_state has shape `[32, 4]`.
whole_sequence_output, final_state = rnn(inputs)
```
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
if units < 0:
raise ValueError(f'Received an invalid value for argument `units`, '
f'expected a positive integer, got {units}.')
# By default use cached variable under v2 mode, see b/143699808.
if tf.compat.v1.executing_eagerly_outside_functions():
self._enable_caching_device = kwargs.pop('enable_caching_device', True)
else:
self._enable_caching_device = kwargs.pop('enable_caching_device', False)
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
default_caching_device = _caching_device(self)
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
caching_device=default_caching_device)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
caching_device=default_caching_device)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0] if tf.nest.is_nested(states) else states
dp_mask = self.get_dropout_mask_for_cell(inputs, training)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
prev_output, training)
if dp_mask is not None:
h = backend.dot(inputs * dp_mask, self.kernel)
else:
h = backend.dot(inputs, self.kernel)
if self.bias is not None:
h = backend.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output = prev_output * rec_dp_mask
output = h + backend.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
new_state = [output] if tf.nest.is_nested(states) else output
return output, new_state
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    # Delegate to the shared helper that builds an all-zeros initial state.
    return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
config.update(_config_for_enable_caching_device(self))
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SimpleRNN')
class SimpleRNN(RNN):
  """Fully-connected RNN where the output is to be fed back to input.
  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.
  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass None, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs. Default:
      `glorot_uniform`.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent state.
      Default: `orthogonal`.
    bias_initializer: Initializer for the bias vector. Default: `zeros`.
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix. Default: `None`.
    bias_regularizer: Regularizer function applied to the bias vector. Default:
      `None`.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation"). Default: `None`.
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_constraint: Constraint function applied to the `recurrent_kernel`
      weights matrix. Default: `None`.
    bias_constraint: Constraint function applied to the bias vector. Default:
      `None`.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for the linear transformation of the inputs.
      Default: 0.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for the linear transformation of the
      recurrent state. Default: 0.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence. Default: `False`.
    return_state: Boolean. Whether to return the last state
      in addition to the output. Default: `False`
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.
  Call arguments:
    inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
    mask: Binary tensor of shape `[batch, timesteps]` indicating whether
      a given timestep should be masked. An individual `True` entry indicates
      that the corresponding timestep should be utilized, while a `False` entry
      indicates that the corresponding timestep should be ignored.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
  Examples:
  ```python
  inputs = np.random.random([32, 10, 8]).astype(np.float32)
  simple_rnn = tf.keras.layers.SimpleRNN(4)
  output = simple_rnn(inputs)  # The output has shape `[32, 4]`.
  simple_rnn = tf.keras.layers.SimpleRNN(
      4, return_sequences=True, return_state=True)
  # whole_sequence_output has shape `[32, 10, 4]`.
  # final_state has shape `[32, 4]`.
  whole_sequence_output, final_state = simple_rnn(inputs)
  ```
  """
  def __init__(self,
               units,
               activation='tanh',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               **kwargs):
    # The legacy `implementation` kwarg has no effect; accept and discard it
    # so old call sites keep working.
    if 'implementation' in kwargs:
      kwargs.pop('implementation')
      logging.warning('The `implementation` argument '
                      'in `SimpleRNN` has been deprecated. '
                      'Please remove it from your layer call.')
    # `enable_caching_device` belongs to the cell, not the RNN container.
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    # The cell owns the weights and the per-timestep computation; this class
    # only adds sequence iteration via the RNN base class.
    cell = SimpleRNNCell(
        units,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(SimpleRNN, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]
  def call(self, inputs, mask=None, training=None, initial_state=None):
    # Thin wrapper: all sequence handling lives in the RNN base class.
    return super(SimpleRNN, self).call(
        inputs, mask=mask, training=training, initial_state=initial_state)
  # The properties below expose the wrapped cell's hyperparameters read-only.
  @property
  def units(self):
    return self.cell.units
  @property
  def activation(self):
    return self.cell.activation
  @property
  def use_bias(self):
    return self.cell.use_bias
  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer
  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer
  @property
  def bias_initializer(self):
    return self.cell.bias_initializer
  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer
  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer
  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer
  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint
  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint
  @property
  def bias_constraint(self):
    return self.cell.bias_constraint
  @property
  def dropout(self):
    return self.cell.dropout
  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout
  def get_config(self):
    """Returns the serializable config of the layer."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout
    }
    base_config = super(SimpleRNN, self).get_config()
    config.update(_config_for_enable_caching_device(self.cell))
    # The cell is rebuilt from these hyperparameters in __init__, so the
    # base class's serialized cell is dropped.
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    # Drop the legacy 'implementation' key from old saved configs.
    if 'implementation' in config:
      config.pop('implementation')
    return cls(**config)
@keras_export(v1=['keras.layers.GRUCell'])
class GRUCell(DropoutRNNCellMixin, Layer):
  """Cell class for the GRU layer.
  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass None, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix,
      used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    reset_after: GRU convention (whether to apply reset gate after or
      before matrix multiplication). False = "before" (default),
      True = "after" (cuDNN compatible).
  Call arguments:
    inputs: A 2D tensor.
    states: List of state tensors corresponding to the previous timestep.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. Only relevant when `dropout` or
      `recurrent_dropout` is used.
  """
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               reset_after=False,
               **kwargs):
    # NOTE(review): this check permits units == 0 even though the error
    # message requires a positive integer — should probably be `units <= 0`;
    # confirm before changing since it alters accepted inputs.
    if units < 0:
      raise ValueError(f'Received an invalid value for argument `units`, '
                       f'expected a positive integer, got {units}.')
    # By default use cached variable under v2 mode, see b/143699808.
    if tf.compat.v1.executing_eagerly_outside_functions():
      self._enable_caching_device = kwargs.pop('enable_caching_device', True)
    else:
      self._enable_caching_device = kwargs.pop('enable_caching_device', False)
    super(GRUCell, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp dropout fractions into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    # Recurrent dropout is only supported with implementation 1.
    implementation = kwargs.pop('implementation', 1)
    if self.recurrent_dropout != 0 and implementation != 1:
      logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
      self.implementation = 1
    else:
      self.implementation = implementation
    self.reset_after = reset_after
    self.state_size = self.units
    self.output_size = self.units
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    """Creates the weights; z/r/h gate kernels are concatenated (3 * units)."""
    input_dim = input_shape[-1]
    default_caching_device = _caching_device(self)
    self.kernel = self.add_weight(
        shape=(input_dim, self.units * 3),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        caching_device=default_caching_device)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 3),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
        caching_device=default_caching_device)
    if self.use_bias:
      if not self.reset_after:
        bias_shape = (3 * self.units,)
      else:
        # separate biases for input and recurrent kernels
        # Note: the shape is intentionally different from CuDNNGRU biases
        # `(2 * 3 * self.units,)`, so that we can distinguish the classes
        # when loading and converting saved weights.
        bias_shape = (2, 3 * self.units)
      self.bias = self.add_weight(shape=bias_shape,
                                  name='bias',
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint,
                                  caching_device=default_caching_device)
    else:
      self.bias = None
    self.built = True
  def call(self, inputs, states, training=None):
    """Runs one GRU step; z = update gate, r = reset gate, hh = candidate."""
    h_tm1 = states[0] if tf.nest.is_nested(states) else states  # previous memory
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=3)
    if self.use_bias:
      if not self.reset_after:
        input_bias, recurrent_bias = self.bias, None
      else:
        # reset_after stores two stacked bias rows: input and recurrent.
        input_bias, recurrent_bias = tf.unstack(self.bias)
    if self.implementation == 1:
      # Implementation 1: separate matmuls per gate (supports per-gate
      # dropout masks).
      if 0. < self.dropout < 1.:
        inputs_z = inputs * dp_mask[0]
        inputs_r = inputs * dp_mask[1]
        inputs_h = inputs * dp_mask[2]
      else:
        inputs_z = inputs
        inputs_r = inputs
        inputs_h = inputs
      x_z = backend.dot(inputs_z, self.kernel[:, :self.units])
      x_r = backend.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
      x_h = backend.dot(inputs_h, self.kernel[:, self.units * 2:])
      if self.use_bias:
        x_z = backend.bias_add(x_z, input_bias[:self.units])
        x_r = backend.bias_add(x_r, input_bias[self.units: self.units * 2])
        x_h = backend.bias_add(x_h, input_bias[self.units * 2:])
      if 0. < self.recurrent_dropout < 1.:
        h_tm1_z = h_tm1 * rec_dp_mask[0]
        h_tm1_r = h_tm1 * rec_dp_mask[1]
        h_tm1_h = h_tm1 * rec_dp_mask[2]
      else:
        h_tm1_z = h_tm1
        h_tm1_r = h_tm1
        h_tm1_h = h_tm1
      recurrent_z = backend.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
      recurrent_r = backend.dot(
          h_tm1_r, self.recurrent_kernel[:, self.units:self.units * 2])
      if self.reset_after and self.use_bias:
        recurrent_z = backend.bias_add(recurrent_z, recurrent_bias[:self.units])
        recurrent_r = backend.bias_add(
            recurrent_r, recurrent_bias[self.units:self.units * 2])
      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)
      # reset gate applied after/before matrix multiplication
      if self.reset_after:
        recurrent_h = backend.dot(
            h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
        if self.use_bias:
          recurrent_h = backend.bias_add(
              recurrent_h, recurrent_bias[self.units * 2:])
        recurrent_h = r * recurrent_h
      else:
        recurrent_h = backend.dot(
            r * h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
      hh = self.activation(x_h + recurrent_h)
    else:
      # Implementation 2: fused matmuls, then split into per-gate slices.
      if 0. < self.dropout < 1.:
        inputs = inputs * dp_mask[0]
      # inputs projected by all gate matrices at once
      matrix_x = backend.dot(inputs, self.kernel)
      if self.use_bias:
        # biases: bias_z_i, bias_r_i, bias_h_i
        matrix_x = backend.bias_add(matrix_x, input_bias)
      x_z, x_r, x_h = tf.split(matrix_x, 3, axis=-1)
      if self.reset_after:
        # hidden state projected by all gate matrices at once
        matrix_inner = backend.dot(h_tm1, self.recurrent_kernel)
        if self.use_bias:
          matrix_inner = backend.bias_add(matrix_inner, recurrent_bias)
      else:
        # hidden state projected separately for update/reset and new
        matrix_inner = backend.dot(
            h_tm1, self.recurrent_kernel[:, :2 * self.units])
      recurrent_z, recurrent_r, recurrent_h = tf.split(
          matrix_inner, [self.units, self.units, -1], axis=-1)
      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)
      if self.reset_after:
        recurrent_h = r * recurrent_h
      else:
        recurrent_h = backend.dot(
            r * h_tm1, self.recurrent_kernel[:, 2 * self.units:])
      hh = self.activation(x_h + recurrent_h)
    # previous and candidate state mixed by update gate
    h = z * h_tm1 + (1 - z) * hh
    # Mirror the container type of the incoming state.
    new_state = [h] if tf.nest.is_nested(states) else h
    return h, new_state
  def get_config(self):
    """Returns the serializable config of the cell."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
        'dropout': self.dropout,
        'recurrent_dropout': self.recurrent_dropout,
        'implementation': self.implementation,
        'reset_after': self.reset_after
    }
    config.update(_config_for_enable_caching_device(self))
    base_config = super(GRUCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    # Delegate to the shared helper that builds an all-zeros initial state.
    return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@keras_export(v1=['keras.layers.GRU'])
class GRU(RNN):
  """Gated Recurrent Unit - Cho et al. 2014.
  There are two variants. The default one is based on 1406.1078v3 and
  has reset gate applied to hidden state before matrix multiplication. The
  other one is based on original 1406.1078v1 and has the order reversed.
  The second variant is compatible with CuDNNGRU (GPU-only) and allows
  inference on CPU. Thus it has separate biases for `kernel` and
  `recurrent_kernel`. Use `'reset_after'=True` and
  `recurrent_activation='sigmoid'`.
  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation")..
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.
    time_major: The shape format of the `inputs` and `outputs` tensors.
      If True, the inputs and outputs will be in shape
      `(timesteps, batch, ...)`, whereas in the False case, it will be
      `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
      efficient because it avoids transposes at the beginning and end of the
      RNN calculation. However, most TensorFlow data is batch-major, so by
      default this function accepts input and emits output in batch-major
      form.
    reset_after: GRU convention (whether to apply reset gate after or
      before matrix multiplication). False = "before" (default),
      True = "after" (cuDNN compatible).
  Call arguments:
    inputs: A 3D tensor.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked. An individual `True` entry indicates
      that the corresponding timestep should be utilized, while a `False`
      entry indicates that the corresponding timestep should be ignored.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
  """
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               reset_after=False,
               **kwargs):
    # Legacy `implementation=0` is remapped to 1 with a warning.
    implementation = kwargs.pop('implementation', 1)
    if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`.'
                      'Please update your layer call.')
    # `enable_caching_device` belongs to the cell, not the RNN container.
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    # The cell owns the weights and the per-timestep computation; this class
    # only adds sequence iteration via the RNN base class.
    cell = GRUCell(
        units,
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        implementation=implementation,
        reset_after=reset_after,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(GRU, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]
  def call(self, inputs, mask=None, training=None, initial_state=None):
    # Thin wrapper: all sequence handling lives in the RNN base class.
    return super(GRU, self).call(
        inputs, mask=mask, training=training, initial_state=initial_state)
  # The properties below expose the wrapped cell's hyperparameters read-only.
  @property
  def units(self):
    return self.cell.units
  @property
  def activation(self):
    return self.cell.activation
  @property
  def recurrent_activation(self):
    return self.cell.recurrent_activation
  @property
  def use_bias(self):
    return self.cell.use_bias
  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer
  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer
  @property
  def bias_initializer(self):
    return self.cell.bias_initializer
  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer
  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer
  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer
  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint
  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint
  @property
  def bias_constraint(self):
    return self.cell.bias_constraint
  @property
  def dropout(self):
    return self.cell.dropout
  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout
  @property
  def implementation(self):
    return self.cell.implementation
  @property
  def reset_after(self):
    return self.cell.reset_after
  def get_config(self):
    """Returns the serializable config of the layer."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout,
        'implementation':
            self.implementation,
        'reset_after':
            self.reset_after
    }
    config.update(_config_for_enable_caching_device(self.cell))
    base_config = super(GRU, self).get_config()
    # The cell is rebuilt from these hyperparameters in __init__, so the
    # base class's serialized cell is dropped.
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    # Old saved configs may carry the deprecated implementation=0; remap it.
    if 'implementation' in config and config['implementation'] == 0:
      config['implementation'] = 1
    return cls(**config)
@keras_export(v1=['keras.layers.LSTMCell'])
class LSTMCell(DropoutRNNCellMixin, Layer):
"""Cell class for the LSTM layer.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
if units < 0:
raise ValueError(f'Received an invalid value for argument `units`, '
f'expected a positive integer, got {units}.')
# By default use cached variable under v2 mode, see b/143699808.
if tf.compat.v1.executing_eagerly_outside_functions():
self._enable_caching_device = kwargs.pop('enable_caching_device', True)
else:
self._enable_caching_device = kwargs.pop('enable_caching_device', False)
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
implementation = kwargs.pop('implementation', 1)
if self.recurrent_dropout != 0 and implementation != 1:
logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
self.implementation = 1
else:
self.implementation = implementation
self.state_size = [self.units, self.units]
self.output_size = self.units
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    """Creates the kernel, recurrent kernel and (optionally) bias weights.

    Args:
      input_shape: Shape tuple; only the last dimension (the input feature
        size) is used.
    """
    # Device-caching function for variable reads (None in eager mode or when
    # caching is disabled) — see _caching_device at module level.
    default_caching_device = _caching_device(self)
    input_dim = input_shape[-1]
    # Both kernels pack the four gates side by side; gate order is
    # [input, forget, cell-candidate, output].
    self.kernel = self.add_weight(
        shape=(input_dim, self.units * 4),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        caching_device=default_caching_device)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 4),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
        caching_device=default_caching_device)
    if self.use_bias:
      if self.unit_forget_bias:
        def bias_initializer(_, *args, **kwargs):
          # Initialize the forget-gate slice with ones and the other three
          # gate slices with the user-configured bias initializer.
          return backend.concatenate([
              self.bias_initializer((self.units,), *args, **kwargs),
              initializers.get('ones')((self.units,), *args, **kwargs),
              self.bias_initializer((self.units * 2,), *args, **kwargs),
          ])
      else:
        bias_initializer = self.bias_initializer
      self.bias = self.add_weight(
          shape=(self.units * 4,),
          name='bias',
          initializer=bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          caching_device=default_caching_device)
    else:
      self.bias = None
    self.built = True
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
f = self.recurrent_activation(x_f + backend.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))
c = f * c_tm1 + i * self.activation(x_c + backend.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
  def call(self, inputs, states, training=None):
    """Runs a single LSTM step.

    Args:
      inputs: Input tensor for this timestep.
      states: List `[h, c]` of the previous hidden and carry states.
      training: Python boolean controlling dropout behavior.

    Returns:
      Tuple `(output, [h, c])`; the step output equals the new hidden state.
    """
    h_tm1 = states[0]  # previous memory state
    c_tm1 = states[1]  # previous carry state
    # Masks are cached on the cell so the same dropout pattern is reused at
    # every timestep of the sequence.
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=4)
    if self.implementation == 1:
      # Implementation 1: split the kernel per gate and apply an independent
      # dropout mask per gate.
      if 0 < self.dropout < 1.:
        inputs_i = inputs * dp_mask[0]
        inputs_f = inputs * dp_mask[1]
        inputs_c = inputs * dp_mask[2]
        inputs_o = inputs * dp_mask[3]
      else:
        inputs_i = inputs
        inputs_f = inputs
        inputs_c = inputs
        inputs_o = inputs
      k_i, k_f, k_c, k_o = tf.split(
          self.kernel, num_or_size_splits=4, axis=1)
      x_i = backend.dot(inputs_i, k_i)
      x_f = backend.dot(inputs_f, k_f)
      x_c = backend.dot(inputs_c, k_c)
      x_o = backend.dot(inputs_o, k_o)
      if self.use_bias:
        b_i, b_f, b_c, b_o = tf.split(
            self.bias, num_or_size_splits=4, axis=0)
        x_i = backend.bias_add(x_i, b_i)
        x_f = backend.bias_add(x_f, b_f)
        x_c = backend.bias_add(x_c, b_c)
        x_o = backend.bias_add(x_o, b_o)
      if 0 < self.recurrent_dropout < 1.:
        h_tm1_i = h_tm1 * rec_dp_mask[0]
        h_tm1_f = h_tm1 * rec_dp_mask[1]
        h_tm1_c = h_tm1 * rec_dp_mask[2]
        h_tm1_o = h_tm1 * rec_dp_mask[3]
      else:
        h_tm1_i = h_tm1
        h_tm1_f = h_tm1
        h_tm1_c = h_tm1
        h_tm1_o = h_tm1
      x = (x_i, x_f, x_c, x_o)
      h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
      c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
    else:
      # Implementation 2: one fused matmul computes all four gates; only a
      # single dropout mask is applied to the inputs.
      if 0. < self.dropout < 1.:
        inputs = inputs * dp_mask[0]
      z = backend.dot(inputs, self.kernel)
      z += backend.dot(h_tm1, self.recurrent_kernel)
      if self.use_bias:
        z = backend.bias_add(z, self.bias)
      z = tf.split(z, num_or_size_splits=4, axis=1)
      c, o = self._compute_carry_and_output_fused(z, c_tm1)
    h = o * self.activation(c)
    return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
config.update(_config_for_enable_caching_device(self))
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return list(_generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype))
@keras_export('keras.experimental.PeepholeLSTMCell')
class PeepholeLSTMCell(LSTMCell):
  """Equivalent to LSTMCell class but adds peephole connections.

  Peephole connections allow the gates to utilize the previous internal state as
  well as the previous hidden state (which is what LSTMCell is limited to).
  This allows PeepholeLSTMCell to better learn precise timings over LSTMCell.

  From [Gers et al., 2002](
    http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):

  "We find that LSTM augmented by 'peephole connections' from its internal
  cells to its multiplicative gates can learn the fine distinction between
  sequences of spikes spaced either 50 or 49 time steps apart without the help
  of any short training exemplars."

  The peephole implementation is based on:
  [Sak et al., 2014](https://research.google.com/pubs/archive/43905.pdf)

  Example:

  ```python
  # Create 2 PeepholeLSTMCells
  peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]]
  # Create a layer composed sequentially of the peephole LSTM cells.
  layer = RNN(peephole_lstm_cells)
  input = keras.Input((timesteps, input_dim))
  output = layer(input)
  ```
  """

  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    """Same arguments as `LSTMCell`; emits a deprecation warning on use."""
    warnings.warn('`tf.keras.experimental.PeepholeLSTMCell` is deprecated '
                  'and will be removed in a future version. '
                  'Please use tensorflow_addons.rnn.PeepholeLSTMCell '
                  'instead.')
    super(PeepholeLSTMCell, self).__init__(
        units=units,
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        unit_forget_bias=unit_forget_bias,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        implementation=kwargs.pop('implementation', 1),
        **kwargs)

  def build(self, input_shape):
    """Builds the base LSTM weights plus the three peephole weight vectors."""
    super(PeepholeLSTMCell, self).build(input_shape)
    # The following are the weight matrices for the peephole connections. These
    # are multiplied with the previous internal state during the computation of
    # carry and output.
    self.input_gate_peephole_weights = self.add_weight(
        shape=(self.units,),
        name='input_gate_peephole_weights',
        initializer=self.kernel_initializer)
    self.forget_gate_peephole_weights = self.add_weight(
        shape=(self.units,),
        name='forget_gate_peephole_weights',
        initializer=self.kernel_initializer)
    self.output_gate_peephole_weights = self.add_weight(
        shape=(self.units,),
        name='output_gate_peephole_weights',
        initializer=self.kernel_initializer)

  def _compute_carry_and_output(self, x, h_tm1, c_tm1):
    """Split-kernel gate math; adds peephole terms to the i, f and o gates.

    The input and forget gates peek at the previous carry `c_tm1`, while the
    output gate peeks at the freshly computed carry `c`.
    """
    x_i, x_f, x_c, x_o = x
    h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
    i = self.recurrent_activation(
        x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +
        self.input_gate_peephole_weights * c_tm1)
    f = self.recurrent_activation(x_f + backend.dot(
        h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +
                                  self.forget_gate_peephole_weights * c_tm1)
    c = f * c_tm1 + i * self.activation(x_c + backend.dot(
        h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
    o = self.recurrent_activation(
        x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +
        self.output_gate_peephole_weights * c)
    return c, o

  def _compute_carry_and_output_fused(self, z, c_tm1):
    """Fused-kernel gate math with the same peephole terms as above."""
    z0, z1, z2, z3 = z
    i = self.recurrent_activation(z0 +
                                  self.input_gate_peephole_weights * c_tm1)
    f = self.recurrent_activation(z1 +
                                  self.forget_gate_peephole_weights * c_tm1)
    c = f * c_tm1 + i * self.activation(z2)
    o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)
    return c, o
@keras_export(v1=['keras.layers.LSTM'])
class LSTM(RNN):
  """Long Short-Term Memory layer - Hochreiter 1997.

  Note that this cell is not optimized for performance on GPU. Please use
  `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.

  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix,
      used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    unit_forget_bias: Boolean.
      If True, add 1 to the bias of the forget gate at initialization.
      Setting it to true will also force `bias_initializer="zeros"`.
      This is recommended in [Jozefowicz et al., 2015](
        http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.
    time_major: The shape format of the `inputs` and `outputs` tensors.
      If True, the inputs and outputs will be in shape
      `(timesteps, batch, ...)`, whereas in the False case, it will be
      `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
      efficient because it avoids transposes at the beginning and end of the
      RNN calculation. However, most TensorFlow data is batch-major, so by
      default this function accepts input and emits output in batch-major
      form.

  Call arguments:
    inputs: A 3D tensor.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked. An individual `True` entry indicates
      that the corresponding timestep should be utilized, while a `False`
      entry indicates that the corresponding timestep should be ignored.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
  """

  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               **kwargs):
    # `implementation=0` is no longer supported; silently upgrade to 1 with
    # a warning so old configs keep loading.
    implementation = kwargs.pop('implementation', 1)
    if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`.'
                      'Please update your layer call.')
    # Forward the caching-device switch to the cell only when explicitly
    # provided, so the cell keeps its own default otherwise.
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    cell = LSTMCell(
        units,
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        unit_forget_bias=unit_forget_bias,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        implementation=implementation,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(LSTM, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]

  def call(self, inputs, mask=None, training=None, initial_state=None):
    """Delegates to the generic RNN call with the wrapped LSTMCell."""
    return super(LSTM, self).call(
        inputs, mask=mask, training=training, initial_state=initial_state)

  # The properties below expose the wrapped cell's hyperparameters as
  # read-only attributes of the layer itself.
  @property
  def units(self):
    return self.cell.units

  @property
  def activation(self):
    return self.cell.activation

  @property
  def recurrent_activation(self):
    return self.cell.recurrent_activation

  @property
  def use_bias(self):
    return self.cell.use_bias

  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer

  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer

  @property
  def bias_initializer(self):
    return self.cell.bias_initializer

  @property
  def unit_forget_bias(self):
    return self.cell.unit_forget_bias

  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer

  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer

  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer

  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint

  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint

  @property
  def bias_constraint(self):
    return self.cell.bias_constraint

  @property
  def dropout(self):
    return self.cell.dropout

  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout

  @property
  def implementation(self):
    return self.cell.implementation

  def get_config(self):
    """Returns the layer config; the wrapped cell is flattened into it."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'unit_forget_bias':
            self.unit_forget_bias,
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout,
        'implementation':
            self.implementation
    }
    config.update(_config_for_enable_caching_device(self.cell))
    base_config = super(LSTM, self).get_config()
    # The cell config is redundant because its fields are inlined above.
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config):
    """Recreates the layer, upgrading legacy `implementation=0` configs."""
    if 'implementation' in config and config['implementation'] == 0:
      config['implementation'] = 1
    return cls(**config)
def _generate_dropout_mask(ones, rate, training=None, count=1):
  """Builds `count` dropout masks shaped like `ones`.

  Each mask applies dropout only during the training phase; outside training
  the all-ones tensor is passed through unchanged.
  """

  def dropped_inputs():
    return backend.dropout(ones, rate)

  if count <= 1:
    return backend.in_train_phase(dropped_inputs, ones, training=training)
  return [
      backend.in_train_phase(dropped_inputs, ones, training=training)
      for _ in range(count)
  ]
def _standardize_args(inputs, initial_state, constants, num_constants):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Args:
inputs: Tensor or list/tuple of tensors. which may include constants
and initial states. In that case `num_constant` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
part of the `inputs` list.
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
# There are several situations here:
# In the graph mode, __call__ will be only called once. The initial_state
# and constants could be in inputs (from file loading).
# In the eager mode, __call__ will be called twice, once during
# rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be
# model.fit/train_on_batch/predict with real np data. In the second case,
# the inputs will contain initial_state and constants as eager tensor.
#
# For either case, the real input is the first item in the list, which
# could be a nested structure itself. Then followed by initial_states, which
# could be a list of items, or list of list if the initial_state is complex
# structure, and finally followed by constants which is a flat list.
assert initial_state is None and constants is None
if num_constants:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[:1]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
  """Check whether the state_size contains multiple states."""
  if isinstance(state_size, tf.TensorShape):
    # A TensorShape has a length but still describes a single state.
    return False
  return hasattr(state_size, '__len__')
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
  """Zero-state helper that infers batch size and dtype from `inputs`."""
  if inputs is None:
    return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
  return _generate_zero_filled_state(
      tf.shape(inputs)[0], cell.state_size, inputs.dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
'batch_size and dtype cannot be None while constructing initial state. '
f'Received: batch_size={batch_size_tensor}, dtype={dtype}')
def create_zeros(unnested_state_size):
flat_dims = tf.TensorShape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return tf.zeros(init_state_size, dtype=dtype)
if tf.nest.is_nested(state_size):
return tf.nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
def _caching_device(rnn_cell):
  """Returns the caching device for the RNN variable.

  This is useful for distributed training, when variable is not located as same
  device as the training worker. By enabling the device cache, this allows
  worker to read the variable once and cache locally, rather than read it every
  time step from remote when it is needed.

  Note that this is assuming the variable that cell needs for each time step is
  having the same value in the forward path, and only gets updated in the
  backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the
  cell body relies on any variable that gets updated every time step, then
  caching device will cause it to read the stale value.

  Args:
    rnn_cell: the rnn cell instance.

  Returns:
    A callable mapping an op to its own device (used as `caching_device` for
    the cell's weights in `build`), or None when caching is disabled or not
    applicable.
  """
  if tf.executing_eagerly():
    # caching_device is not supported in eager mode.
    return None
  if not getattr(rnn_cell, '_enable_caching_device', False):
    return None
  # Don't set a caching device when running in a loop, since it is possible that
  # train steps could be wrapped in a tf.while_loop. In that scenario caching
  # prevents forward computations in loop iterations from re-reading the
  # updated weights.
  if control_flow_util.IsInWhileLoop(tf.compat.v1.get_default_graph()):
    logging.warning(
        'Variable read device caching has been disabled because the '
        'RNN is in tf.while_loop loop context, which will cause '
        'reading stalled value in forward path. This could slow down '
        'the training due to duplicated variable reads. Please '
        'consider updating your code to remove tf.while_loop if possible.')
    return None
  # Under mixed precision the compute dtype differs from the variable dtype,
  # which the cached read would not reflect — disable caching in that case.
  if (rnn_cell._dtype_policy.compute_dtype !=
      rnn_cell._dtype_policy.variable_dtype):
    logging.warning(
        'Variable read device caching has been disabled since it '
        'doesn\'t work with the mixed precision API. This is '
        'likely to cause a slowdown for RNN training due to '
        'duplicated read of variable for each timestep, which '
        'will be significant in a multi remote worker setting. '
        'Please consider disabling mixed precision API if '
        'the performance has been affected.')
    return None
  # Cache the value on the device that access the variable.
  return lambda op: op.device
def _config_for_enable_caching_device(rnn_cell):
  """Return the dict config for RNN cell wrt to enable_caching_device field.

  Since enable_caching_device is an internal implementation detail for speeding
  up RNN variable reads in the multi remote worker setting, we don't want this
  config to be serialized constantly in the JSON. We only serialize this field
  when a non-default value is used to create the cell.

  Args:
    rnn_cell: the RNN cell for serialize.

  Returns:
    A dict which contains the JSON config for enable_caching_device value or
    empty dict if the enable_caching_device value is same as the default value.
  """
  default_value = tf.compat.v1.executing_eagerly_outside_functions()
  if rnn_cell._enable_caching_device == default_value:
    return {}
  return {'enable_caching_device': rnn_cell._enable_caching_device}
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 LSTM layer."""
import tensorflow.compat.v2 as tf
import copy
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import rewriter_config_pb2
import keras
from tensorflow.python.framework import test_util as tf_test_util
from keras import keras_parameterized
from keras import testing_utils
from keras.layers import recurrent as rnn_v1
from keras.layers import recurrent_v2 as rnn
from keras.utils import np_utils
from tensorflow.python.platform import tf_logging as logging
# Global config for grappler setting that is used for graph mode test.
# Turn the grappler implementation selector ON so kernel selection rewrites
# are applied even to the small graphs built by these tests.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
# Disable the minimum-graph-size threshold so rewriting is never skipped.
_rewrites.min_graph_nodes = -1
_graph_options = tf.compat.v1.GraphOptions(rewrite_options=_rewrites)
# Session config handed to run_all_keras_modes for the graph-mode variants.
_config = tf.compat.v1.ConfigProto(graph_options=_graph_options)
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMV2Test(keras_parameterized.TestCase):
  @parameterized.named_parameters(
      ('non_tan_activation', 'relu', 'sigmoid', 0, False, True),
      ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
      ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
      ('unroll', 'tanh', 'sigmoid', 0, True, True),
      ('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
  )
  def test_could_use_defun_backend(self, activation, recurrent_activation,
                                   recurrent_dropout, unroll, use_bias):
    # Each named case deviates from the defaults in one way; any such
    # deviation must disable the fused GPU kernel path.
    layer = rnn.LSTM(
        1,
        activation=activation,
        recurrent_activation=recurrent_activation,
        recurrent_dropout=recurrent_dropout,
        unroll=unroll,
        use_bias=use_bias)
    self.assertFalse(layer._could_use_gpu_kernel)
@testing_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = rnn.LSTM(1, activation=tf.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = rnn.LSTM(1, recurrent_activation=tf.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
  def test_static_shape_inference_LSTM(self):
    """Output shape is statically inferred for return_sequences=True."""
    # Github issue: 15165
    timesteps = 3
    embedding_dim = 4
    units = 2
    model = keras.models.Sequential()
    inputs = keras.layers.Dense(
        embedding_dim, input_shape=(timesteps, embedding_dim))
    model.add(inputs)
    layer = rnn.LSTM(units, return_sequences=True)
    model.add(layer)
    outputs = model.layers[-1].output
    self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
  def test_stacking_LSTM(self):
    """Two stacked LSTM layers train end to end."""
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    # Normalize so targets form valid per-step probability distributions.
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
    model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = rnn.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
  def test_specify_initial_state_keras_tensor(self):
    """initial_state given as Keras tensors is wired into the graph."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    # Test with Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    layer = rnn.LSTM(units)
    if len(initial_state) == 1:
      output = layer(inputs, initial_state=initial_state[0])
    else:
      output = layer(inputs, initial_state=initial_state)
    # The state tensors must appear among the layer's recorded inputs.
    self.assertTrue(
        any(initial_state[0] is t
            for t in layer._inbound_nodes[0].input_tensors))
    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  def test_specify_initial_state_non_keras_tensor(self):
    """initial_state given as backend variables (non-Keras tensors) works."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    # Test with non-Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [
        keras.backend.random_normal_variable((num_samples, units), 0, 1)
        for _ in range(num_states)
    ]
    layer = rnn.LSTM(units)
    output = layer(inputs, initial_state=initial_state)
    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.train_on_batch(inputs, targets)
  def test_reset_states_with_values(self):
    """reset_states supports zeros, explicit values, and rejects bad input."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    layer = rnn.LSTM(units, stateful=True)
    layer.build((num_samples, timesteps, embedding_dim))
    initial_weight_count = len(layer.weights)
    # Default reset: states become zero-filled.
    layer.reset_states()
    assert len(layer.states) == num_states
    assert layer.states[0] is not None
    self.assertAllClose(
        keras.backend.eval(layer.states[0]),
        np.zeros(keras.backend.int_shape(layer.states[0])),
        atol=1e-4)
    # Reset with explicit values: states take on the provided arrays.
    state_shapes = [keras.backend.int_shape(state) for state in layer.states]
    values = [np.ones(shape) for shape in state_shapes]
    if len(values) == 1:
      values = values[0]
    layer.reset_states(values)
    self.assertAllClose(
        keras.backend.eval(layer.states[0]),
        np.ones(keras.backend.int_shape(layer.states[0])),
        atol=1e-4)
    # Test with invalid data
    with self.assertRaises(ValueError):
      layer.reset_states([1] * (len(layer.states) + 1))
    self.assertEqual(initial_weight_count, len(layer.weights))
    # Variables in "states" shouldn't show up in .weights
    layer.states = tf.nest.map_structure(tf.Variable, values)
    layer.reset_states()
    self.assertEqual(initial_weight_count, len(layer.weights))
  def test_specify_state_with_masking(self):
    """Explicit initial_state trains together with a Masking layer present."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    inputs = keras.Input((timesteps, embedding_dim))
    _ = keras.layers.Masking()(inputs)
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    output = rnn.LSTM(units)(
        inputs, initial_state=initial_state)
    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  def test_return_state(self):
    """With return_state=True the layer emits [output, h, c] and the
    predicted state matches the layer's stored stateful state."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    masked = keras.layers.Masking()(inputs)
    layer = rnn.LSTM(units, return_state=True, stateful=True)
    outputs = layer(masked)
    state = outputs[1:]
    assert len(state) == num_states
    model = keras.models.Model(inputs, state[0])
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    state = model.predict(inputs)
    self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
def test_state_reuse(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = rnn.LSTM(
units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = rnn.LSTM(units)(output, initial_state=state)
model = keras.models.Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
model.predict(inputs)
  def test_initial_states_as_other_inputs(self):
    """Initial states appended to the input list (instead of passed via the
    `initial_state` kwarg) are wired as graph inputs and the model trains."""
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    num_states = 2
    layer_class = rnn.LSTM
    # Test with Keras tensor
    main_inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    inputs = [main_inputs] + initial_state
    layer = layer_class(units)
    output = layer(inputs)
    # The state tensors must show up among the layer's real inbound tensors.
    self.assertTrue(
        any(initial_state[0] is t
            for t in layer._inbound_nodes[0].input_tensors))
    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([main_inputs] + initial_state, targets)
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  @testing_utils.run_v2_only
  def test_lstm_v2_feature_parity_with_canonical_lstm(self):
    """LSTM v2 matches the canonical v1 LSTM's outputs, both before and
    after one round of training, including with masked (padded) input."""
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 20
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=rnn_state_size,
        random_seed=87654321)
    y_train = np_utils.to_categorical(y_train, rnn_state_size)
    # For the last batch item of the test data, we filter out the last
    # timestep to simulate the variable length sequence and masking test.
    x_train[-2:, -1, :] = 0.0
    y_train[-2:] = 0
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)
    masked_input = keras.layers.Masking()(inputs)
    # Use sigmoid recurrent activation so the v1 layer matches v2/cuDNN.
    lstm_layer = rnn_v1.LSTM(rnn_state_size,
                             recurrent_activation='sigmoid')
    output = lstm_layer(masked_input)
    lstm_model = keras.models.Model(inputs, output)
    weights = lstm_model.get_weights()
    y_1 = lstm_model.predict(x_train)
    lstm_model.compile('rmsprop', 'mse')
    lstm_model.fit(x_train, y_train)
    y_2 = lstm_model.predict(x_train)
    # Build the v2 model with identical weights, then compare predictions
    # before training (y_1 vs y_3) and after one epoch (y_2 vs y_4).
    with testing_utils.device(should_use_gpu=True):
      cudnn_layer = rnn.LSTM(rnn_state_size)
      cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
    cudnn_model.set_weights(weights)
    y_3 = cudnn_model.predict(x_train)
    cudnn_model.compile('rmsprop', 'mse')
    cudnn_model.fit(x_train, y_train)
    y_4 = cudnn_model.predict(x_train)
    self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
    self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
  @parameterized.named_parameters(('v0', 0), ('v1', 1), ('v2', 2))
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  def test_implementation_mode_LSTM(self, implementation_mode):
    """Runs the generic layer_test for each LSTM `implementation` mode.

    NOTE(review): the remainder of this body looks like two further tests
    (constraint checking and masked training) whose `def` lines appear to
    have been lost in this copy of the file -- confirm against upstream
    before restructuring.
    """
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'implementation': implementation_mode
        },
        input_shape=(num_samples, timesteps, embedding_dim))
    # Constraint checks: each weight keeps the constraint it was configured
    # with after build().
    layer_class = rnn.LSTM
    k_constraint = keras.constraints.max_norm(0.01)
    r_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_constraint=k_constraint,
        recurrent_constraint=r_constraint,
        bias_constraint=b_constraint)
    layer.build((None, None, embedding_dim))
    self.assertEqual(layer.cell.kernel.constraint, k_constraint)
    self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
    self.assertEqual(layer.cell.bias.constraint, b_constraint)
    # Masked-training smoke test: LSTM behind Masking fits for one epoch.
    layer_class = rnn.LSTM
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    targets /= targets.sum(axis=-1, keepdims=True)  # rows are distributions
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(layer_class(units=5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_masking_with_stacking_LSTM(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  @parameterized.named_parameters(
      # test_name, time_major, go_backwards
      ('normal', False, False),
      ('time_major', True, False),
      ('go_backwards', False, True),
      ('both', True, True),
  )
  def test_time_major_and_go_backward(self, time_major, go_backwards):
    """LSTM v2 matches v1 outputs for every time_major/go_backwards combo.

    NOTE(review): the second half of this body (from the redefinition of
    input_shape onward) looks like a separate end-to-end fit/evaluate/predict
    smoke test whose `def` line appears lost in this copy -- confirm against
    upstream before restructuring.
    """
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100
    x_train = np.random.random((batch, timestep, input_shape))

    def build_model(layer_cls):
      # Wraps the layer so the model always consumes/produces batch-major
      # tensors; transposes are inserted only in time_major mode.
      inputs = keras.layers.Input(
          shape=[timestep, input_shape], dtype=tf.float32)
      layer = layer_cls(rnn_state_size,
                        recurrent_activation='sigmoid',
                        time_major=time_major,
                        return_sequences=True,
                        go_backwards=go_backwards)
      if time_major:
        converted_input = keras.layers.Lambda(
            lambda t: tf.transpose(t, [1, 0, 2]))(inputs)
        outputs = layer(converted_input)
        outputs = keras.layers.Lambda(
            lambda t: tf.transpose(t, [1, 0, 2]))(outputs)
      else:
        outputs = layer(inputs)
      return keras.models.Model(inputs, outputs)

    lstm_model = build_model(rnn_v1.LSTM)
    y_ref = lstm_model.predict(x_train)
    weights = lstm_model.get_weights()
    # v2 model with the v1 weights must reproduce the v1 predictions.
    lstm_v2_model = build_model(rnn.LSTM)
    lstm_v2_model.set_weights(weights)
    y = lstm_v2_model.predict(x_train)
    self.assertAllClose(y, y_ref)
    # End-to-end smoke test: fit/evaluate/predict a fresh LSTM model.
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = np_utils.to_categorical(y_train, output_shape)
    layer = rnn.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)
    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
    model.evaluate(x_train, y_train)
    model.predict(x_train)
  @parameterized.named_parameters(
      # test_name, use_bias, bias_initializer
      ('normal', True, 'zeros'),
      ('no_bias', False, 'zeros'),
      ('random_bias', True, 'random_uniform'),
  )
  def test_lstm_model_save_load(self, use_bias, bias_initializer):
    """Weights saved to HDF5 restore into a fresh model exactly."""
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')
    batch = 10
    timestep = 3
    input_dim = 5
    units = 2
    x = np.random.random((batch, timestep, input_dim))

    def build_model():
      # Fresh model each call, so the clone starts from new random weights.
      inputs = keras.layers.Input(
          shape=[timestep, input_dim], dtype=tf.float32)
      layer = rnn.LSTM(
          units,
          use_bias=use_bias,
          bias_initializer=bias_initializer)
      output = layer(inputs)
      return keras.models.Model(inputs, output), layer

    model, layer = build_model()
    y_ref = model.predict(x)
    model.save_weights(h5_path)
    cloned_model, new_layer = build_model()
    cloned_model.load_weights(h5_path)
    y = cloned_model.predict(x)
    # Restored weights must reproduce both predictions and raw weights.
    self.assertAllClose(y, y_ref)
    self.assertAllClose(layer.get_weights(), new_layer.get_weights())
  def test_lstm_output_on_multiple_kernel(self):
    """CPU kernel, GPU (cuDNN-capable) kernel, and canonical v1 LSTM with
    sigmoid recurrent activation all produce the same output for the same
    weights."""
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100
    x_train = np.random.random((batch, timestep, input_shape))
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)
    with testing_utils.device(should_use_gpu=False):
      layer = rnn.LSTM(rnn_state_size)
      output = layer(inputs)
      cpu_model = keras.models.Model(inputs, output)
      weights = cpu_model.get_weights()
    y_1 = cpu_model.predict(x_train)
    with testing_utils.device(should_use_gpu=True):
      layer = rnn.LSTM(rnn_state_size)
      output = layer(inputs)
      gpu_model = keras.models.Model(inputs, output)
      gpu_model.set_weights(weights)
    y_2 = gpu_model.predict(x_train)
    # Note that cuDNN uses 'sigmoid' as activation, so the LSTM V2 uses
    # 'sigmoid' as default. Construct the canonical LSTM with sigmoid to
    # achieve the same output.
    with testing_utils.device(should_use_gpu=True):
      layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid')
      output = layer(inputs)
      canonical_model = keras.models.Model(inputs, output)
      # Remove the extra cudnn bias since canonical lstm will not use it.
      canonical_model.set_weights(weights[:3])
    y_3 = canonical_model.predict(x_train)
    self.assertAllClose(y_1, y_2)
    self.assertAllClose(y_2, y_3)
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'return_sequences': True
},
input_shape=(num_samples, timesteps, embedding_dim))
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support float64 yet.')
  @testing_utils.run_v2_only
  def test_float64_LSTM(self):
    """LSTM runs end to end with float64 inputs and weights."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'return_sequences': True,
            'dtype': 'float64'
        },
        input_shape=(num_samples, timesteps, embedding_dim),
        input_dtype='float64')
  def test_regularizers_LSTM(self):
    """Kernel/recurrent/bias regularizers create build-time losses; the
    activity regularizer adds one more when the layer is called."""
    embedding_dim = 4
    layer_class = rnn.LSTM
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_regularizer=keras.regularizers.l1(0.01),
        recurrent_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l2',
        activity_regularizer='l1')
    layer.build((None, None, 2))
    # One loss per weight regularizer (kernel, recurrent, bias).
    self.assertEqual(len(layer.losses), 3)
    x = keras.backend.variable(np.ones((2, 3, 2)))
    layer(x)
    if tf.executing_eagerly():
      # Eager: the activity loss is appended directly to layer.losses.
      self.assertEqual(len(layer.losses), 4)
    else:
      # Graph: the activity loss is conditional on the input tensor.
      self.assertEqual(len(layer.get_losses_for(x)), 1)
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  def test_statefulness_LSTM(self):
    """Stateful LSTM: states persist across predict calls, reset_states
    works at layer and model level, and left/right/mixed zero padding all
    produce the same output thanks to mask_zero embedding."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = rnn.LSTM
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            4,
            embedding_dim,
            mask_zero=True,
            input_length=timesteps,
            batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(
        units, return_sequences=False, stateful=True, weights=None)
    model.add(layer)
    model.compile(
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    out1 = model.predict(np.ones((num_samples, timesteps)))
    self.assertEqual(out1.shape, (num_samples, units))
    # train once so that the states change
    model.train_on_batch(
        np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
    out2 = model.predict(np.ones((num_samples, timesteps)))
    # if the state is not reset, output should be different
    self.assertNotEqual(out1.max(), out2.max())
    # check that output changes after states are reset
    # (even though the model itself didn't change)
    layer.reset_states()
    out3 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out2.max(), out3.max())
    # check that container-level reset_states() works
    model.reset_states()
    out4 = model.predict(np.ones((num_samples, timesteps)))
    self.assertAllClose(out3, out4, atol=1e-5)
    # check that the call to `predict` updated the states
    out5 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out4.max(), out5.max())
    # Check masking: zeros are masked out by the embedding, so padding
    # position must not affect the result.
    layer.reset_states()
    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)
    layer.reset_states()
    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)
    layer.reset_states()
    mix_padded_input = np.ones((num_samples, timesteps))
    mix_padded_input[0, 1] = 0
    mix_padded_input[1, 0] = 0
    mix_padded_input[1, 2] = 0
    out8 = model.predict(mix_padded_input)
    self.assertAllClose(out7, out6, atol=1e-5)
    self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_LSTM_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
rnn.LSTM(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1
},
input_shape=(num_samples, timesteps, embedding_dim))
def test_bidirectional(self):
batch = 128
timestep = 20
vocab_size = 1000
model = keras.Sequential([
keras.layers.Embedding(vocab_size, 64),
keras.layers.Bidirectional(rnn.LSTM(
64, return_sequences=True)),
keras.layers.Bidirectional(rnn.LSTM(32)),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
x = np.random.randint(0, vocab_size, size=(batch, timestep))
y = np.random.randint(0, 1, size=(batch))
model.fit(x, y, epochs=1, shuffle=False)
model.evaluate(x, y)
model.predict(x)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
@testing_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask(self):
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
mask = np.ones((batch_size, timestep)).astype(np.bool)
mask[:, masksteps:] = 0
# Test for V1 behavior.
lstm_v1 = rnn_v1.LSTM(units, return_sequences=True, go_backwards=True)
with testing_utils.device(should_use_gpu=True):
outputs_masked_v1 = lstm_v1(inputs, mask=tf.constant(mask))
outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps])
self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1)
# Test for V2 behavior.
lstm = rnn.LSTM(units, return_sequences=True, go_backwards=True)
with testing_utils.device(should_use_gpu=True):
outputs_masked = lstm(inputs, mask=tf.constant(mask))
outputs_trimmed = lstm(inputs[:, :masksteps])
self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
  @tf_test_util.enable_output_all_intermediates
  def test_v1_session_behavior(self):
    """LSTM inside a v1 graph with tf.data input trains without error
    (regression test, see b/139132348)."""
    with tf.compat.v1.get_default_graph().as_default():
      # See b/139132348 for more details.
      x = np.random.uniform(size=(100, 4, 8))
      y = np.random.uniform(size=(100, 1))
      dataset = tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(100).batch(32)
      inp = keras.layers.Input(shape=(4, 8))
      layer = rnn.LSTM(1)(inp)
      layer = keras.layers.Dense(1)(layer)
      model = keras.models.Model(inp, layer)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(dataset)
  def test_with_fully_masked_inputs(self):
    """A sample whose every timestep is masked must not crash the (possibly
    cuDNN) LSTM kernel."""
    num_samples = 8
    timestep = 5
    embedding_dim = 4
    vocab_size = 20
    units = 2
    inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
    # Set the first inputs to be fully zero, so mask_zero masks the whole
    # first sequence.
    inputs[0, :] = 0.0
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            vocab_size,
            embedding_dim,
            mask_zero=True,
            input_length=timestep,
            batch_input_shape=(num_samples, timestep)))
    layer = rnn.LSTM(units)
    model.add(layer)
    model.compile(
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    # Make sure it doesn't crash with cudnn kernel.
    model.predict(inputs)
# TODO (b/169895267): test with xla_gpu is disabled.
def test_deepcopy(self):
if not tf.executing_eagerly():
self.skipTest('v2-only test')
original_layer = rnn.LSTM(5)
copied_layer = copy.deepcopy(original_layer)
self.assertEqual(copied_layer.units, 5)
self.assertEqual(original_layer.get_config(), original_layer.get_config())
# Copy layer before layer call on inputs without weight initialization.
inputs = np.random.normal(size=[32, 10, 8]).astype(np.float32)
original_layer = rnn.LSTM(4)
copied_layer = copy.deepcopy(original_layer)
outputs = original_layer(inputs)
copied_outputs = copied_layer(inputs)
self.assertNotAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs))
# Copy layer after layer call on inputs with weight initialization.
original_layer = rnn.LSTM(4)
outputs = original_layer(inputs)
copied_layer = copy.deepcopy(original_layer)
copied_outputs = copied_layer(inputs)
self.assertAllClose(self.evaluate(outputs), self.evaluate(copied_outputs))
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMGraphRewriteTest(keras_parameterized.TestCase):
  """Tests that the grappler graph rewrite selects the expected LSTM kernel.

  Each test builds a model whose second output is the runtime marker emitted
  by `rnn.LSTM(..., return_runtime=True)` and asserts whether the cuDNN (GPU)
  or standard (CPU) kernel was actually executed.
  """

  # Shared model/data dimensions for all tests in this class.
  input_shape = 10
  output_shape = 8
  rnn_state_size = 8
  timestep = 4
  batch = 100
  epoch = 1

  def _test_runtime_with_model(self, model):
    """Trains `model` and checks the runtime marker from predict()."""
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape)
    y_train = np_utils.to_categorical(y_train, self.output_shape)
    # The runtime output gets no loss (None): only the prediction trains.
    model.compile(
        optimizer='sgd',
        loss=['categorical_crossentropy', None],
        run_eagerly=testing_utils.should_run_eagerly())
    existing_loss = 0
    for _ in range(self.epoch):
      history = model.fit(x_train, y_train)
      # Loss must change every epoch, i.e. training actually happened.
      loss_value = history.history['loss'][0]
      self.assertNotEqual(existing_loss, loss_value)
      existing_loss = loss_value
    _, runtime_value = model.predict(x_train)
    if tf.test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)

  @testing_utils.run_v2_only
  def test_LSTM_runtime(self):
    """Plain (unmasked) input selects the GPU kernel when one is available."""
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=tf.float32)
    outputs, runtime = layer(inputs)
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, specially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: tf.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
    self._test_runtime_with_model(model)

  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  @testing_utils.run_v2_only
  def test_LSTM_runtime_with_mask(self):
    """Kernel selection under masking: unpadded and strictly right-padded
    inputs may use the GPU kernel; mid-sequence masks force the CPU kernel."""
    # Masking will affect which backend is selected based on whether the mask
    # is strictly right padded.
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=tf.float32)
    masked_inputs = keras.layers.Masking()(inputs)
    outputs, runtime = layer(masked_inputs)
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, specially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: tf.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape)
    y_train = np_utils.to_categorical(y_train, self.output_shape)
    model.compile(
        optimizer='sgd',
        loss=['categorical_crossentropy', None],
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x_train, y_train)
    # Verify unpadded data.
    _, runtime_value = model.predict(x_train)
    if tf.test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
    # Update x/y to be right padded by setting the last timestep to 0
    x_train[:, -1, :] = 0
    y_train[:, -1] = 0
    _, runtime_value = model.predict(x_train)
    if tf.test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
    # Further update x/y to be mix padded (masks in the middle), and verify
    # only cpu kernel can be selected.
    x_train[:, -3, :] = 0
    y_train[:, -3] = 0
    _, runtime_value = model.predict(x_train)
    self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)

  @testing_utils.run_v2_only
  def test_LSTM_runtime_with_cond(self):
    """Grappler rewrite still works when the LSTM call is wrapped in a
    tf.cond whose branches return different numbers of internal states."""
    # This test is to demonstrate the graph rewrite of grappler plugin under
    # the condition that the function returns different number of internal
    # states.
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=tf.float32)
    zeros = tf.zeros([self.batch, self.output_shape])
    dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
    a = tf.constant(0)
    b = tf.constant(1)
    # Will always run the lstm layer.
    outputs, runtime = tf.cond(
        tf.less(a, b),
        lambda: layer(inputs),
        lambda: (zeros, dummy_runtime))
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, specially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: tf.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
    self._test_runtime_with_model(model)
class LSTMPerformanceTest(tf.test.Benchmark):
  """Benchmarks CuDNNLSTM vs. LSTM v2 (grappler-swapped) vs. canonical LSTM.

  Each `_time_performance_run_*` helper builds a one-layer model for its
  variant, trains it, and reports average seconds per epoch (warmup epochs
  excluded).
  """

  def _measure_performance(self, test_config, model, x_train, y_train):
    """Returns average seconds per (post-warmup) training epoch."""
    batch = test_config['batch']
    epoch = test_config['epoch']
    warmup_epoch = test_config['warmup_epoch']
    # warm up the model
    model.fit(x_train, y_train, batch_size=batch, epochs=warmup_epoch)
    start_time = time.time()
    model.fit(x_train, y_train, batch_size=batch, epochs=epoch - warmup_epoch)
    end_time = time.time()
    return (end_time - start_time) / (epoch - warmup_epoch)

  def _time_performance_run_cudnn_lstm(self, test_config, x_train, y_train):
    # Get the performance number for standard Cudnn LSTM
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']
    cudnn_lstm_layer = keras.layers.CuDNNLSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)
    outputs = cudnn_lstm_layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'CuDNN LSTM', sec_per_epoch)
    return sec_per_epoch

  def _time_performance_run_unifed_lstm_gpu(
      self, test_config, x_train, y_train):
    # Get performance number for lstm_v2 with grappler swap the impl
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']
    layer = rnn.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)
    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'LSTM V2', sec_per_epoch)
    return sec_per_epoch

  def _time_performance_run_normal_lstm(
      self, test_config, x_train, y_train):
    # Get performance number for standard LSTM on GPU.
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']
    layer = rnn_v1.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=tf.float32)
    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'Normal LSTM', sec_per_epoch)
    return sec_per_epoch

  def _benchmark_performance_with_standard_cudnn_impl(self):
    """Runs all three variants on the same data and reports benchmarks."""
    if not tf.test.is_gpu_available():
      self.skipTest('performance test will only run on GPU')
    mode = 'eager' if tf.executing_eagerly() else 'graph'
    batch = 64
    num_batch = 10
    test_config = {
        'input_shape': 128,
        'rnn_state_size': 64,
        'output_shape': 64,
        'timestep': 50,
        'batch': batch,
        'epoch': 20,
        # The performance for warmup epoch is ignored.
        'warmup_epoch': 1,
    }
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=(batch * num_batch),
        test_samples=0,
        input_shape=(test_config['timestep'], test_config['input_shape']),
        num_classes=test_config['output_shape'])
    y_train = np_utils.to_categorical(y_train, test_config['output_shape'])
    cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
        test_config, x_train, y_train)
    lstm_v2_sec_per_epoch = self._time_performance_run_unifed_lstm_gpu(
        test_config, x_train, y_train)
    normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm(
        test_config, x_train, y_train)
    # Speed ratios used for the informational log lines below.
    cudnn_vs_v2 = cudnn_sec_per_epoch / lstm_v2_sec_per_epoch
    v2_vs_normal = normal_lstm_sec_per_epoch / lstm_v2_sec_per_epoch
    self.report_benchmark(name='keras_cudnn_lstm_' + mode,
                          wall_time=cudnn_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_lstm_v2_' + mode,
                          wall_time=lstm_v2_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_canonical_lstm_' + mode,
                          wall_time=normal_lstm_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    logging.info('Expect the performance of LSTM V2 is within 80% of '
                 'cuDNN LSTM, got {0:.2f}%'.format(cudnn_vs_v2 * 100))
    logging.info('Expect the performance of LSTM V2 is more than 5 times'
                 ' of normal LSTM, got {0:.2f}'.format(v2_vs_normal))

  def benchmark_performance_graph(self):
    """Benchmark entry point: graph mode under a v1 session."""
    with tf.compat.v1.get_default_graph().as_default():
      with tf.compat.v1.Session(config=_config):
        self._benchmark_performance_with_standard_cudnn_impl()

  def benchmark_performance_eager(self):
    """Benchmark entry point: eager mode."""
    with tf.__internal__.eager_context.eager_mode():
      self._benchmark_performance_with_standard_cudnn_impl()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 40,152 | 34.439541 | 80 | py |
keras | keras-master/keras/layers/layers_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Tests for layers.__init__."""
import tensorflow.compat.v2 as tf
from keras import layers
class LayersTest(tf.test.TestCase):

  def test_keras_private_symbol(self):
    """Layer symbols resolve to their v2 implementations iff TF2 is enabled."""
    if tf.__internal__.tf2.enabled():
      # Normalization must come from the v2 `normalization` module.
      normalization_parent = layers.Normalization.__module__.split('.')[-1]
      self.assertEqual('normalization', normalization_parent)
      self.assertTrue(layers.BatchNormalization._USE_V2_BEHAVIOR)
    else:
      self.assertFalse(layers.BatchNormalization._USE_V2_BEHAVIOR)
if __name__ == '__main__':
tf.test.main()
| 1,269 | 35.285714 | 80 | py |
keras | keras-master/keras/layers/convolutional_transpose_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional transpose layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
@keras_parameterized.run_all_keras_modes
class Conv2DTransposeTest(keras_parameterized.TestCase):
  """Tests for keras.layers.Conv2DTranspose."""

  def _run_test(self, kwargs):
    """Runs the generic layer_test on a fixed 4D input with `kwargs`."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    with self.cached_session():
      testing_utils.layer_test(
          keras.layers.Conv2DTranspose,
          kwargs=kwargs,
          input_shape=(num_samples, num_row, num_col, stack_size))

  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'}),
      ('padding_same', {'padding': 'same'}),
      ('strides', {'strides': (2, 2)}),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {'data_format': 'channels_first'}),
      ('strides_output_padding', {'strides': (2, 2), 'output_padding': (1, 1)}),
  )
  def test_conv2d_transpose(self, kwargs):
    """Smoke-tests the layer under common padding/stride/format configs."""
    kwargs['filters'] = 2
    kwargs['kernel_size'] = (3, 3)
    # channels_first is GPU-only; skip that case on CPU-only machines.
    if 'data_format' not in kwargs or tf.test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs)

  def test_conv2d_transpose_regularizers(self):
    """Weight regularizers add losses at build time; activity at call time."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.cached_session():
      layer = keras.layers.Conv2DTranspose(**kwargs)
      layer.build((None, 5, 5, 2))
      # Kernel + bias regularizers.
      self.assertEqual(len(layer.losses), 2)
      layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
      # Calling the layer adds the activity-regularizer loss.
      self.assertEqual(len(layer.losses), 3)

  def test_conv2d_transpose_constraints(self):
    """Kernel/bias constraints are retained on the built weights."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.cached_session():
      layer = keras.layers.Conv2DTranspose(**kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)

  def test_conv2d_transpose_dilation(self):
    """Dilated transpose conv runs, and with an all-ones kernel produces a
    known output on a deterministic arange input."""
    testing_utils.layer_test(keras.layers.Conv2DTranspose,
                             kwargs={'filters': 2,
                                     'kernel_size': 3,
                                     'padding': 'same',
                                     'data_format': 'channels_last',
                                     'dilation_rate': (2, 2)},
                             input_shape=(2, 5, 6, 3))
    input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32)
    # pylint: disable=too-many-function-args
    expected_output = np.float32([
        [192, 228, 192, 228],
        [336, 372, 336, 372],
        [192, 228, 192, 228],
        [336, 372, 336, 372]
    ]).reshape((1, 4, 4, 1))
    testing_utils.layer_test(keras.layers.Conv2DTranspose,
                             input_data=input_data,
                             kwargs={'filters': 1,
                                     'kernel_size': 3,
                                     'padding': 'same',
                                     'data_format': 'channels_last',
                                     'dilation_rate': (2, 2),
                                     'kernel_initializer': 'ones'},
                             expected_output=expected_output)
@keras_parameterized.run_all_keras_modes
class Conv3DTransposeTest(keras_parameterized.TestCase):
  def _run_test(self, kwargs):
    """Runs the generic layer_test on a fixed 5D input with `kwargs`."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    depth = 5
    with self.cached_session():
      testing_utils.layer_test(
          keras.layers.Conv3DTranspose,
          kwargs=kwargs,
          input_shape=(num_samples, depth, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('strides_output_padding', {'strides': (2, 2, 2),
'output_padding': (1, 1, 1)}),
)
def test_conv3d_transpose(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3, 3)
if 'data_format' not in kwargs or tf.test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_conv3d_transpose_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session():
layer = keras.layers.Conv3DTranspose(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv3d_transpose_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session():
layer = keras.layers.Conv3DTranspose(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv3d_transpose_dynamic_shape(self):
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
with self.cached_session():
# Won't raise error here.
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs={
'data_format': 'channels_last',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, None, None, None, 3),
input_data=input_data)
if tf.test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs={
'data_format': 'channels_first',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, 3, None, None, None),
input_data=input_data)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 7,476 | 34.103286 | 80 | py |
keras | keras-master/keras/layers/einsum_dense.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based einsum dense layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import re
from keras import activations
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.experimental.EinsumDense")
class EinsumDense(Layer):
  """A layer that uses tf.einsum as the backing computation.
  This layer can perform einsum calculations of arbitrary dimensionality.
  Args:
    equation: An equation describing the einsum to perform. This equation must
      be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or
      `ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum axis
      expression sequence.
    output_shape: The expected shape of the output tensor (excluding the batch
      dimension and any dimensions represented by ellipses). You can specify
      None for any dimension that is unknown or can be inferred from the input
      shape.
    activation: Activation function to use. If you don't specify anything, no
      activation is applied (that is, a "linear" activation: `a(x) = x`).
    bias_axes: A string containing the output dimension(s) to apply a bias to.
      Each character in the `bias_axes` string should correspond to a character
      in the output portion of the `equation` string.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation")..
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix.
    bias_constraint: Constraint function applied to the bias vector.
  Examples:
  **Biased dense layer with einsums**
  This example shows how to instantiate a standard Keras dense layer using
  einsum operations. This example is equivalent to
  `tf.keras.layers.Dense(64, use_bias=True)`.
  >>> layer = EinsumDense("ab,bc->ac", output_shape=64, bias_axes="c")
  >>> input_tensor = tf.keras.Input(shape=[32])
  >>> output_tensor = layer(input_tensor)
  >>> output_tensor
  <... shape=(None, 64) dtype=...>
  **Applying a dense layer to a sequence**
  This example shows how to instantiate a layer that applies the same dense
  operation to every element in a sequence. Here, the 'output_shape' has two
  values (since there are two non-batch dimensions in the output); the first
  dimension in the output_shape is `None`, because the sequence dimension `b`
  has an unknown shape.
  >>> layer = EinsumDense("abc,cd->abd",
  ...                     output_shape=(None, 64),
  ...                     bias_axes="d")
  >>> input_tensor = tf.keras.Input(shape=[32, 128])
  >>> output_tensor = layer(input_tensor)
  >>> output_tensor
  <... shape=(None, 32, 64) dtype=...>
  **Applying a dense layer to a sequence using ellipses**
  This example shows how to instantiate a layer that applies the same dense
  operation to every element in a sequence, but uses the ellipsis notation
  instead of specifying the batch and sequence dimensions.
  Because we are using ellipsis notation and have specified only one axis, the
  output_shape arg is a single value. When instantiated in this way, the layer
  can handle any number of sequence dimensions - including the case where no
  sequence dimension exists.
  >>> layer = EinsumDense("...x,xy->...y", output_shape=64, bias_axes="y")
  >>> input_tensor = tf.keras.Input(shape=[32, 128])
  >>> output_tensor = layer(input_tensor)
  >>> output_tensor
  <... shape=(None, 32, 64) dtype=...>
  """

  def __init__(self,
               equation,
               output_shape,
               activation=None,
               bias_axes=None,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(EinsumDense, self).__init__(**kwargs)
    self.equation = equation
    # Normalize `output_shape` to a list; a bare int means one output dim.
    if isinstance(output_shape, int):
      self.partial_output_shape = [output_shape]
    else:
      self.partial_output_shape = list(output_shape)
    self.bias_axes = bias_axes
    # Resolve string/`None` specs into concrete objects via the standard
    # keras getter functions.
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

  def build(self, input_shape):
    """Creates the einsum `kernel` (and optional `bias`) weights."""
    input_shape = tf.TensorShape(input_shape)
    # Derive kernel/bias shapes and the fully-resolved output shape from the
    # einsum equation and the concrete input shape.
    shape_data = _analyze_einsum_string(self.equation,
                                        self.bias_axes,
                                        input_shape,
                                        self.partial_output_shape)
    kernel_shape, bias_shape, self.full_output_shape = shape_data
    self.kernel = self.add_weight(
        "kernel",
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    # `bias_shape` is None when no `bias_axes` were requested.
    if bias_shape is not None:
      self.bias = self.add_weight(
          "bias",
          shape=bias_shape,
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    super(EinsumDense, self).build(input_shape)

  def compute_output_shape(self, _):
    # The full output shape was resolved during `build`; the input-shape
    # argument is therefore ignored.
    return tf.TensorShape(self.full_output_shape)

  def get_config(self):
    """Returns the serializable configuration of the layer."""
    config = {
        "output_shape": self.partial_output_shape,
        "equation": self.equation,
        "activation": activations.serialize(self.activation),
        "bias_axes": self.bias_axes,
        "kernel_initializer": initializers.serialize(self.kernel_initializer),
        "bias_initializer": initializers.serialize(self.bias_initializer),
        "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
        "bias_regularizer": regularizers.serialize(self.bias_regularizer),
        "activity_regularizer":
            regularizers.serialize(self.activity_regularizer),
        "kernel_constraint": constraints.serialize(self.kernel_constraint),
        "bias_constraint": constraints.serialize(self.bias_constraint),
    }
    base_config = super(EinsumDense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    # einsum contraction, then optional bias add, then optional activation.
    ret = tf.einsum(self.equation, inputs, self.kernel)
    if self.bias is not None:
      ret += self.bias
    if self.activation is not None:
      ret = self.activation(ret)
    return ret
def _analyze_einsum_string(equation, bias_axes, input_shape, output_shape):
"""Analyzes an einsum string to determine the required weight shape."""
dot_replaced_string = re.sub(r"\.\.\.", "0", equation)
# This is the case where no ellipses are present in the string.
split_string = re.match("([a-zA-Z]+),([a-zA-Z]+)->([a-zA-Z]+)",
dot_replaced_string)
if split_string:
return _analyze_split_string(split_string, bias_axes, input_shape,
output_shape)
# This is the case where ellipses are present on the left.
split_string = re.match("0([a-zA-Z]+),([a-zA-Z]+)->0([a-zA-Z]+)",
dot_replaced_string)
if split_string:
return _analyze_split_string(
split_string, bias_axes, input_shape, output_shape, left_elided=True)
# This is the case where ellipses are present on the right.
split_string = re.match("([a-zA-Z]{2,})0,([a-zA-Z]+)->([a-zA-Z]+)0",
dot_replaced_string)
if split_string:
return _analyze_split_string(split_string, bias_axes, input_shape,
output_shape)
raise ValueError(
f"Invalid einsum equation '{equation}'. Equations must be in the form "
"[X],[Y]->[Z], ...[X],[Y]->...[Z], or [X]...,[Y]->[Z]....")
def _analyze_split_string(split_string,
bias_axes,
input_shape,
output_shape,
left_elided=False):
"""Analyze an pre-split einsum string to find the weight shape."""
input_spec = split_string.group(1)
weight_spec = split_string.group(2)
output_spec = split_string.group(3)
elided = len(input_shape) - len(input_spec)
if isinstance(output_shape, int):
output_shape = [output_shape]
else:
output_shape = list(output_shape)
output_shape.insert(0, input_shape[0])
if elided > 0 and left_elided:
for i in range(1, elided):
# We already inserted the 0th input dimension at dim 0, so we need to
# start at location 1 here.
output_shape.insert(1, input_shape[i])
elif elided > 0 and not left_elided:
for i in range(len(input_shape) - elided, len(input_shape)):
output_shape.append(input_shape[i])
if left_elided:
# If we have beginning dimensions elided, we need to use negative indexing
# to determine where in the input dimension our values are.
input_dim_map = {
dim: (i + elided) - len(input_shape) for i, dim in enumerate(input_spec)
}
# Because we've constructed the full output shape already, we don't need
# to do negative indexing.
output_dim_map = {dim: (i + elided) for i, dim in enumerate(output_spec)}
else:
input_dim_map = {dim: i for i, dim in enumerate(input_spec)}
output_dim_map = {dim: i for i, dim in enumerate(output_spec)}
for i, dim in enumerate(input_spec):
input_shape_at_dim = input_shape[i]
if dim in output_dim_map:
output_shape_at_dim = output_shape[output_dim_map[dim]]
if (output_shape_at_dim is not None and
output_shape_at_dim != input_shape_at_dim):
raise ValueError(
"Input shape and output shape do not match at shared "
f"dimension '{dim}'. Input shape is {input_shape_at_dim}, "
"and output shape "
f"is {output_shape[output_dim_map[dim]]}.")
for dim in output_spec:
if dim not in input_spec and dim not in weight_spec:
raise ValueError(
f"Dimension '{dim}' was specified in the output '{output_spec}' but "
f"has no corresponding dim in the input spec '{input_spec}' or "
f"weight spec '{output_spec}'")
weight_shape = []
for dim in weight_spec:
if dim in input_dim_map:
weight_shape.append(input_shape[input_dim_map[dim]])
elif dim in output_dim_map:
weight_shape.append(output_shape[output_dim_map[dim]])
else:
raise ValueError(
f"Weight dimension '{dim}' did not have a match in either "
f"the input spec '{input_spec}' or the output spec '{output_spec}'. "
"For this layer, the weight must be fully specified.")
if bias_axes is not None:
num_left_elided = elided if left_elided else 0
idx_map = {
char: output_shape[i + num_left_elided]
for i, char in enumerate(output_spec)
}
for char in bias_axes:
if char not in output_spec:
raise ValueError(
f"Bias dimension '{char}' was requested, but is not part "
f"of the output spec '{output_spec}'")
first_bias_location = min([output_spec.find(char) for char in bias_axes])
bias_output_spec = output_spec[first_bias_location:]
bias_shape = [
idx_map[char] if char in bias_axes else 1 for char in bias_output_spec
]
if not left_elided:
for _ in range(elided):
bias_shape.append(1)
else:
bias_shape = None
return weight_shape, bias_shape, output_shape
| 12,943 | 38.950617 | 80 | py |
keras | keras-master/keras/layers/advanced_activations_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for advanced activation layers."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
@keras_parameterized.run_all_keras_modes
class AdvancedActivationsTest(keras_parameterized.TestCase):
  """Tests for advanced activation layers and their argument validation."""

  def test_leaky_relu(self):
    """LeakyReLU passes the generic layer test for several alphas."""
    for alpha in [0., .5]:
      testing_utils.layer_test(keras.layers.LeakyReLU,
                               kwargs={'alpha': alpha},
                               input_shape=(2, 3, 4),
                               supports_masking=True)

  def test_prelu(self):
    """PReLU passes the generic layer test with default arguments."""
    testing_utils.layer_test(keras.layers.PReLU, kwargs={},
                             input_shape=(2, 3, 4),
                             supports_masking=True)

  def test_prelu_share(self):
    """PReLU supports sharing its alpha parameters across axes."""
    testing_utils.layer_test(keras.layers.PReLU,
                             kwargs={'shared_axes': 1},
                             input_shape=(2, 3, 4),
                             supports_masking=True)

  def test_elu(self):
    """ELU passes the generic layer test for several alphas."""
    for alpha in [0., .5, -1.]:
      testing_utils.layer_test(keras.layers.ELU,
                               kwargs={'alpha': alpha},
                               input_shape=(2, 3, 4),
                               supports_masking=True)

  def test_thresholded_relu(self):
    """ThresholdedReLU passes the generic layer test."""
    testing_utils.layer_test(keras.layers.ThresholdedReLU,
                             kwargs={'theta': 0.5},
                             input_shape=(2, 3, 4),
                             supports_masking=True)

  def test_softmax(self):
    """Softmax supports a non-default axis."""
    testing_utils.layer_test(keras.layers.Softmax,
                             kwargs={'axis': 1},
                             input_shape=(2, 3, 4),
                             supports_masking=True)

  def test_relu(self):
    """ReLU passes the layer test and lowers to the expected graph ops."""
    testing_utils.layer_test(keras.layers.ReLU,
                             kwargs={'max_value': 10},
                             input_shape=(2, 3, 4),
                             supports_masking=True)
    x = keras.backend.ones((3, 4))
    if not tf.executing_eagerly():
      # Test that we use `leaky_relu` when appropriate in graph mode.
      self.assertTrue(
          'LeakyRelu' in keras.layers.ReLU(negative_slope=0.2)(x).name)
      # Test that we use `relu` when appropriate in graph mode.
      self.assertTrue('Relu' in keras.layers.ReLU()(x).name)
      # Test that we use `relu6` when appropriate in graph mode.
      self.assertTrue('Relu6' in keras.layers.ReLU(max_value=6)(x).name)

  def test_relu_with_invalid_max_value(self):
    """A negative `max_value` raises a descriptive ValueError."""
    with self.assertRaisesRegex(
        ValueError, 'max_value of a ReLU layer cannot be a negative '
        'value. Received: -10'):
      testing_utils.layer_test(
          keras.layers.ReLU,
          kwargs={'max_value': -10},
          input_shape=(2, 3, 4),
          supports_masking=True)

  def test_relu_with_invalid_negative_slope(self):
    """A `negative_slope` of None or < 0 raises a descriptive ValueError."""
    with self.assertRaisesRegex(
        ValueError, 'negative_slope of a ReLU layer cannot be a negative '
        'value. Received: None'):
      testing_utils.layer_test(
          keras.layers.ReLU,
          kwargs={'negative_slope': None},
          input_shape=(2, 3, 4),
          supports_masking=True)
    with self.assertRaisesRegex(
        ValueError, 'negative_slope of a ReLU layer cannot be a negative '
        'value. Received: -10'):
      testing_utils.layer_test(
          keras.layers.ReLU,
          kwargs={'negative_slope': -10},
          input_shape=(2, 3, 4),
          supports_masking=True)

  def test_relu_with_invalid_threshold(self):
    """A `threshold` of None or < 0 raises a descriptive ValueError."""
    with self.assertRaisesRegex(
        ValueError, 'threshold of a ReLU layer cannot be a negative '
        'value. Received: None'):
      testing_utils.layer_test(
          keras.layers.ReLU,
          kwargs={'threshold': None},
          input_shape=(2, 3, 4),
          supports_masking=True)
    with self.assertRaisesRegex(
        ValueError, 'threshold of a ReLU layer cannot be a negative '
        'value. Received: -10'):
      testing_utils.layer_test(
          keras.layers.ReLU,
          kwargs={'threshold': -10},
          input_shape=(2, 3, 4),
          supports_masking=True)

  @keras_parameterized.run_with_all_model_types
  def test_layer_as_activation(self):
    """An activation layer instance is accepted as `activation=` in Dense."""
    layer = keras.layers.Dense(1, activation=keras.layers.ReLU())
    model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2)

  def test_leaky_relu_with_invalid_alpha(self):
    """A None alpha for LeakyReLU raises a descriptive ValueError."""
    # Test case for GitHub issue 46993.
    with self.assertRaisesRegex(
        ValueError, 'The alpha value of a Leaky ReLU layer '
        'cannot be None. Expecting a float. Received: None'):
      testing_utils.layer_test(
          keras.layers.LeakyReLU,
          kwargs={'alpha': None},
          input_shape=(2, 3, 4),
          supports_masking=True)

  def test_leaky_elu_with_invalid_alpha(self):
    """A None alpha for ELU raises a descriptive ValueError."""
    # Test case for GitHub issue 46993.
    with self.assertRaisesRegex(
        ValueError, 'Alpha of an ELU layer cannot be None, '
        'expecting a float. Received: None'):
      testing_utils.layer_test(
          keras.layers.ELU,
          kwargs={'alpha': None},
          input_shape=(2, 3, 4),
          supports_masking=True)

  def test_threshold_relu_with_invalid_theta(self):
    """A None or negative theta raises a descriptive ValueError."""
    with self.assertRaisesRegex(
        ValueError, 'Theta of a Thresholded ReLU layer cannot '
        'be None, expecting a float. Received: None'):
      testing_utils.layer_test(
          keras.layers.ThresholdedReLU,
          kwargs={'theta': None},
          input_shape=(2, 3, 4),
          supports_masking=True)
    with self.assertRaisesRegex(
        ValueError, 'The theta value of a Thresholded ReLU '
        'layer should be >=0. Received: -10'):
      testing_utils.layer_test(
          keras.layers.ThresholdedReLU,
          kwargs={'theta': -10},
          input_shape=(2, 3, 4),
          supports_masking=True)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 6,766 | 35.978142 | 80 | py |
keras | keras-master/keras/layers/recurrent_v2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent v2 layers functionality other than GRU, LSTM.
See also: lstm_v2_test.py, gru_v2_test.py.
"""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers import embeddings
from keras.layers import recurrent_v2 as rnn_v2
@keras_parameterized.run_all_keras_modes
class RNNV2Test(keras_parameterized.TestCase):
  """Tests for v2 RNN layer behavior shared between LSTM and GRU."""

  @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
  def test_device_placement(self, layer):
    """Training on CPU succeeds even when a GPU is present but unused."""
    if not tf.test.is_gpu_available():
      self.skipTest('Need GPU for testing.')
    vocab_size = 20
    embedding_dim = 10
    batch_size = 8
    timestep = 12
    units = 5
    x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    # Test when GPU is available but not used, the graph should be properly
    # created with CPU ops.
    with testing_utils.device(should_use_gpu=False):
      model = keras.Sequential([
          keras.layers.Embedding(vocab_size, embedding_dim,
                                 batch_input_shape=[batch_size, timestep]),
          layer(units, return_sequences=True, stateful=True),
          keras.layers.Dense(vocab_size)
      ])
      model.compile(
          optimizer='adam',
          loss='sparse_categorical_crossentropy',
          run_eagerly=testing_utils.should_run_eagerly())
      model.fit(x, y, epochs=1, shuffle=False)

  @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
  def test_reset_dropout_mask_between_batch(self, layer):
    """Dropout masks must be regenerated on every forward pass."""
    # See https://github.com/tensorflow/tensorflow/issues/29187 for more details
    batch_size = 8
    timestep = 12
    embedding_dim = 10
    units = 5
    layer = layer(units, dropout=0.5, recurrent_dropout=0.5)
    inputs = np.random.random((batch_size, timestep, embedding_dim)).astype(
        np.float32)
    previous_dropout, previous_recurrent_dropout = None, None
    for _ in range(5):
      layer(inputs, training=True)
      dropout = layer.cell.get_dropout_mask_for_cell(inputs, training=True)
      recurrent_dropout = layer.cell.get_recurrent_dropout_mask_for_cell(
          inputs, training=True)
      # Consecutive passes should never reuse the same mask.
      if previous_dropout is not None:
        self.assertNotAllClose(self.evaluate(previous_dropout),
                               self.evaluate(dropout))
      previous_dropout = dropout
      if previous_recurrent_dropout is not None:
        self.assertNotAllClose(self.evaluate(previous_recurrent_dropout),
                               self.evaluate(recurrent_dropout))
      previous_recurrent_dropout = recurrent_dropout

  @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
  def test_recurrent_dropout_with_stateful_RNN(self, layer):
    """Constructing a stateful RNN with recurrent dropout must not raise."""
    # See https://github.com/tensorflow/tensorflow/issues/27829 for details.
    # The issue was caused by using inplace mul for a variable, which was a
    # warning for RefVariable, but an error for ResourceVariable in 2.0
    keras.models.Sequential([
        layer(128, stateful=True, return_sequences=True, dropout=0.2,
              batch_input_shape=[32, None, 5], recurrent_dropout=0.2)
    ])

  def test_recurrent_dropout_saved_model(self):
    """A model with recurrent dropout can be saved in SavedModel format."""
    if not tf.executing_eagerly():
      self.skipTest('v2-only test')
    inputs = keras.Input(shape=(784, 3), name='digits')
    x = keras.layers.GRU(64, activation='relu', name='GRU', dropout=0.1)(inputs)
    x = keras.layers.Dense(64, activation='relu', name='dense')(x)
    outputs = keras.layers.Dense(
        10, activation='softmax', name='predictions')(
            x)
    model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer')
    model.save(os.path.join(self.get_temp_dir(), 'model'), save_format='tf')

  @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
  def test_ragged(self, layer):
    """RNN layers accept ragged inputs produced by an Embedding layer."""
    vocab_size = 100
    inputs = tf.ragged.constant(
        np.random.RandomState(0).randint(0, vocab_size, [128, 25]))
    embedder = embeddings.Embedding(input_dim=vocab_size, output_dim=16)
    embedded_inputs = embedder(inputs)
    lstm = layer(32)
    lstm(embedded_inputs)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 4,906 | 38.256 | 80 | py |
keras | keras-master/keras/layers/__init__.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-bad-import-order,g-direct-tensorflow-import,disable=g-import-not-at-top
from tensorflow.python import tf2
# Generic layers.
from keras.engine.input_layer import Input
from keras.engine.input_layer import InputLayer
from keras.engine.input_spec import InputSpec
from keras.engine.base_layer import Layer
from keras.engine.base_preprocessing_layer import PreprocessingLayer
# Image preprocessing layers.
from keras.layers.preprocessing.image_preprocessing import CenterCrop
from keras.layers.preprocessing.image_preprocessing import RandomCrop
from keras.layers.preprocessing.image_preprocessing import RandomFlip
from keras.layers.preprocessing.image_preprocessing import RandomContrast
from keras.layers.preprocessing.image_preprocessing import RandomHeight
from keras.layers.preprocessing.image_preprocessing import RandomRotation
from keras.layers.preprocessing.image_preprocessing import RandomTranslation
from keras.layers.preprocessing.image_preprocessing import RandomWidth
from keras.layers.preprocessing.image_preprocessing import RandomZoom
from keras.layers.preprocessing.image_preprocessing import Resizing
from keras.layers.preprocessing.image_preprocessing import Rescaling
# Preprocessing layers.
from keras.layers.preprocessing.category_crossing import CategoryCrossing
from keras.layers.preprocessing.category_encoding import CategoryEncoding
from keras.layers.preprocessing.discretization import Discretization
from keras.layers.preprocessing.hashing import Hashing
from keras.layers.preprocessing.integer_lookup import IntegerLookup
from keras.layers.preprocessing.normalization import Normalization
from keras.layers.preprocessing.string_lookup import StringLookup
from keras.layers.preprocessing.text_vectorization import TextVectorization
# Advanced activations.
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.advanced_activations import PReLU
from keras.layers.advanced_activations import ELU
from keras.layers.advanced_activations import ReLU
from keras.layers.advanced_activations import ThresholdedReLU
from keras.layers.advanced_activations import Softmax
# Convolution layers.
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv1DTranspose
from keras.layers.convolutional import Conv2DTranspose
from keras.layers.convolutional import Conv3DTranspose
from keras.layers.convolutional import SeparableConv1D
from keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from keras.layers.convolutional import Convolution1D
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import Convolution3D
from keras.layers.convolutional import Convolution2DTranspose
from keras.layers.convolutional import Convolution3DTranspose
from keras.layers.convolutional import SeparableConvolution1D
from keras.layers.convolutional import SeparableConvolution2D
from keras.layers.convolutional import DepthwiseConv1D
from keras.layers.convolutional import DepthwiseConv2D
# Image processing layers.
from keras.layers.convolutional import UpSampling1D
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import UpSampling3D
from keras.layers.convolutional import ZeroPadding1D
from keras.layers.convolutional import ZeroPadding2D
from keras.layers.convolutional import ZeroPadding3D
from keras.layers.convolutional import Cropping1D
from keras.layers.convolutional import Cropping2D
from keras.layers.convolutional import Cropping3D
# Core layers.
from keras.layers.core.activation import Activation
from keras.layers.core.activity_regularization import ActivityRegularization
from keras.layers.core.dense import Dense
from keras.layers.core.dropout import Dropout
from keras.layers.core.flatten import Flatten
from keras.layers.core.lambda_layer import Lambda
from keras.layers.core.masking import Masking
from keras.layers.core.permute import Permute
from keras.layers.core.repeat_vector import RepeatVector
from keras.layers.core.reshape import Reshape
from keras.layers.core.spatial_dropout import SpatialDropout1D
from keras.layers.core.spatial_dropout import SpatialDropout2D
from keras.layers.core.spatial_dropout import SpatialDropout3D
from keras.layers.core.tf_op_layer import ClassMethod
from keras.layers.core.tf_op_layer import InstanceMethod
from keras.layers.core.tf_op_layer import InstanceProperty
from keras.layers.core.tf_op_layer import SlicingOpLambda
from keras.layers.core.tf_op_layer import TFOpLambda
# Dense Attention layers.
from keras.layers.dense_attention import AdditiveAttention
from keras.layers.dense_attention import Attention
# Embedding layers.
from keras.layers.embeddings import Embedding
# Einsum-based dense layer/
from keras.layers.einsum_dense import EinsumDense
# Multi-head Attention layer.
from keras.layers.multi_head_attention import MultiHeadAttention
# Locally-connected layers.
from keras.layers.local import LocallyConnected1D
from keras.layers.local import LocallyConnected2D
# Merge layers.
from keras.layers.merge import Add
from keras.layers.merge import Subtract
from keras.layers.merge import Multiply
from keras.layers.merge import Average
from keras.layers.merge import Maximum
from keras.layers.merge import Minimum
from keras.layers.merge import Concatenate
from keras.layers.merge import Dot
from keras.layers.merge import add
from keras.layers.merge import subtract
from keras.layers.merge import multiply
from keras.layers.merge import average
from keras.layers.merge import maximum
from keras.layers.merge import minimum
from keras.layers.merge import concatenate
from keras.layers.merge import dot
# Noise layers.
from keras.layers.noise import AlphaDropout
from keras.layers.noise import GaussianNoise
from keras.layers.noise import GaussianDropout
# Normalization layers.
from keras.layers.normalization.layer_normalization import LayerNormalization
from keras.layers.normalization.batch_normalization import SyncBatchNormalization
if tf.__internal__.tf2.enabled():
from keras.layers.normalization.batch_normalization import BatchNormalization
from keras.layers.normalization.batch_normalization_v1 import BatchNormalization as BatchNormalizationV1
BatchNormalizationV2 = BatchNormalization
else:
from keras.layers.normalization.batch_normalization_v1 import BatchNormalization
from keras.layers.normalization.batch_normalization import BatchNormalization as BatchNormalizationV2
BatchNormalizationV1 = BatchNormalization
# Kernelized layers.
from keras.layers.kernelized import RandomFourierFeatures
# Pooling layers.
from keras.layers.pooling import MaxPooling1D
from keras.layers.pooling import MaxPooling2D
from keras.layers.pooling import MaxPooling3D
from keras.layers.pooling import AveragePooling1D
from keras.layers.pooling import AveragePooling2D
from keras.layers.pooling import AveragePooling3D
from keras.layers.pooling import GlobalAveragePooling1D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers.pooling import GlobalAveragePooling3D
from keras.layers.pooling import GlobalMaxPooling1D
from keras.layers.pooling import GlobalMaxPooling2D
from keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from keras.layers.pooling import MaxPool1D
from keras.layers.pooling import MaxPool2D
from keras.layers.pooling import MaxPool3D
from keras.layers.pooling import AvgPool1D
from keras.layers.pooling import AvgPool2D
from keras.layers.pooling import AvgPool3D
from keras.layers.pooling import GlobalAvgPool1D
from keras.layers.pooling import GlobalAvgPool2D
from keras.layers.pooling import GlobalAvgPool3D
from keras.layers.pooling import GlobalMaxPool1D
from keras.layers.pooling import GlobalMaxPool2D
from keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from keras.layers.recurrent import RNN
from keras.layers.recurrent import AbstractRNNCell
from keras.layers.recurrent import StackedRNNCells
from keras.layers.recurrent import SimpleRNNCell
from keras.layers.recurrent import PeepholeLSTMCell
from keras.layers.recurrent import SimpleRNN
if tf.__internal__.tf2.enabled():
from keras.layers.recurrent_v2 import GRU
from keras.layers.recurrent_v2 import GRUCell
from keras.layers.recurrent_v2 import LSTM
from keras.layers.recurrent_v2 import LSTMCell
from keras.layers.recurrent import GRU as GRUV1
from keras.layers.recurrent import GRUCell as GRUCellV1
from keras.layers.recurrent import LSTM as LSTMV1
from keras.layers.recurrent import LSTMCell as LSTMCellV1
GRUV2 = GRU
GRUCellV2 = GRUCell
LSTMV2 = LSTM
LSTMCellV2 = LSTMCell
else:
from keras.layers.recurrent import GRU
from keras.layers.recurrent import GRUCell
from keras.layers.recurrent import LSTM
from keras.layers.recurrent import LSTMCell
from keras.layers.recurrent_v2 import GRU as GRUV2
from keras.layers.recurrent_v2 import GRUCell as GRUCellV2
from keras.layers.recurrent_v2 import LSTM as LSTMV2
from keras.layers.recurrent_v2 import LSTMCell as LSTMCellV2
GRUV1 = GRU
GRUCellV1 = GRUCell
LSTMV1 = LSTM
LSTMCellV1 = LSTMCell
# Convolutional-recurrent layers.
from keras.layers.convolutional_recurrent import ConvLSTM1D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.convolutional_recurrent import ConvLSTM3D
# cuDNN recurrent layers.
from keras.layers.cudnn_recurrent import CuDNNLSTM
from keras.layers.cudnn_recurrent import CuDNNGRU
# Wrapper functions.
from keras.layers.wrappers import Wrapper
from keras.layers.wrappers import Bidirectional
from keras.layers.wrappers import TimeDistributed
# RNN Cell wrappers.
from keras.layers.rnn_cell_wrapper_v2 import DeviceWrapper
from keras.layers.rnn_cell_wrapper_v2 import DropoutWrapper
from keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper
# Serialization functions.
from keras.layers import serialization
from keras.layers.serialization import deserialize
from keras.layers.serialization import serialize
class VersionAwareLayers:
  """Utility to be used internally to access layers in a V1/V2-aware fashion.

  When using layers within the Keras codebase, under the constraint that
  e.g. `layers.BatchNormalization` should be the `BatchNormalization` version
  corresponding to the current runtime (TF1 or TF2), do not simply access
  `layers.BatchNormalization` since it would ignore e.g. an early
  `compat.v2.disable_v2_behavior()` call. Instead, use an instance
  of `VersionAwareLayers` (which you can use just like the `layers` module).
  """

  def __getattr__(self, name):
    # Called only when normal attribute lookup fails, so every layer-name
    # access goes through the serialization registry, which is populated
    # according to the *current* TF1/TF2 runtime state on each call.
    serialization.populate_deserializable_objects()
    if name in serialization.LOCAL.ALL_OBJECTS:
      return serialization.LOCAL.ALL_OBJECTS[name]
    # NOTE(review): plain `object` has no `__getattr__`, so for unknown names
    # this line itself raises AttributeError (about `__getattr__`) rather
    # than delegating — confirm this fallback is intentional.
    return super(VersionAwareLayers, self).__getattr__(name)
# ==== File: keras-master/keras/layers/wrappers_test.py (python) ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
import tensorflow.compat.v2 as tf
import copy
from absl.testing import parameterized
import numpy as np
import keras
from tensorflow.python.framework import test_util as tf_test_util
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import base_layer_utils
from keras.layers import core
from keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper
from keras.utils import generic_utils
from tensorflow.python.training.tracking import util as trackable_util
class _RNNCellWithConstants(keras.layers.Layer):
  """Minimal test RNN cell whose step also consumes a `constants` tensor.

  The step output is the sum of three linear projections: the current input,
  the previous state, and the per-sequence constant.
  """

  def __init__(self, units, constant_size, **kwargs):
    self.units = units
    self.state_size = units
    self.constant_size = constant_size
    super(_RNNCellWithConstants, self).__init__(**kwargs)

  def build(self, input_shape):
    # One kernel per signal source: step input, recurrent state, constant.
    self.input_kernel = self.add_weight(
        name='kernel',
        shape=(input_shape[-1], self.units),
        initializer='uniform')
    self.recurrent_kernel = self.add_weight(
        name='recurrent_kernel',
        shape=(self.units, self.units),
        initializer='uniform')
    self.constant_kernel = self.add_weight(
        name='constant_kernel',
        shape=(self.constant_size, self.units),
        initializer='uniform')
    self.built = True

  def call(self, inputs, states, constants):
    prev_output, = states
    constant, = constants
    # Sum of the three projections; also becomes the next state.
    combined = keras.backend.dot(inputs, self.input_kernel)
    combined = combined + keras.backend.dot(prev_output, self.recurrent_kernel)
    combined = combined + keras.backend.dot(constant, self.constant_kernel)
    return combined, [combined]

  def get_config(self):
    merged = dict(super(_RNNCellWithConstants, self).get_config())
    merged.update({'units': self.units, 'constant_size': self.constant_size})
    return merged
class _ResidualLSTMCell(keras.layers.LSTMCell):
  """LSTM cell with a residual (skip) connection added to its output."""

  def call(self, inputs, states, training=None):
    cell_output, new_states = super(_ResidualLSTMCell, self).call(
        inputs, states)
    # Residual connection: add the step input back onto the cell output.
    return cell_output + inputs, new_states
class _AddOneCell(keras.layers.AbstractRNNCell):
  """Toy cell: adds one to the (mean-pooled) input and to every state entry."""

  @property
  def state_size(self):
    return 1

  @property
  def output_size(self):
    return 1

  def call(self, inputs, state):
    def _plus_one(t):
      return t + 1.0

    # Collapse the feature axis to a single value, then increment.
    pooled = tf.reduce_mean(inputs, axis=1, keepdims=True)
    next_state = tf.nest.map_structure(_plus_one, state)
    return pooled + 1.0, next_state
class TimeDistributedTest(keras_parameterized.TestCase):
  """Tests for the `keras.layers.TimeDistributed` wrapper."""

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_timedistributed_dense(self):
    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(2), input_shape=(3, 4)))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(
        np.random.random((10, 3, 4)),
        np.random.random((10, 3, 2)),
        epochs=1,
        batch_size=10)

    # test config
    model.get_config()

    # check whether the model variables are present in the
    # trackable list of objects
    checkpointed_object_ids = {
        id(o) for o in trackable_util.list_objects(model)
    }
    for v in model.variables:
      self.assertIn(id(v), checkpointed_object_ids)

  def test_timedistributed_static_batch_size(self):
    # Same as the dense test, but with a fixed batch_size on the input spec.
    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(
        np.random.random((10, 3, 4)),
        np.random.random((10, 3, 2)),
        epochs=1,
        batch_size=10)

  def test_timedistributed_invalid_init(self):
    # Wrapping a plain tensor (not a Layer) must be rejected.
    x = tf.constant(np.zeros((1, 1)).astype('float32'))
    with self.assertRaisesRegex(
        ValueError, 'Please initialize `TimeDistributed` layer with a '
        '`tf.keras.layers.Layer` instance.'):
      keras.layers.TimeDistributed(x)

  def test_timedistributed_conv2d(self):
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Conv2D(5, (2, 2), padding='same'),
              input_shape=(2, 4, 4, 3)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))

      # Round-trip through JSON to exercise (de)serialization of the wrapper.
      model = keras.models.model_from_json(model.to_json())
      model.summary()

  def test_timedistributed_stacked(self):
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4)))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')

      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 3)),
          epochs=1,
          batch_size=10)

  def test_regularizers(self):
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2, kernel_regularizer='l1',
                                 activity_regularizer='l1'),
              input_shape=(3, 4)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      # One loss from the kernel regularizer, one from the activity
      # regularizer — both must surface through the wrapper.
      self.assertEqual(len(model.losses), 2)

  def test_TimeDistributed_learning_phase(self):
    with self.cached_session():
      # test layers that need learning_phase to be set
      np.random.seed(1234)
      x = keras.layers.Input(shape=(3, 2))
      # Dropout with rate ~1 and training=True should zero almost everything.
      y = keras.layers.TimeDistributed(keras.layers.Dropout(.999))(
          x, training=True)
      model = keras.models.Model(x, y)
      y = model.predict(np.random.random((10, 3, 2)))
      self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)

  def test_TimeDistributed_batchnorm(self):
    with self.cached_session():
      # test that wrapped BN updates still work.
      model = keras.models.Sequential()
      model.add(keras.layers.TimeDistributed(
          keras.layers.BatchNormalization(center=True, scale=True),
          name='bn',
          input_shape=(10, 2)))
      model.compile(optimizer='rmsprop', loss='mse')
      # Assert that mean and variance are 0 and 1.
      td = model.layers[0]
      self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
      assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
      # Train
      model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                           np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
      # Assert that mean and variance changed.
      assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
      assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))

  def test_TimeDistributed_trainable(self):
    # test layers that need learning_phase to be set
    x = keras.layers.Input(shape=(3, 2))
    layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
    _ = layer(x)
    # Toggling `trainable` on the wrapper must propagate to the inner layer.
    self.assertEqual(len(layer.trainable_weights), 2)
    layer.trainable = False
    assert not layer.trainable_weights
    layer.trainable = True
    assert len(layer.trainable_weights) == 2

  def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
    with self.cached_session():
      # test with unspecified shape and Embeddings with mask_zero
      model = keras.models.Sequential()
      model.add(keras.layers.TimeDistributed(
          keras.layers.Embedding(5, 6, mask_zero=True),
          input_shape=(None, None)))  # N by t_1 by t_2 by 6
      model.add(keras.layers.TimeDistributed(
          keras.layers.SimpleRNN(7, return_sequences=True)))
      model.add(keras.layers.TimeDistributed(
          keras.layers.SimpleRNN(8, return_sequences=False)))
      model.add(keras.layers.SimpleRNN(1, return_sequences=False))
      model.compile(optimizer='rmsprop', loss='mse')
      model_input = np.random.randint(low=1, high=5, size=(10, 3, 4),
                                      dtype='int32')
      # Zero out a growing corner of each of the first samples so the
      # mask_zero path is actually exercised.
      for i in range(4):
        model_input[i, i:, i:] = 0
      model.fit(model_input,
                np.random.random((10, 1)), epochs=1, batch_size=10)
      # Chain compute_mask through every layer and compare against
      # hand-computed reference masks.
      mask_outputs = [model.layers[0].compute_mask(model.input)]
      for layer in model.layers[1:]:
        mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
      func = keras.backend.function([model.input], mask_outputs[:-1])
      mask_outputs_val = func([model_input])
      ref_mask_val_0 = model_input > 0         # embedding layer
      ref_mask_val_1 = ref_mask_val_0          # first RNN layer
      ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1)  # second RNN layer
      ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
      for i in range(3):
        self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
      self.assertIs(mask_outputs[-1], None)  # final layer

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_TimeDistributed_with_masking_layer(self):
    # test with Masking layer
    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Masking(mask_value=0.,), input_shape=(None, 4)))
    model.add(keras.layers.TimeDistributed(keras.layers.Dense(5)))
    model.compile(optimizer='rmsprop', loss='mse')
    model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
    for i in range(4):
      model_input[i, i:, :] = 0.
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6)
    mask_outputs = [model.layers[0].compute_mask(model.input)]
    mask_outputs += [
        model.layers[1].compute_mask(model.layers[1].input, mask_outputs[-1])
    ]
    func = keras.backend.function([model.input], mask_outputs)
    mask_outputs_val = func([model_input])
    self.assertEqual((mask_outputs_val[0]).all(), model_input.all())
    self.assertEqual((mask_outputs_val[1]).all(), model_input.all())

  def test_TimeDistributed_with_different_time_shapes(self):
    # The same wrapper instance must accept inputs with different time
    # dimensions, but reject a different feature dimension.
    time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
    ph_1 = keras.backend.placeholder(shape=(None, 10, 13))
    out_1 = time_dist(ph_1)
    self.assertEqual(out_1.shape.as_list(), [None, 10, 5])

    ph_2 = keras.backend.placeholder(shape=(None, 1, 13))
    out_2 = time_dist(ph_2)
    self.assertEqual(out_2.shape.as_list(), [None, 1, 5])

    ph_3 = keras.backend.placeholder(shape=(None, 1, 18))
    with self.assertRaisesRegex(ValueError, 'is incompatible with'):
      time_dist(ph_3)

  def test_TimeDistributed_with_invalid_dimensions(self):
    # Rank-2 input (no time axis) must be rejected.
    time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
    ph = keras.backend.placeholder(shape=(None, 10))
    with self.assertRaisesRegex(
        ValueError,
        '`TimeDistributed` Layer should be passed an `input_shape `'):
      time_dist(ph)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_TimeDistributed_reshape(self):

    class NoReshapeLayer(keras.layers.Layer):

      def call(self, inputs):
        return inputs

    # Built-in layers that aren't stateful use the reshape implementation.
    td1 = keras.layers.TimeDistributed(keras.layers.Dense(5))
    self.assertTrue(td1._always_use_reshape)

    # Built-in layers that are stateful don't use the reshape implementation.
    td2 = keras.layers.TimeDistributed(
        keras.layers.RNN(keras.layers.SimpleRNNCell(10), stateful=True))
    self.assertFalse(td2._always_use_reshape)

    # Custom layers are not allowlisted for the fast reshape implementation.
    td3 = keras.layers.TimeDistributed(NoReshapeLayer())
    self.assertFalse(td3._always_use_reshape)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_TimeDistributed_output_shape_return_types(self):

    class TestLayer(keras.layers.Layer):

      def call(self, inputs):
        return tf.concat([inputs, inputs], axis=-1)

      def compute_output_shape(self, input_shape):
        output_shape = tf.TensorShape(input_shape).as_list()
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tf.TensorShape(output_shape)
        return output_shape

    class TestListLayer(TestLayer):

      def compute_output_shape(self, input_shape):
        shape = super(TestListLayer, self).compute_output_shape(input_shape)
        return shape.as_list()

    class TestTupleLayer(TestLayer):

      def compute_output_shape(self, input_shape):
        shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
        return tuple(shape.as_list())

    # Layers can specify output shape as list/tuple/TensorShape
    test_layers = [TestLayer, TestListLayer, TestTupleLayer]

    for layer in test_layers:
      input_layer = keras.layers.TimeDistributed(layer())
      inputs = keras.backend.placeholder(shape=(None, 2, 4))
      output = input_layer(inputs)
      self.assertEqual(output.shape.as_list(), [None, 2, 8])
      self.assertEqual(
          input_layer.compute_output_shape([None, 2, 4]).as_list(),
          [None, 2, 8])

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  # TODO(scottzhu): check why v1 session failed.
  def test_TimeDistributed_with_mask_first_implementation(self):
    np.random.seed(100)
    # Stateful RNN forces the non-reshape (per-timestep) implementation.
    rnn_layer = keras.layers.LSTM(4, return_sequences=True, stateful=True)

    data = np.array([[[[1.0], [1.0]], [[0.0], [1.0]]],
                     [[[1.0], [0.0]], [[1.0], [1.0]]],
                     [[[1.0], [0.0]], [[1.0], [1.0]]]])
    x = keras.layers.Input(shape=(2, 2, 1), batch_size=3)
    x_masking = keras.layers.Masking()(x)
    y = keras.layers.TimeDistributed(rnn_layer)(x_masking)
    model_1 = keras.models.Model(x, y)
    model_1.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    output_with_mask = model_1.predict(data, steps=1)

    y = keras.layers.TimeDistributed(rnn_layer)(x)
    model_2 = keras.models.Model(x, y)
    model_2.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    output = model_2.predict(data, steps=1)

    # Masked and unmasked runs must differ, proving the mask was applied.
    self.assertNotAllClose(output_with_mask, output, atol=1e-7)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters(
      *testing_utils.generate_combinations_with_testcase_name(
          layer=[keras.layers.LSTM,
                 keras.layers.Dense]))
  def test_TimeDistributed_with_ragged_input(self, layer):
    if tf.executing_eagerly():
      self.skipTest('b/143103634')
    np.random.seed(100)
    layer = layer(4)

    ragged_data = tf.ragged.constant(
        [[[[1.0], [1.0]], [[2.0], [2.0]]],
         [[[4.0], [4.0]], [[5.0], [5.0]], [[6.0], [6.0]]],
         [[[7.0], [7.0]], [[8.0], [8.0]], [[9.0], [9.0]]]],
        ragged_rank=1)
    x_ragged = keras.Input(shape=(None, 2, 1), dtype='float32', ragged=True)
    y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
    model_1 = keras.models.Model(x_ragged, y_ragged)
    model_1._run_eagerly = testing_utils.should_run_eagerly()
    output_ragged = model_1.predict(ragged_data, steps=1)

    # Reference path: densify the ragged data and mask out the padding;
    # results must match the ragged path.
    x_dense = keras.Input(shape=(None, 2, 1), dtype='float32')
    masking = keras.layers.Masking()(x_dense)
    y_dense = keras.layers.TimeDistributed(layer)(masking)
    model_2 = keras.models.Model(x_dense, y_dense)
    dense_data = ragged_data.to_tensor()
    model_2._run_eagerly = testing_utils.should_run_eagerly()
    output_dense = model_2.predict(dense_data, steps=1)

    output_ragged = convert_ragged_tensor_value(output_ragged)
    self.assertAllEqual(output_ragged.to_tensor(), output_dense)

  @keras_parameterized.run_all_keras_modes
  def test_TimeDistributed_with_ragged_input_with_batch_size(self):
    np.random.seed(100)
    layer = keras.layers.Dense(16)

    ragged_data = tf.ragged.constant(
        [[[[1.0], [1.0]], [[2.0], [2.0]]],
         [[[4.0], [4.0]], [[5.0], [5.0]], [[6.0], [6.0]]],
         [[[7.0], [7.0]], [[8.0], [8.0]], [[9.0], [9.0]]]],
        ragged_rank=1)

    # Use the first implementation by specifying batch_size
    x_ragged = keras.Input(shape=(None, 2, 1), batch_size=3, dtype='float32',
                           ragged=True)
    y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
    model_1 = keras.models.Model(x_ragged, y_ragged)
    output_ragged = model_1.predict(ragged_data, steps=1)

    x_dense = keras.Input(shape=(None, 2, 1), batch_size=3, dtype='float32')
    masking = keras.layers.Masking()(x_dense)
    y_dense = keras.layers.TimeDistributed(layer)(masking)
    model_2 = keras.models.Model(x_dense, y_dense)
    dense_data = ragged_data.to_tensor()
    output_dense = model_2.predict(dense_data, steps=1)

    output_ragged = convert_ragged_tensor_value(output_ragged)
    self.assertAllEqual(output_ragged.to_tensor(), output_dense)

  def test_TimeDistributed_set_static_shape(self):
    layer = keras.layers.TimeDistributed(keras.layers.Conv2D(16, (3, 3)))
    inputs = keras.Input(batch_shape=(1, None, 32, 32, 1))
    outputs = layer(inputs)
    # Make sure the batch dim is not lost after array_ops.reshape.
    self.assertListEqual(outputs.shape.as_list(), [1, None, 30, 30, 16])

  @keras_parameterized.run_all_keras_modes
  def test_TimeDistributed_with_mimo(self):
    # Multi-input/multi-output inner layer: the wrapper must distribute over
    # structures of tensors, not just single tensors.
    dense_1 = keras.layers.Dense(8)
    dense_2 = keras.layers.Dense(16)

    class TestLayer(keras.layers.Layer):

      def __init__(self):
        super(TestLayer, self).__init__()
        self.dense_1 = dense_1
        self.dense_2 = dense_2

      def call(self, inputs):
        return self.dense_1(inputs[0]), self.dense_2(inputs[1])

      def compute_output_shape(self, input_shape):
        output_shape_1 = self.dense_1.compute_output_shape(input_shape[0])
        output_shape_2 = self.dense_2.compute_output_shape(input_shape[1])
        return output_shape_1, output_shape_2

    np.random.seed(100)
    layer = TestLayer()

    data_1 = tf.constant([[[[1.0], [1.0]], [[2.0], [2.0]]],
                          [[[4.0], [4.0]], [[5.0], [5.0]]],
                          [[[7.0], [7.0]], [[8.0], [8.0]]]])

    data_2 = tf.constant([[[[1.0], [1.0]], [[2.0], [2.0]]],
                          [[[4.0], [4.0]], [[5.0], [5.0]]],
                          [[[7.0], [7.0]], [[8.0], [8.0]]]])

    x1 = keras.Input(shape=(None, 2, 1), dtype='float32')
    x2 = keras.Input(shape=(None, 2, 1), dtype='float32')
    y1, y2 = keras.layers.TimeDistributed(layer)([x1, x2])
    model_1 = keras.models.Model([x1, x2], [y1, y2])
    model_1.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    output_1 = model_1.predict((data_1, data_2), steps=1)

    # Reference: apply the same (shared-weight) dense layers directly.
    y1 = dense_1(x1)
    y2 = dense_2(x2)
    model_2 = keras.models.Model([x1, x2], [y1, y2])
    output_2 = model_2.predict((data_1, data_2), steps=1)

    self.assertAllClose(output_1, output_2)

    model_1.fit(
        x=[np.random.random((10, 2, 2, 1)),
           np.random.random((10, 2, 2, 1))],
        y=[np.random.random((10, 2, 2, 8)),
           np.random.random((10, 2, 2, 16))],
        epochs=1,
        batch_size=3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BidirectionalTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(['sum', 'concat', 'ave', 'mul'])
def test_bidirectional(self, mode):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_object_ids = {
id(o) for o in trackable_util.list_objects(model)
}
for v in model.variables:
self.assertIn(id(v), checkpointed_object_ids)
# test compute output shape
ref_shape = model.layers[-1].output.shape
shape = model.layers[-1].compute_output_shape(
(None, timesteps, dim))
self.assertListEqual(shape.as_list(), ref_shape.as_list())
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_bidirectional_invalid_init(self):
x = tf.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegex(
ValueError,
'Please initialize `Bidirectional` layer with a '
'`tf.keras.layers.Layer` instance.'):
keras.layers.Bidirectional(x)
def test_bidirectional_weight_loading(self):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), input_shape=(timesteps, dim)))
y_ref = model.predict(x)
weights = model.layers[-1].get_weights()
model.layers[-1].set_weights(weights)
y = model.predict(x)
self.assertAllClose(y, y_ref)
def test_bidirectional_stacked(self):
# test stacked bidirectional layers
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.layers.Input((timesteps, dim))
output = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
def test_bidirectional_statefulness(self):
# Bidirectional and stateful
def run_test():
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
bidi_rnn = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)
self.assertTrue(bidi_rnn.stateful)
output = bidi_rnn(inputs)
model = keras.models.Model(inputs, output)
y_1 = model.predict(x, batch_size=1)
model.reset_states()
y_2 = model.predict(x, batch_size=1)
self.assertAllClose(y_1, y_2)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
if tf.executing_eagerly():
run_test()
else:
tf_test_util.enable_output_all_intermediates(run_test)()
@parameterized.parameters(['sum', 'mul', 'ave', 'concat', None])
def test_Bidirectional_merged_value(self, merge_mode):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
if merge_mode == 'sum':
merge_func = lambda y, y_rev: y + y_rev
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: y * y_rev
elif merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
else:
merge_func = lambda y, y_rev: [y, y_rev]
# basic case
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_sequences=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], _to_list(layer(inputs)))
f_forward = keras.backend.function([inputs],
[layer.forward_layer(inputs)])
f_backward = keras.backend.function(
[inputs],
[keras.backend.reverse(layer.backward_layer(inputs), 1)])
y_merged = f_merged(x)
y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0]))
assert len(y_merged) == len(y_expected)
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
# test return_state
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], layer(inputs))
f_forward = keras.backend.function([inputs],
layer.forward_layer(inputs))
f_backward = keras.backend.function([inputs],
layer.backward_layer(inputs))
n_states = len(layer.layer.states)
y_merged = f_merged(x)
y_forward = f_forward(x)
y_backward = f_backward(x)
y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
assert len(y_merged) == len(y_expected) + n_states * 2
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
y_merged = y_merged[-n_states * 2:]
y_forward = y_forward[-n_states:]
y_backward = y_backward[-n_states:]
for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
self.assertAllClose(state_birnn, state_inner, atol=1e-5)
@parameterized.parameters([True, False])
def test_Bidirectional_with_time_major_input(self, time_major):
batch_size, time, input_dim = 2, 3, 1
inputs = tf.zeros((batch_size, time, input_dim))
# length is [1 2]. Within the batch, the first element has 1 step, and the
# second element as 2 steps.
lengths = tf.range(1, 1 + batch_size)
mask = tf.sequence_mask(lengths, maxlen=time, dtype=tf.float32)
forward_cell = _AddOneCell(name='forward')
backward_cell = _AddOneCell(name='backward')
layer = keras.layers.Bidirectional(
layer=keras.layers.RNN(
forward_cell, time_major=time_major, return_sequences=True),
backward_layer=keras.layers.RNN(
backward_cell, time_major=time_major, return_sequences=True,
go_backwards=True))
# Switch to time-major.
if time_major:
inputs = tf.transpose(inputs, [1, 0, 2])
mask = tf.transpose(mask, [1, 0])
keras_outputs = layer(inputs, mask=mask)
if time_major:
keras_outputs = tf.transpose(keras_outputs, [1, 0, 2])
# expect the first element in batch has 1 step and second element in batch
# has 2 steps.
expected_result = np.array([[[1., 1.], [0., 0.], [0., 0.]],
[[1., 1.], [1., 1.], [0., 0.]]])
self.assertAllClose(expected_result, keras_outputs)
def test_Bidirectional_dropout(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'sum'
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs, training=True))
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
y1 = _to_list(model.predict(x))
y2 = _to_list(model.predict(x))
for x1, x2 in zip(y1, y2):
self.assertAllClose(x1, x2, atol=1e-5)
def test_Bidirectional_state_reuse(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = keras.layers.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
# test passing invalid initial_state: passing a tensor
input2 = keras.layers.Input((timesteps, dim))
with self.assertRaises(ValueError):
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state[0])
# test valid usage: passing a list
output = keras.layers.Bidirectional(rnn(units))(input2,
initial_state=state)
model = keras.models.Model([input1, input2], output)
assert len(model.layers) == 4
assert isinstance(model.layers[-1].input, list)
inputs = [np.random.rand(samples, timesteps, dim),
np.random.rand(samples, timesteps, dim)]
model.predict(inputs)
def test_Bidirectional_state_reuse_with_np_input(self):
# See https://github.com/tensorflow/tensorflow/issues/28761 for more detail.
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = np.random.rand(samples, timesteps, dim).astype(np.float32)
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
input2 = np.random.rand(samples, timesteps, dim).astype(np.float32)
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state)
def test_Bidirectional_trainable(self):
# test layers that need learning_phase to be set
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert len(layer.trainable_weights) == 6
layer.trainable = False
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.trainable_weights) == 6
def test_Bidirectional_updates(self):
if tf.executing_eagerly():
self.skipTest('layer.updates is only available in graph mode.')
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
x_reachable_update = x * x
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert not layer.updates
# TODO(b/128684069): Remove when Wrapper sublayers are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
layer.forward_layer.add_update(x_reachable_update, inputs=x)
layer.forward_layer.add_update(1, inputs=None)
layer.backward_layer.add_update(x_reachable_update, inputs=x)
layer.backward_layer.add_update(1, inputs=None)
assert len(layer.updates) == 4
def test_Bidirectional_losses(self):
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.Bidirectional(
keras.layers.SimpleRNN(
3,
kernel_regularizer='l1',
bias_regularizer='l1',
activity_regularizer='l1'))
_ = layer(x)
assert len(layer.losses) == 6
loss = x * x
layer.forward_layer.add_loss(loss)
layer.backward_layer.add_loss(loss, inputs=x)
assert len(layer.losses) == 8
  def test_Bidirectional_with_constants(self):
    """RNN cells taking `constants` work under Bidirectional and serialize."""
    with self.cached_session():
      # Test basic case.
      x = keras.Input((5, 5))
      c = keras.Input((3,))
      cell = _RNNCellWithConstants(32, 3)
      custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
      with generic_utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
      y = layer(x, constants=c)
      model = keras.Model([x, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)), np.zeros((6, 3))],
          np.zeros((6, 64))
      )

      # Test basic case serialization: a config round-trip with the same
      # weights must reproduce the original predictions.
      x_np = np.random.random((6, 5, 5))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()

      with generic_utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer(x, constants=c)
      model = keras.Model([x, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)

      # Test flat list inputs (constants appended to the input list).
      with generic_utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer([x, c])
      model = keras.Model([x, c], y)
      model.set_weights(weights)
      y_np_3 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_3, atol=1e-4)
  def test_Bidirectional_with_constants_layer_passing_initial_state(self):
    """Constants plus explicit initial states: training and serialization."""
    with self.cached_session():
      # Test basic case.
      x = keras.Input((5, 5))
      c = keras.Input((3,))
      s_for = keras.Input((32,))
      s_bac = keras.Input((32,))
      cell = _RNNCellWithConstants(32, 3)
      custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
      with generic_utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
      y = layer(x, initial_state=[s_for, s_bac], constants=c)
      model = keras.Model([x, s_for, s_bac, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)),
           np.zeros((6, 32)),
           np.zeros((6, 32)),
           np.zeros((6, 3))],
          np.zeros((6, 64))
      )

      # Test basic case serialization: round-trip the config and check that
      # predictions with restored weights are unchanged.
      x_np = np.random.random((6, 5, 5))
      s_fw_np = np.random.random((6, 32))
      s_bk_np = np.random.random((6, 32))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()

      with generic_utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer(x, initial_state=[s_for, s_bac], constants=c)
      model = keras.Model([x, s_for, s_bac, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)

      # Verify that state is used: perturbing the states must change output.
      y_np_2_different_s = model.predict(
          [x_np, s_fw_np + 10., s_bk_np + 10., c_np])
      assert np.mean(y_np - y_np_2_different_s) != 0

      # Test flat list inputs (states and constants appended to the inputs).
      with generic_utils.CustomObjectScope(custom_objects):
        layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
      y = layer([x, s_for, s_bac, c])
      model = keras.Model([x, s_for, s_bac, c], y)
      model.set_weights(weights)
      y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
      self.assertAllClose(y_np, y_np_3, atol=1e-4)
@parameterized.parameters([keras.layers.LSTM, keras.layers.GRU])
def test_Bidirectional_output_shape(self, rnn):
input_shape = [None, 2, 1]
num_state = 4 if rnn == keras.layers.LSTM else 2
wrapper = keras.layers.Bidirectional(rnn(3))
output_shape = wrapper.compute_output_shape(input_shape)
self.assertEqual(output_shape.as_list(), [None, 6])
wrapper = keras.layers.Bidirectional(rnn(3, return_state=True))
output_shape = wrapper.compute_output_shape(input_shape)
# 1 for output and the rest for forward and backward states
self.assertLen(output_shape, 1 + num_state)
self.assertEqual(output_shape[0].as_list(), [None, 6])
for shape in output_shape[1:]:
self.assertEqual(shape.as_list(), [None, 3])
wrapper = keras.layers.Bidirectional(rnn(3, return_state=True),
merge_mode=None)
output_shape = wrapper.compute_output_shape(input_shape)
# 1 for forward output and 1 for backward output, and the rest for states
self.assertLen(output_shape, 2 + num_state)
for shape in output_shape:
self.assertEqual(shape.as_list(), [None, 3])
  def test_Bidirectional_output_shape_return_types(self):
    """Wrapped layers may report shapes as list, tuple, or TensorShape."""

    class TestLayer(keras.layers.SimpleRNN):
      # Doubles the channel dimension; reports a TensorShape.

      def call(self, inputs):
        return tf.concat([inputs, inputs], axis=-1)

      def compute_output_shape(self, input_shape):
        output_shape = tf.TensorShape(input_shape).as_list()
        output_shape[-1] = output_shape[-1] * 2
        return tf.TensorShape(output_shape)

    class TestListLayer(TestLayer):
      # Same behavior, but reports the shape as a plain list.

      def compute_output_shape(self, input_shape):
        shape = super(TestListLayer, self).compute_output_shape(input_shape)
        return shape.as_list()

    class TestTupleLayer(TestLayer):
      # Same behavior, but reports the shape as a tuple.

      def compute_output_shape(self, input_shape):
        shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
        return tuple(shape.as_list())

    # Layers can specify output shape as list/tuple/TensorShape.
    # Last dim: 4 doubled to 8 per direction, concatenated to 16.
    test_layers = [TestLayer, TestListLayer, TestTupleLayer]
    for layer in test_layers:
      input_layer = keras.layers.Bidirectional(layer(1))
      inputs = keras.backend.placeholder(shape=(None, 2, 4))
      output = input_layer(inputs)
      self.assertEqual(output.shape.as_list(), [None, 2, 16])
      self.assertEqual(
          input_layer.compute_output_shape([None, 2, 4]).as_list(),
          [None, 2, 16])
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  def test_Bidirectional_last_output_with_masking(self):
    """With masking, the last output should equal the final unmasked state."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    merge_mode = 'concat'
    x = np.random.rand(samples, timesteps, dim)
    # clear the first record's timestep 2. Last output should be same as state,
    # not zeroed.
    x[0, 2] = 0
    with self.cached_session():
      inputs = keras.Input((timesteps, dim))
      masked_inputs = keras.layers.Masking()(inputs)
      wrapped = keras.layers.Bidirectional(
          rnn(units, return_state=True), merge_mode=merge_mode)
      outputs = _to_list(wrapped(masked_inputs, training=True))
      # 1 merged output + 2 states (h, c) per direction.
      self.assertLen(outputs, 5)
      self.assertEqual(outputs[0].shape.as_list(), [None, units * 2])

      model = keras.Model(inputs, outputs)
      y = _to_list(model.predict(x))
      self.assertLen(y, 5)
      # Merged output equals forward h state concatenated with backward h.
      self.assertAllClose(y[0], np.concatenate([y[1], y[3]], axis=1))
  @parameterized.parameters([keras.layers.LSTM, keras.layers.GRU])
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
  def test_Bidirectional_sequence_output_with_masking(self, rnn):
    """Masked timesteps must yield zeros in the returned sequence."""
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    merge_mode = 'concat'
    x = np.random.rand(samples, timesteps, dim)
    # clear the first record's timestep 2, and expect the output of timestep 2
    # is also 0s.
    x[0, 2] = 0
    with self.cached_session():
      inputs = keras.Input((timesteps, dim))
      masked_inputs = keras.layers.Masking()(inputs)
      wrapped = keras.layers.Bidirectional(
          rnn(units, return_sequences=True),
          merge_mode=merge_mode)
      outputs = _to_list(wrapped(masked_inputs, training=True))
      self.assertLen(outputs, 1)
      self.assertEqual(outputs[0].shape.as_list(), [None, timesteps, units * 2])

      model = keras.Model(inputs, outputs)
      y = _to_list(model.predict(x))
      self.assertLen(y, 1)
      # The fully-masked timestep produces zeros in both directions.
      self.assertAllClose(y[0][0, 2], np.zeros(units * 2))
  @parameterized.parameters(['sum', 'concat'])
  def test_custom_backward_layer(self, mode):
    """A user-supplied backward layer trains, checkpoints, and serializes."""
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    x = np.random.random((samples, timesteps, dim))
    # 'concat' doubles the feature dimension of the target.
    target_dim = 2 * output_dim if mode == 'concat' else output_dim
    y = np.random.random((samples, target_dim))
    forward_layer = rnn(output_dim)
    backward_layer = rnn(output_dim, go_backwards=True)

    # test with Sequential model
    model = keras.models.Sequential()
    model.add(
        keras.layers.Bidirectional(
            forward_layer,
            merge_mode=mode,
            backward_layer=backward_layer,
            input_shape=(timesteps, dim)))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(x, y, epochs=1, batch_size=1)

    # check whether the model variables are present in the
    # trackable list of objects
    checkpointed_object_ids = {
        id(o) for o in trackable_util.list_objects(model)
    }
    for v in model.variables:
      self.assertIn(id(v), checkpointed_object_ids)

    # test compute output shape
    ref_shape = model.layers[-1].output.shape
    shape = model.layers[-1].compute_output_shape((None, timesteps, dim))
    self.assertListEqual(shape.as_list(), ref_shape.as_list())

    # test config
    model.get_config()
    model = keras.models.model_from_json(model.to_json())
    model.summary()
def test_custom_backward_layer_error_check(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units)
with self.assertRaisesRegex(ValueError,
'should have different `go_backwards` value.'):
keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
for attr in ('stateful', 'return_sequences', 'return_state'):
kwargs = {attr: True}
backward_layer = rnn(units, go_backwards=True, **kwargs)
with self.assertRaisesRegex(
ValueError, 'expected to have the same value for attribute "' + attr):
keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
def test_custom_backward_layer_serialization(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
layer_from_config = keras.layers.Bidirectional.from_config(config)
new_config = layer_from_config.get_config()
self.assertDictEqual(config, new_config)
def test_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
layer = keras.layers.Bidirectional(rnn(units, name='rnn'))
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'rnn')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_rnn')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_rnn')
def test_custom_backward_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'lstm')
self.assertEqual(config['backward_layer']['config']['name'], 'lstm_1')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_lstm')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_lstm_1')
def test_rnn_with_customized_cell(self):
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = _ResidualLSTMCell(units)
forward_layer = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
bidirectional_rnn = keras.layers.Bidirectional(
forward_layer, merge_mode=merge_mode)
outputs = _to_list(bidirectional_rnn(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
def test_rnn_with_customized_cell_stacking(self):
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = [_ResidualLSTMCell(units), _ResidualLSTMCell(units)]
forward_layer = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
bidirectional_rnn = keras.layers.Bidirectional(
forward_layer, merge_mode=merge_mode)
outputs = _to_list(bidirectional_rnn(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
@testing_utils.run_v2_only
def test_wrapped_rnn_cell(self):
# See https://github.com/tensorflow/tensorflow/issues/26581.
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = keras.layers.LSTMCell(units)
cell = ResidualWrapper(cell)
rnn = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(rnn, merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
  @parameterized.parameters(['ave', 'concat', 'mul'])
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm RNN does not support ragged tensors yet.')
  def test_Bidirectional_ragged_input(self, merge_mode):
    """Merged ragged output equals manually merging the two directions."""
    np.random.seed(100)
    rnn = keras.layers.LSTM
    units = 3
    x = tf.ragged.constant(
        [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1]],
         [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
         [[1, 1, 1], [1, 1, 1], [1, 1, 1]]],
        ragged_rank=1)
    x = tf.cast(x, 'float32')

    # pylint: disable=g-long-lambda
    with self.cached_session():
      # Reference merge functions mirroring the wrapper's merge_mode.
      if merge_mode == 'ave':
        merge_func = lambda y, y_rev: (y + y_rev) / 2
      elif merge_mode == 'concat':
        merge_func = lambda y, y_rev: tf.concat(
            (y, y_rev), axis=-1)
      elif merge_mode == 'mul':
        merge_func = lambda y, y_rev: (y * y_rev)
      # pylint: enable=g-long-lambda

      inputs = keras.Input(
          shape=(None, 3), batch_size=4, dtype='float32', ragged=True)
      layer = keras.layers.Bidirectional(
          rnn(units, return_sequences=True), merge_mode=merge_mode)
      f_merged = keras.backend.function([inputs], layer(inputs))
      f_forward = keras.backend.function([inputs],
                                         layer.forward_layer(inputs))

      # TODO(kaftan): after KerasTensor refactor TF op layers should work
      # with many composite tensors, and this shouldn't need to be a lambda
      # layer.
      # Reverse the backward layer's output along time so it lines up with
      # the forward output before merging.
      reverse_layer = core.Lambda(tf.reverse, arguments=dict(axis=[1]))
      f_backward = keras.backend.function(
          [inputs],
          reverse_layer(layer.backward_layer(inputs)))

      y_merged = f_merged(x)
      y_expected = merge_func(
          convert_ragged_tensor_value(f_forward(x)),
          convert_ragged_tensor_value(f_backward(x)))

      y_merged = convert_ragged_tensor_value(y_merged)
      self.assertAllClose(y_merged.flat_values, y_expected.flat_values)
def test_Bidirectional_nested_state_reuse(self):
if not tf.executing_eagerly():
self.skipTest('Only test eager mode.')
x = tf.random.normal([4, 8, 16])
layer = keras.layers.Bidirectional(
keras.layers.RNN([keras.layers.LSTMCell(5),
keras.layers.LSTMCell(5)],
return_sequences=True,
return_state=True))
y = layer(x)
self.assertAllClose(layer([x] + y[1:]), layer(x, initial_state=y[1:]))
  def test_full_input_spec(self):
    """Stateful Bidirectional RNN with explicit initial states.

    Regression test for
    https://github.com/tensorflow/tensorflow/issues/38403.
    """
    inputs = keras.layers.Input(batch_shape=(1, 1, 1))
    fw_state = keras.layers.Input(batch_shape=(1, 1))
    bw_state = keras.layers.Input(batch_shape=(1, 1))
    states = [fw_state, bw_state]
    bidirectional_rnn = keras.layers.Bidirectional(
        keras.layers.SimpleRNN(1, stateful=True))
    rnn_output = bidirectional_rnn(inputs, initial_state=states)
    model = keras.Model([inputs, fw_state, bw_state], rnn_output)

    # A stateful layer carries state across predict() calls, so the second
    # call differs; reset_states() restores the first result.
    output1 = model.predict(
        [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))])
    output2 = model.predict(
        [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))])
    model.reset_states()
    output3 = model.predict(
        [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))])
    self.assertAllClose(output1, output3)
    self.assertNotAllClose(output1, output2)
class ExampleWrapper(keras.layers.Wrapper):
  """Simple Wrapper subclass that delegates `call` to the wrapped layer."""

  def call(self, inputs, *args, **kwargs):
    # Forward everything unchanged to the inner layer.
    return self.layer(inputs, *args, **kwargs)
class WrapperTest(parameterized.TestCase):
  """Tests for the base `Wrapper` layer."""

  def test_wrapper_from_config_no_mutation(self):
    """`from_config` must not mutate the config dict it is given."""
    wrapper = ExampleWrapper(keras.layers.Dense(1))
    config = wrapper.get_config()
    config_copy = config.copy()
    self.assertEqual(config, config_copy)

    wrapper_from_config = ExampleWrapper.from_config(config)
    # NOTE(review): `wrapper_from_config` itself is never inspected — only the
    # original wrapper's config is re-read. That may be intentional (only the
    # mutation of `config` matters), but checking
    # `wrapper_from_config.get_config()` may have been the intent; confirm.
    new_config = wrapper.get_config()
    self.assertEqual(new_config, config_copy)
    self.assertEqual(config, config_copy)
def _to_list(ls):
if isinstance(ls, list):
return ls
else:
return [ls]
def convert_ragged_tensor_value(inputs):
  """Converts a v1 `RaggedTensorValue` into a `tf.RaggedTensor`.

  Non-ragged inputs are returned unchanged.
  """
  if isinstance(inputs, tf.compat.v1.ragged.RaggedTensorValue):
    flat_values = tf.convert_to_tensor(
        value=inputs.flat_values,
        name='flat_values')
    # validate=False: the row splits come from an already-valid ragged value.
    return tf.RaggedTensor.from_nested_row_splits(
        flat_values, inputs.nested_row_splits, validate=False)
  return inputs
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 51,716 | 36.476087 | 80 | py |
keras | keras-master/keras/layers/einsum_dense_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based einsum dense layer."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from keras import testing_utils
from keras.layers import einsum_dense
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
"testcase_name": "_1d_end_weight",
"equation": "ab,b->a",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": [],
"expected_weight_shape": [32],
"expected_bias_shape": None,
"expected_output_shape": (None,)
}, {
"testcase_name": "_2d_middle_weight",
"equation": "ab,bc->ac",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 64)
}, {
"testcase_name": "_3d_bert",
"equation": "abc,cde->abde",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [4],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_2_bias",
"equation": "abc,cde->abde",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 1],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_1_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "be",
"input_shape": (None, 7, 2),
"output_shape": (7, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [7, 1, 4],
"expected_output_shape": (None, 7, 3, 4)
}, {
"testcase_name": "_3d_bert_projection",
"equation": "BFNH,NHD->BFD",
"bias_axes": None,
"input_shape": (None, 1, 2, 3),
"output_shape": (1, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 4)
}, {
"testcase_name": "_2d_bert",
"equation": "abc,cd->abd",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (1, 4),
"expected_weight_shape": [2, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 4)
}, {
"testcase_name": "_embedding_1d",
"equation": "i,d->id",
"bias_axes": None,
"input_shape": (None,),
"output_shape": (2),
"expected_weight_shape": [2],
"expected_bias_shape": None,
"expected_output_shape": (None, 2)
}, {
"testcase_name": "_xlnet_lm",
"equation": "ibd,nd->ibn",
"bias_axes": None,
"input_shape": (None, None, 1),
"output_shape": (None, 2),
"expected_weight_shape": [2, 1],
"expected_bias_shape": None,
"expected_output_shape": (None, None, 2)
}, {
"testcase_name": "_2d_precast",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 64)
}, {
"testcase_name": "_2d_precast_multiple_elided_dims",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (None, None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, None, 64)
}, {
"testcase_name": "_3d_precast",
"equation": "...c,cde->...de",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_precast_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [4],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_precast_2_bias",
"equation": "...c,cde->...de",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 1],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_precast_2_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "de",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 4],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_2d_postcast",
"equation": "bc...,cd->bd...",
"bias_axes": None,
"input_shape": (None, 1, 2, 3),
"output_shape": (4),
"expected_weight_shape": [1, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 4, 2, 3)
}, {
"testcase_name": "_3d_postcast",
"equation": "bc...,cde->bde...",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 3, 4, 2)
}, {
"testcase_name": "_3d_postcast_1_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [3, 1, 1],
"expected_output_shape": (None, 3, 4, 2)
}, {
"testcase_name": "_3d_postcast_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [4, 1],
"expected_output_shape": (None, 3, 4, 2)
}, {
"testcase_name": "_3d_postcast_1_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "de",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [3, 4, 1],
"expected_output_shape": (None, 3, 4, 2)
})
class TestEinsumDenseLayer(keras_parameterized.TestCase):
  """Parameterized checks of einsum-string analysis and layer building."""

  def test_weight_shapes(self, equation, bias_axes, input_shape, output_shape,
                         expected_weight_shape, expected_bias_shape,
                         expected_output_shape):
    """`_analyze_einsum_string` yields the expected kernel/bias shapes."""
    del expected_output_shape  # Not used in this test.

    weight_shape, bias_shape, _ = einsum_dense._analyze_einsum_string(
        equation, bias_axes, input_shape, output_shape)

    self.assertAllEqual(expected_weight_shape, weight_shape)
    self.assertAllEqual(expected_bias_shape, bias_shape)

  def test_layer_creation(self, equation, bias_axes, input_shape, output_shape,
                          expected_weight_shape, expected_bias_shape,
                          expected_output_shape):
    """Building an EinsumDense layer creates correctly shaped variables."""
    # Keras elides the 0-dimension of the input shape when constructing inputs.
    non_batch_input_shape = list(input_shape)[1:]

    input_tensor = keras.Input(shape=non_batch_input_shape)
    layer = einsum_dense.EinsumDense(
        equation=equation, output_shape=output_shape, bias_axes=bias_axes)
    output_tensor = layer(input_tensor)

    self.assertAllEqual(expected_weight_shape, layer.kernel.shape.as_list())
    if expected_bias_shape is None:
      # No bias_axes were requested, so no bias variable should exist.
      self.assertIsNone(layer.bias)
    else:
      self.assertAllEqual(expected_bias_shape, layer.bias.shape.as_list())
    self.assertAllEqual(expected_output_shape, output_tensor.shape.as_list())
@keras_parameterized.run_all_keras_modes
class TestEinsumLayerAPI(keras_parameterized.TestCase):
  """End-to-end API and error-message tests for `EinsumDense`."""

  def test_layer_api(self):
    """A constant-initialized layer produces the hand-computed output."""
    input_data = np.array([[1.0, 2.0], [3.0, 4.0]])
    kwargs = {
        "equation": "...b,bc->...c",
        "bias_axes": "c",
        "output_shape": 4,
        "bias_initializer": keras.initializers.constant(0.03),
        "kernel_initializer": keras.initializers.constant(0.5),
        "dtype": input_data.dtype
    }
    # Each output element = 0.5 * (x1 + x2) + 0.03.
    expected_output = np.array([[1.53, 1.53, 1.53, 1.53],
                                [3.53, 3.53, 3.53, 3.53]])

    output_data = testing_utils.layer_test(
        einsum_dense.EinsumDense,
        kwargs=kwargs,
        input_shape=(None, 2),
        input_data=input_data)

    self.assertAllClose(expected_output, output_data)

  def test_unspecified_bias_dim_fails(self):
    """Bias axes must appear in the einsum output spec."""
    input_tensor = keras.Input(shape=(32,))
    layer = einsum_dense.EinsumDense(
        equation="ab,bc->ac", output_shape=64, bias_axes="y")
    with self.assertRaisesRegex(
        ValueError, ".*is not part of the output spec.*"):
      _ = layer(input_tensor)

  def test_incompatible_input_output_shape_fails(self):
    """Shared (non-batch) dims must agree between input and output_shape."""
    input_tensor = keras.Input(shape=(32, 64))
    layer = einsum_dense.EinsumDense(
        equation="abc,cd->abd", output_shape=(10, 96))
    with self.assertRaisesRegex(
        ValueError, ".*Input shape and output shape do not match at shared "
        "dimension 'b'.*"):
      _ = layer(input_tensor)

  def test_unspecified_output_dim_fails(self):
    """Output labels must be resolvable from input or weight labels."""
    input_tensor = keras.Input(shape=(32,))
    layer = einsum_dense.EinsumDense(equation="ab,bc->cd", output_shape=64)
    with self.assertRaisesRegex(
        ValueError, ".*Dimension 'd' was specified in the output 'cd' but has "
        "no corresponding dim.*"):
      _ = layer(input_tensor)

  def test_unspecified_weight_dim_fails(self):
    """Weight labels must match an input or output label."""
    input_tensor = keras.Input(shape=(32,))
    layer = einsum_dense.EinsumDense(equation="ab,zd->ad", output_shape=64)
    with self.assertRaisesRegex(ValueError,
                                ".*Weight dimension 'z' did not have a match "):
      _ = layer(input_tensor)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 11,282 | 35.047923 | 83 | py |
keras | keras-master/keras/layers/kernelized.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Keras layers that implement explicit (approximate) kernel feature maps."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import initializers
from keras.engine import base_layer
from keras.engine import input_spec
from tensorflow.python.util.tf_export import keras_export
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
@keras_export('keras.layers.experimental.RandomFourierFeatures')
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that projects its inputs into a random feature space.
This layer implements a mapping from input space to a space with `output_dim`
dimensions, which approximates shift-invariant kernels. A kernel function
`K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`.
Many popular Radial Basis Functions (RBF), including Gaussian and
Laplacian kernels, are shift-invariant.
The implementation of this layer is based on the following paper:
["Random Features for Large-Scale Kernel Machines"](
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
by Ali Rahimi and Ben Recht.
The distribution from which the parameters of the random features map (layer)
are sampled determines which shift-invariant kernel the layer approximates
(see paper for more details). You can use the distribution of your
choice. The layer supports out-of-the-box
approximations of the following two RBF kernels:
- Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
- Laplacian: `K(x, y) = exp(-abs(x - y) / scale))`
**Note:** Unlike what is described in the paper and unlike what is used in
the Scikit-Learn implementation, the output of this layer does not apply
the `sqrt(2 / D)` normalization factor.
**Usage:** Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending on
the loss function of the linear model, the composition of this layer and the
linear model results to models that are equivalent (up to approximation) to
kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
kernel linear regression (for squared loss), etc.
Examples:
A kernel multinomial logistic regression model with Gaussian kernel for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10, activation='softmax'),
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['categorical_accuracy']
)
```
A quasi-SVM classifier for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10),
])
model.compile(
optimizer='adam',
loss='hinge',
metrics=['categorical_accuracy']
)
```
To use another kernel, just replace the layer creation line with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Args:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the layer).
It can be either a string identifier or a Keras `Initializer` instance.
Currently only 'gaussian' and 'laplacian' are supported string
identifiers (case insensitive). Note that the kernel matrix is not
trainable.
scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see concrete
definitions above). When provided, it should be a positive float. If None,
a default value is used: if the kernel initializer is set to "gaussian",
`scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0.
Both the approximation error of the kernel and the classification quality
are sensitive to this parameter. If `trainable` is set to `True`, this
parameter is learned end-to-end during training and the provided value
serves as the initial value.
**Note:** When features from this layer are fed to a linear model,
by making `scale` trainable, the resulting optimization problem is
no longer convex (even if the loss function used by the linear model
is convex).
trainable: Whether the scaling parameter of the layer should be trainable.
Defaults to `False`.
name: String, name to use for this layer.
"""
  def __init__(self,
               output_dim,
               kernel_initializer='gaussian',
               scale=None,
               trainable=False,
               name=None,
               **kwargs):
    """Validates the arguments and stores the layer configuration.

    Args:
      output_dim: Positive integer, dimension of the random feature space.
      kernel_initializer: 'gaussian' / 'laplacian' (case-insensitive) or a
        Keras `Initializer` instance.
      scale: Optional positive float; when None, a default is chosen in
        `build`.
      trainable: Whether the scale parameter is trained.
      name: Optional layer name.
      **kwargs: Standard base-layer keyword arguments.

    Raises:
      ValueError: If `output_dim` is not positive, a string
        `kernel_initializer` is unsupported, or `scale` is not positive.
    """
    if output_dim <= 0:
      raise ValueError(
          f'`output_dim` should be a positive integer. Received: {output_dim}')
    if isinstance(kernel_initializer, str):
      # Only strings are validated; Initializer instances are accepted as-is.
      if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
        raise ValueError(
            f'Unsupported `kernel_initializer`: {kernel_initializer} '
            f'Expected one of: {_SUPPORTED_RBF_KERNEL_TYPES}')
    if scale is not None and scale <= 0.0:
      raise ValueError('When provided, `scale` should be a positive float. '
                       f'Received: {scale}')
    super(RandomFourierFeatures, self).__init__(
        trainable=trainable, name=name, **kwargs)
    self.output_dim = output_dim
    self.kernel_initializer = kernel_initializer
    self.scale = scale
  def build(self, input_shape):
    """Creates the fixed random kernel, random bias and the trainable scale.

    Args:
      input_shape: Shape of the input; must have rank 2 with a statically
        known last dimension.

    Raises:
      ValueError: If the input rank is not 2 or the last dimension is
        unknown (`None`).
    """
    input_shape = tf.TensorShape(input_shape)
    # TODO(pmol): Allow higher dimension inputs. Currently the input is expected
    # to have shape [batch_size, dimension].
    if input_shape.rank != 2:
      raise ValueError(
          'The rank of the input tensor should be 2. '
          f'Received input with rank {input_shape.ndims} instead. '
          f'Full input shape received: {input_shape}')
    if input_shape.dims[1].value is None:
      raise ValueError(
          'The last dimension of the input tensor should be defined. '
          f'Found `None`. Full input shape received: {input_shape}')
    self.input_spec = input_spec.InputSpec(
        ndim=2, axes={1: input_shape.dims[1].value})
    input_dim = input_shape.dims[1].value
    kernel_initializer = _get_random_features_initializer(
        self.kernel_initializer, shape=(input_dim, self.output_dim))
    # The random projection matrix is drawn once at build time and kept
    # fixed (trainable=False).
    self.unscaled_kernel = self.add_weight(
        name='unscaled_kernel',
        shape=(input_dim, self.output_dim),
        dtype=tf.float32,
        initializer=kernel_initializer,
        trainable=False)
    # Random phase offsets drawn uniformly from [0, 2*pi); also fixed.
    self.bias = self.add_weight(
        name='bias',
        shape=(self.output_dim,),
        dtype=tf.float32,
        initializer=tf.compat.v1.random_uniform_initializer(
            minval=0.0, maxval=2 * np.pi, dtype=tf.float32),
        trainable=False)
    if self.scale is None:
      self.scale = _get_default_scale(self.kernel_initializer, input_dim)
    # The scale may be learned end-to-end (when the layer is trainable) but
    # is constrained to stay non-negative.
    self.kernel_scale = self.add_weight(
        name='kernel_scale',
        shape=(1,),
        dtype=tf.float32,
        initializer=tf.compat.v1.constant_initializer(self.scale),
        trainable=True,
        constraint='NonNeg')
    super(RandomFourierFeatures, self).build(input_shape)
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
inputs = tf.cast(inputs, tf.float32)
kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
outputs = tf.raw_ops.MatMul(a=inputs, b=kernel)
outputs = tf.nn.bias_add(outputs, self.bias)
return tf.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
'The last dimension of the input tensor should be defined. '
f'Found `None`. Full input shape received: {input_shape}')
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if not isinstance(kernel_initializer, str):
kernel_initializer = initializers.serialize(kernel_initializer)
config = {
'output_dim': self.output_dim,
'kernel_initializer': kernel_initializer,
'scale': self.scale,
}
base_config = super(RandomFourierFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
  """Resolves `initializer` (string or Initializer) to an Initializer object.

  Args:
    initializer: Either an `Initializer` instance (returned unchanged) or one
      of the string identifiers 'gaussian' / 'laplacian' (case insensitive).
    shape: Shape of the random feature matrix, used when sampling for the
      'laplacian' case.

  Returns:
    An Initializer for the random feature matrix.

  Raises:
    ValueError: If a string identifier other than the supported ones is given.
  """
  def _sample_cauchy(loc, scale, shape):
    # Inverse-CDF sampling: tan(pi * (U - 0.5)) is standard Cauchy for
    # U ~ Uniform(0, 1).
    uniform_draws = np.random.uniform(low=0., high=1., size=shape)
    return loc + scale * np.tan(np.pi * (uniform_draws - 0.5))
  if not isinstance(initializer, str):
    return initializer
  kernel_name = initializer.lower()
  if kernel_name == 'gaussian':
    return tf.compat.v1.random_normal_initializer(stddev=1.0)
  if kernel_name == 'laplacian':
    return tf.compat.v1.constant_initializer(
        _sample_cauchy(loc=0.0, scale=1.0, shape=shape))
  raise ValueError(
      f'Unsupported `kernel_initializer`: "{initializer}" '
      f'Expected one of: {_SUPPORTED_RBF_KERNEL_TYPES}')
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, str) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
| 10,673 | 38.828358 | 80 | py |
keras | keras-master/keras/layers/gru_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU layer."""
import tensorflow.compat.v2 as tf
import copy
from absl.testing import parameterized
import numpy as np
import keras
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.utils import np_utils
@keras_parameterized.run_all_keras_modes
class GRULayerTest(keras_parameterized.TestCase):
  """Tests for `keras.layers.GRU` under all Keras execution modes."""
  def test_return_sequences_GRU(self):
    """GRU with `return_sequences=True` passes the generic layer test."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.GRU,
        kwargs={'units': units,
                'return_sequences': True},
        input_shape=(num_samples, timesteps, embedding_dim))
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Double type is not yet supported in ROCm')
  @testing_utils.run_v2_only
  def test_float64_GRU(self):
    """GRU accepts float64 inputs when constructed with `dtype='float64'`."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.GRU,
        kwargs={'units': units,
                'return_sequences': True,
                'dtype': 'float64'},
        input_shape=(num_samples, timesteps, embedding_dim),
        input_dtype='float64')
  def test_dynamic_behavior_GRU(self):
    """GRU built with an unknown time dimension trains on concrete batches."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
    model = keras.models.Sequential()
    model.add(layer)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((num_samples, timesteps, embedding_dim))
    y = np.random.random((num_samples, units))
    model.train_on_batch(x, y)
  def test_dropout_GRU(self):
    """GRU with input and recurrent dropout passes the generic layer test."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.GRU,
        kwargs={'units': units,
                'dropout': 0.1,
                'recurrent_dropout': 0.1},
        input_shape=(num_samples, timesteps, embedding_dim))
  def test_recurrent_dropout_with_implementation_restriction(self):
    """Using `recurrent_dropout` forces `implementation` back to 1."""
    layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)
    # The implementation is force to 1 due to the limit of recurrent_dropout.
    self.assertEqual(layer.implementation, 1)
  @parameterized.parameters([0, 1, 2])
  def test_implementation_mode_GRU(self, implementation_mode):
    """GRU passes the generic layer test for each `implementation` mode."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.GRU,
        kwargs={'units': units,
                'implementation': implementation_mode},
        input_shape=(num_samples, timesteps, embedding_dim))
  def test_reset_after_GRU(self):
    """GRU with `reset_after=True` can be compiled, fit and used to predict."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=num_samples,
        test_samples=0,
        input_shape=(timesteps, embedding_dim),
        num_classes=units)
    y_train = np_utils.to_categorical(y_train, units)
    inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
    gru_layer = keras.layers.GRU(units,
                                 reset_after=True)
    output = gru_layer(inputs)
    gru_model = keras.models.Model(inputs, output)
    gru_model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    gru_model.fit(x_train, y_train)
    gru_model.predict(x_train)
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='MIOpen only supports packed input output')
  def test_with_masking_layer_GRU(self):
    """GRU trains on zero-padded sequences behind a `Masking` layer."""
    layer_class = keras.layers.GRU
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    # Normalize targets so they form valid categorical distributions.
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(layer_class(units=5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='MIOpen only supports packed input output')
  def test_statefulness_GRU(self):
    """Stateful GRU: states persist across batches, reset, and honor masks."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = keras.layers.GRU
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            4,
            embedding_dim,
            mask_zero=True,
            input_length=timesteps,
            batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(
        units, return_sequences=False, stateful=True, weights=None)
    model.add(layer)
    model.compile(
        optimizer='sgd',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    out1 = model.predict(np.ones((num_samples, timesteps)))
    self.assertEqual(out1.shape, (num_samples, units))
    # train once so that the states change
    model.train_on_batch(
        np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
    out2 = model.predict(np.ones((num_samples, timesteps)))
    # if the state is not reset, output should be different
    self.assertNotEqual(out1.max(), out2.max())
    # check that output changes after states are reset
    # (even though the model itself didn't change)
    layer.reset_states()
    out3 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out2.max(), out3.max())
    # check that container-level reset_states() works
    model.reset_states()
    out4 = model.predict(np.ones((num_samples, timesteps)))
    np.testing.assert_allclose(out3, out4, atol=1e-5)
    # check that the call to `predict` updated the states
    out5 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out4.max(), out5.max())
    # Check masking
    layer.reset_states()
    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)
    layer.reset_states()
    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)
    # Masked (zero) steps are skipped, so left- and right-padded inputs
    # should yield the same final output.
    np.testing.assert_allclose(out7, out6, atol=1e-5)
  def test_get_initial_states(self):
    """`GRUCell.get_initial_state` produces a state the cell can consume."""
    batch_size = 4
    cell = keras.layers.GRUCell(20)
    initial_state = cell.get_initial_state(
        batch_size=batch_size, dtype=tf.float32)
    _, state = cell(np.ones((batch_size, 20), dtype=np.float32), initial_state)
    self.assertEqual(state.shape, initial_state.shape)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class GRULayerGenericTest(tf.test.TestCase):
  """Graph/eager tests for GRU constraints, config round-trip and losses."""
  def test_constraints_GRU(self):
    """Kernel/recurrent/bias constraints are stored on the built cell."""
    embedding_dim = 4
    layer_class = keras.layers.GRU
    k_constraint = keras.constraints.max_norm(0.01)
    r_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_constraint=k_constraint,
        recurrent_constraint=r_constraint,
        bias_constraint=b_constraint)
    layer.build((None, None, embedding_dim))
    self.assertEqual(layer.cell.kernel.constraint, k_constraint)
    self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
    self.assertEqual(layer.cell.bias.constraint, b_constraint)
  def test_from_config_GRU(self):
    """`from_config(get_config())` round-trips, stateful or not."""
    layer_class = keras.layers.GRU
    for stateful in (False, True):
      l1 = layer_class(units=1, stateful=stateful)
      l2 = layer_class.from_config(l1.get_config())
      assert l1.get_config() == l2.get_config()
  def test_deep_copy_GRU(self):
    """`copy.deepcopy` of a GRUCell preserves units and config."""
    cell = keras.layers.GRUCell(5)
    copied_cell = copy.deepcopy(cell)
    self.assertEqual(copied_cell.units, 5)
    self.assertEqual(cell.get_config(), copied_cell.get_config())
  def test_regularizers_GRU(self):
    """Weight and activity regularizers create the expected loss tensors."""
    embedding_dim = 4
    layer_class = keras.layers.GRU
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_regularizer=keras.regularizers.l1(0.01),
        recurrent_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l2',
        activity_regularizer='l1')
    layer.build((None, None, 2))
    # Three weight regularizers (kernel, recurrent, bias) fire at build time.
    self.assertEqual(len(layer.losses), 3)
    x = keras.backend.variable(np.ones((2, 3, 2)))
    layer(x)
    # The activity regularizer only adds a loss once the layer is called.
    if tf.executing_eagerly():
      self.assertEqual(len(layer.losses), 4)
    else:
      self.assertEqual(len(layer.get_losses_for(x)), 1)
# Standard test entry point when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 9,543 | 32.370629 | 80 | py |
keras | keras-master/keras/layers/embeddings_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for embedding layers."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
import keras
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.mixed_precision import policy
import numpy as np
import tensorflow.compat.v2 as tf
class EmbeddingTest(keras_parameterized.TestCase):
  """Tests for `keras.layers.Embedding`."""
  @keras_parameterized.run_all_keras_modes
  def test_embedding(self):
    """Generic layer tests over 2D/3D int inputs, with and without masking."""
    if tf.test.is_gpu_available():
      self.skipTest('Only test embedding on CPU.')
    testing_utils.layer_test(
        keras.layers.Embedding,
        kwargs={'output_dim': 4,
                'input_dim': 10,
                'input_length': 2},
        input_shape=(3, 2),
        input_dtype='int32',
        expected_output_dtype='float32')
    testing_utils.layer_test(
        keras.layers.Embedding,
        kwargs={'output_dim': 4,
                'input_dim': 10,
                'mask_zero': True},
        input_shape=(3, 2),
        input_dtype='int32',
        expected_output_dtype='float32')
    testing_utils.layer_test(
        keras.layers.Embedding,
        kwargs={'output_dim': 4,
                'input_dim': 10,
                'mask_zero': True},
        input_shape=(3, 4, 2),
        input_dtype='int32',
        expected_output_dtype='float32')
    testing_utils.layer_test(
        keras.layers.Embedding,
        kwargs={'output_dim': 4,
                'input_dim': 10,
                'mask_zero': True,
                'input_length': (None, 2)},
        input_shape=(3, 4, 2),
        input_dtype='int32',
        expected_output_dtype='float32')
  @keras_parameterized.run_all_keras_modes
  def test_embedding_correctness(self):
    """Embedding looks up exactly the rows that were set as weights."""
    layer = keras.layers.Embedding(output_dim=2, input_dim=2)
    model = keras.models.Sequential([layer])
    layer.set_weights([np.array([[1, 1], [2, 2]])])
    model.run_eagerly = testing_utils.should_run_eagerly()
    outputs = model.predict(np.array([[0, 1, 0]], dtype='int32'))
    self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]])
  def test_embedding_incorrect_dimension(self):
    """Non-positive `input_dim` / `output_dim` raise `ValueError`."""
    with self.assertRaises(ValueError):
      keras.layers.Embedding(input_dim=0, output_dim=1)
    with self.assertRaises(ValueError):
      keras.layers.Embedding(input_dim=1, output_dim=0)
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_eager_gpu_cpu(self):
    """Gradients flow through the embedding lookup and can be applied."""
    l = keras.layers.Embedding(output_dim=2, input_dim=2)
    l.build((None, 2))
    inputs = keras.backend.constant([[0, 1, 0]], dtype='int32')
    with tf.GradientTape() as tape:
      output = l(inputs)
    gs = tape.gradient(output, l.weights)
    opt = tf.compat.v1.train.AdagradOptimizer(0.1)
    opt.apply_gradients(zip(gs, l.weights))
    self.assertAllEqual(len(gs), 1)
  @keras_parameterized.run_all_keras_modes
  def test_embedding_with_ragged_input(self):
    """Embedding supports `tf.RaggedTensor` inputs end to end."""
    layer = keras.layers.Embedding(
        input_dim=3,
        output_dim=2,
        weights=[np.array([[0., 0.], [1., 1.], [2., 2.]])])
    inputs = keras.layers.Input(
        shape=(None,), dtype=tf.float32, ragged=True)
    # pylint: disable=unnecessary-lambda
    outputs = keras.layers.Lambda(lambda args: keras.backend.identity(args))(
        inputs)
    # pylint: enable=unnecessary-lambda
    outputs = layer(outputs)
    model = keras.Model(inputs, outputs)
    model.run_eagerly = testing_utils.should_run_eagerly()
    outputs = model.predict(
        tf.ragged.constant([[1., 2., 2.], [0.], [1., 2.]],
                           ragged_rank=1))
    self.assertAllClose(
        outputs,
        tf.ragged.constant(
            [[[1., 1.], [2., 2.], [2., 2.]], [[0., 0.]], [[1., 1.], [2., 2.]]],
            ragged_rank=1))
  @testing_utils.enable_v2_dtype_behavior
  def test_mixed_precision_embedding(self):
    """Under the `mixed_float16` policy, embedding outputs are float16."""
    try:
      policy.set_policy('mixed_float16')
      layer = keras.layers.Embedding(input_dim=5, output_dim=2)
      self.assertEqual(layer._dtype_policy.name, 'mixed_float16')
      outputs = layer(np.array([0, 1, 2]))
      self.assertEqual(outputs.dtype, 'float16')
    finally:
      # Always restore the global policy so later tests are unaffected.
      policy.set_policy('float32')
# Standard test entry point when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 4,863 | 33.992806 | 80 | py |
keras | keras-master/keras/layers/serialization_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer serialization utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras import combinations
from keras.layers import recurrent as rnn_v1
from keras.layers import recurrent_v2 as rnn_v2
from keras.layers.normalization import batch_normalization as batchnorm_v2
from keras.layers.normalization import batch_normalization_v1 as batchnorm_v1
class SerializableInt(int):
  """An `int` subclass implementing the Keras serialization protocol.

  Used by the tests below as a custom object that a layer may receive as a
  constructor argument and that must round-trip through
  `get_config` / `from_config`.
  """

  def __new__(cls, value):
    # `int` is immutable, so construction happens in `__new__`.
    return super().__new__(cls, value)

  def get_config(self):
    """Returns a JSON-serializable dict describing this object."""
    return {'value': int(self)}

  @classmethod
  def from_config(cls, config):
    """Recreates the object from the dict produced by `get_config`."""
    return cls(**config)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LayerSerializationTest(parameterized.TestCase, tf.test.TestCase):
  """Round-trip tests for `keras.layers.serialize` / `deserialize`."""
  def test_serialize_deserialize(self):
    """A configured Dense layer survives a serialize/deserialize round trip."""
    layer = keras.layers.Dense(
        3, activation='relu', kernel_initializer='ones', bias_regularizer='l2')
    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.activation, keras.activations.relu)
    self.assertEqual(new_layer.bias_regularizer.__class__,
                     keras.regularizers.L2)
    # The initializer class deserialized depends on whether TF2 is enabled.
    if tf.__internal__.tf2.enabled():
      self.assertEqual(new_layer.kernel_initializer.__class__,
                       keras.initializers.OnesV2)
    else:
      self.assertEqual(new_layer.kernel_initializer.__class__,
                       keras.initializers.Ones)
    self.assertEqual(new_layer.units, 3)
  def test_implicit_serialize_deserialize_fails_without_object(self):
    """Deserializing a config with an unknown custom class raises."""
    layer = keras.layers.Dense(
        SerializableInt(3),
        activation='relu',
        kernel_initializer='ones',
        bias_regularizer='l2')
    config = keras.layers.serialize(layer)
    # Because we're passing an unknown class here, deserialization should fail
    # unless we add SerializableInt to the custom object dict.
    with self.assertRaisesRegex(ValueError,
                                'Unknown config_item: SerializableInt.*'):
      _ = keras.layers.deserialize(config)
  def test_implicit_serialize_deserialize_succeeds_with_object(self):
    """Unknown custom classes deserialize when passed via `custom_objects`."""
    layer = keras.layers.Dense(
        SerializableInt(3),
        activation='relu',
        kernel_initializer='ones',
        bias_regularizer='l2')
    config = keras.layers.serialize(layer)
    # Because we're passing an unknown class here, deserialization should fail
    # unless we add SerializableInt to the custom object dict.
    new_layer = keras.layers.deserialize(
        config, custom_objects={'SerializableInt': SerializableInt})
    self.assertEqual(new_layer.activation, keras.activations.relu)
    self.assertEqual(new_layer.bias_regularizer.__class__,
                     keras.regularizers.L2)
    if tf.__internal__.tf2.enabled():
      self.assertEqual(new_layer.kernel_initializer.__class__,
                       keras.initializers.OnesV2)
    else:
      self.assertEqual(new_layer.kernel_initializer.__class__,
                       keras.initializers.Ones)
    self.assertEqual(new_layer.units.__class__, SerializableInt)
    self.assertEqual(new_layer.units, 3)
  @parameterized.parameters(
      [batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization])
  def test_serialize_deserialize_batchnorm(self, batchnorm_layer):
    """Both BN variants serialize to 'BatchNormalization' and round-trip."""
    layer = batchnorm_layer(
        momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
    config = keras.layers.serialize(layer)
    self.assertEqual(config['class_name'], 'BatchNormalization')
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.momentum, 0.9)
    # Deserialization picks the V1 or V2 class based on the TF2 flag.
    if tf.__internal__.tf2.enabled():
      self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization)
      self.assertEqual(new_layer.beta_initializer.__class__,
                       keras.initializers.ZerosV2)
    else:
      self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization)
      self.assertEqual(new_layer.beta_initializer.__class__,
                       keras.initializers.Zeros)
    self.assertEqual(new_layer.gamma_regularizer.__class__,
                     keras.regularizers.L2)
  @parameterized.parameters(
      [batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization])
  def test_deserialize_batchnorm_backwards_compatibility(self, batchnorm_layer):
    """Old-style BatchNormalization configs still deserialize correctly."""
    layer = batchnorm_layer(
        momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.momentum, 0.9)
    if tf.__internal__.tf2.enabled():
      self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization)
      self.assertEqual(new_layer.beta_initializer.__class__,
                       keras.initializers.ZerosV2)
    else:
      self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization)
      self.assertEqual(new_layer.beta_initializer.__class__,
                       keras.initializers.Zeros)
    self.assertEqual(new_layer.gamma_regularizer.__class__,
                     keras.regularizers.L2)
  @parameterized.parameters([rnn_v1.LSTM, rnn_v2.LSTM])
  def test_serialize_deserialize_lstm(self, layer):
    """Both LSTM variants serialize to 'LSTM'; deserialization follows TF2."""
    lstm = layer(5, return_sequences=True)
    config = keras.layers.serialize(lstm)
    self.assertEqual(config['class_name'], 'LSTM')
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.units, 5)
    self.assertEqual(new_layer.return_sequences, True)
    if tf.__internal__.tf2.enabled():
      self.assertIsInstance(new_layer, rnn_v2.LSTM)
    else:
      self.assertIsInstance(new_layer, rnn_v1.LSTM)
      self.assertNotIsInstance(new_layer, rnn_v2.LSTM)
  @parameterized.parameters([rnn_v1.GRU, rnn_v2.GRU])
  def test_serialize_deserialize_gru(self, layer):
    """Both GRU variants serialize to 'GRU'; deserialization follows TF2."""
    gru = layer(5, return_sequences=True)
    config = keras.layers.serialize(gru)
    self.assertEqual(config['class_name'], 'GRU')
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.units, 5)
    self.assertEqual(new_layer.return_sequences, True)
    if tf.__internal__.tf2.enabled():
      self.assertIsInstance(new_layer, rnn_v2.GRU)
    else:
      self.assertIsInstance(new_layer, rnn_v1.GRU)
      self.assertNotIsInstance(new_layer, rnn_v2.GRU)
# Standard test entry point when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 6,998 | 40.91018 | 80 | py |
keras | keras-master/keras/layers/separable_convolutional_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for separable convolutional layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
@keras_parameterized.run_all_keras_modes
class SeparableConv1DTest(keras_parameterized.TestCase):
  """Tests for `keras.layers.SeparableConv1D`."""
  def _run_test(self, kwargs):
    """Runs the generic layer test on a small (2, 7, 3) input."""
    num_samples = 2
    stack_size = 3
    length = 7
    with self.cached_session():
      testing_utils.layer_test(
          keras.layers.SeparableConv1D,
          kwargs=kwargs,
          input_shape=(num_samples, length, stack_size))
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'}),
      ('padding_same', {'padding': 'same'}),
      ('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
      ('padding_causal', {'padding': 'causal'}),
      ('strides', {'strides': 2}),
      ('dilation_rate', {'dilation_rate': 2}),
      ('depth_multiplier', {'depth_multiplier': 2}),
  )
  def test_separable_conv1d(self, kwargs):
    """Layer works for each padding/stride/dilation/multiplier combination."""
    kwargs['filters'] = 2
    kwargs['kernel_size'] = 3
    self._run_test(kwargs)
  def test_separable_conv1d_regularizers(self):
    """Weight and activity regularizers create the expected loss tensors."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'depthwise_regularizer': 'l2',
        'pointwise_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.cached_session():
      layer = keras.layers.SeparableConv1D(**kwargs)
      layer.build((None, 5, 2))
      # Three weight regularizers (depthwise, pointwise, bias) at build time.
      self.assertEqual(len(layer.losses), 3)
      layer(keras.backend.variable(np.ones((1, 5, 2))))
      # The activity regularizer adds a fourth loss once the layer is called.
      self.assertEqual(len(layer.losses), 4)
  def test_separable_conv1d_constraints(self):
    """Constraints are stored on the corresponding built weights."""
    d_constraint = lambda x: x
    p_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'pointwise_constraint': p_constraint,
        'depthwise_constraint': d_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.cached_session():
      layer = keras.layers.SeparableConv1D(**kwargs)
      layer.build((None, 5, 2))
      self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
      self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
@keras_parameterized.run_all_keras_modes
class SeparableConv2DTest(keras_parameterized.TestCase):
  """Tests for `keras.layers.SeparableConv2D`."""
  def _run_test(self, kwargs):
    """Runs the generic layer test on a small (2, 7, 6, 3) input."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    with self.cached_session():
      testing_utils.layer_test(
          keras.layers.SeparableConv2D,
          kwargs=kwargs,
          input_shape=(num_samples, num_row, num_col, stack_size))
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'}),
      ('padding_same', {'padding': 'same'}),
      ('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
      ('strides', {'strides': 2}),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {'data_format': 'channels_first'}),
      ('dilation_rate', {'dilation_rate': 2}),
      ('depth_multiplier', {'depth_multiplier': 2}),
  )
  def test_separable_conv2d(self, kwargs):
    """Layer works for each parameter combination (GPU-only for NCHW)."""
    kwargs['filters'] = 2
    kwargs['kernel_size'] = 3
    if 'data_format' not in kwargs or tf.test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs)
  def test_separable_conv2d_regularizers(self):
    """Weight and activity regularizers create the expected loss tensors."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'depthwise_regularizer': 'l2',
        'pointwise_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.cached_session():
      layer = keras.layers.SeparableConv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      # Three weight regularizers (depthwise, pointwise, bias) at build time.
      self.assertEqual(len(layer.losses), 3)
      layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
      # The activity regularizer adds a fourth loss once the layer is called.
      self.assertEqual(len(layer.losses), 4)
  def test_separable_conv2d_constraints(self):
    """Constraints are stored on the corresponding built weights."""
    d_constraint = lambda x: x
    p_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'pointwise_constraint': p_constraint,
        'depthwise_constraint': d_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.cached_session():
      layer = keras.layers.SeparableConv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
      self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
| 5,545 | 32.612121 | 80 | py |
keras | keras-master/keras/layers/serialization.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer serialization/deserialization functions."""
import tensorflow.compat.v2 as tf
# pylint: disable=wildcard-import
# pylint: disable=unused-import
import threading
from keras.engine import base_layer
from keras.engine import input_layer
from keras.engine import input_spec
from keras.layers import advanced_activations
from keras.layers import convolutional
from keras.layers import convolutional_recurrent
from keras.layers import core
from keras.layers import cudnn_recurrent
from keras.layers import dense_attention
from keras.layers import einsum_dense
from keras.layers import embeddings
from keras.layers import local
from keras.layers import merge
from keras.layers import multi_head_attention
from keras.layers import noise
from keras.layers import pooling
from keras.layers import recurrent
from keras.layers import recurrent_v2
from keras.layers import rnn_cell_wrapper_v2
from keras.layers import wrappers
from keras.layers.normalization import batch_normalization
from keras.layers.normalization import batch_normalization_v1
from keras.layers.normalization import layer_normalization
from keras.layers.preprocessing import category_crossing
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import discretization
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing import image_preprocessing
from keras.layers.preprocessing import integer_lookup
from keras.layers.preprocessing import normalization as preprocessing_normalization
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing import text_vectorization
from keras.utils import generic_utils
from keras.utils import tf_inspect as inspect
from tensorflow.python.util.tf_export import keras_export
# Modules whose `Layer` subclasses are registered for deserialization in
# both TF1 and TF2 modes.
ALL_MODULES = (base_layer, input_layer, advanced_activations, convolutional,
               convolutional_recurrent, core, cudnn_recurrent, dense_attention,
               embeddings, einsum_dense, local, merge, noise,
               batch_normalization_v1, layer_normalization, pooling,
               image_preprocessing, recurrent, wrappers, hashing,
               category_crossing, category_encoding, discretization,
               multi_head_attention, integer_lookup,
               preprocessing_normalization, string_lookup, text_vectorization)
# Modules whose classes override the corresponding V1 entries above when
# TF2 behavior is enabled.
ALL_V2_MODULES = (rnn_cell_wrapper_v2, batch_normalization, layer_normalization,
                  recurrent_v2)
# ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
  """Populates dict ALL_OBJECTS with every built-in layer.

  The result is cached per-thread in `LOCAL.ALL_OBJECTS` and regenerated
  whenever the TF2-enabled flag changes, so the V1/V2 layer variants stay
  in sync with the active TensorFlow mode.
  """
  global LOCAL
  if not hasattr(LOCAL, 'ALL_OBJECTS'):
    LOCAL.ALL_OBJECTS = {}
    LOCAL.GENERATED_WITH_V2 = None
  if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled(
  ):
    # Objects dict is already generated for the proper TF version:
    # do nothing.
    return
  LOCAL.ALL_OBJECTS = {}
  LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()
  # Register every class in ALL_MODULES that subclasses `Layer`.
  base_cls = base_layer.Layer
  generic_utils.populate_dict_with_module_objects(
      LOCAL.ALL_OBJECTS,
      ALL_MODULES,
      obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
  # Overwrite certain V1 objects with V2 versions
  if tf.__internal__.tf2.enabled():
    generic_utils.populate_dict_with_module_objects(
        LOCAL.ALL_OBJECTS,
        ALL_V2_MODULES,
        obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
  # These deserialization aliases are added for backward compatibility,
  # as in TF 1.13, "BatchNormalizationV1" and "BatchNormalizationV2"
  # were used as class name for v1 and v2 version of BatchNormalization,
  # respectively. Here we explicitly convert them to their canonical names.
  LOCAL.ALL_OBJECTS[
      'BatchNormalizationV1'] = batch_normalization_v1.BatchNormalization
  LOCAL.ALL_OBJECTS[
      'BatchNormalizationV2'] = batch_normalization.BatchNormalization
  # Prevent circular dependencies.
  from keras import models  # pylint: disable=g-import-not-at-top
  from keras.premade.linear import LinearModel  # pylint: disable=g-import-not-at-top
  from keras.premade.wide_deep import WideDeepModel  # pylint: disable=g-import-not-at-top
  from keras.feature_column.sequence_feature_column import SequenceFeatures  # pylint: disable=g-import-not-at-top
  # Non-Layer objects that must nevertheless be deserializable.
  LOCAL.ALL_OBJECTS['Input'] = input_layer.Input
  LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec
  LOCAL.ALL_OBJECTS['Functional'] = models.Functional
  LOCAL.ALL_OBJECTS['Model'] = models.Model
  LOCAL.ALL_OBJECTS['SequenceFeatures'] = SequenceFeatures
  LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential
  LOCAL.ALL_OBJECTS['LinearModel'] = LinearModel
  LOCAL.ALL_OBJECTS['WideDeepModel'] = WideDeepModel
  if tf.__internal__.tf2.enabled():
    from keras.feature_column.dense_features_v2 import DenseFeatures  # pylint: disable=g-import-not-at-top
    LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures
  else:
    from keras.feature_column.dense_features import DenseFeatures  # pylint: disable=g-import-not-at-top
    LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures
  # Merge layers, function versions.
  LOCAL.ALL_OBJECTS['add'] = merge.add
  LOCAL.ALL_OBJECTS['subtract'] = merge.subtract
  LOCAL.ALL_OBJECTS['multiply'] = merge.multiply
  LOCAL.ALL_OBJECTS['average'] = merge.average
  LOCAL.ALL_OBJECTS['maximum'] = merge.maximum
  LOCAL.ALL_OBJECTS['minimum'] = merge.minimum
  LOCAL.ALL_OBJECTS['concatenate'] = merge.concatenate
  LOCAL.ALL_OBJECTS['dot'] = merge.dot
@keras_export('keras.layers.serialize')
def serialize(layer):
  """Serializes a `Layer` object into a JSON-compatible representation.

  Args:
    layer: The `Layer` object to serialize.

  Returns:
    A JSON-serializable dict representing the object's config.

  Example:

  ```python
  from pprint import pprint
  model = tf.keras.models.Sequential()
  model.add(tf.keras.Input(shape=(16,)))
  model.add(tf.keras.layers.Dense(32, activation='relu'))
  pprint(tf.keras.layers.serialize(model))
  # prints the configuration of the model, as a dict.
  ```
  """
  return generic_utils.serialize_keras_object(layer)
@keras_export('keras.layers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiates a layer from a config dictionary.

  Args:
    config: dict of the form {'class_name': str, 'config': dict}
    custom_objects: dict mapping class names (or function names) of custom
      (non-Keras) objects to class/functions

  Returns:
    Layer instance (may be Model, Sequential, Network, Layer...)

  Example:

  ```python
  # Configuration of Dense(32, activation='relu')
  config = {
    'class_name': 'Dense',
    'config': {
      'activation': 'relu',
      'activity_regularizer': None,
      'bias_constraint': None,
      'bias_initializer': {'class_name': 'Zeros', 'config': {}},
      'bias_regularizer': None,
      'dtype': 'float32',
      'kernel_constraint': None,
      'kernel_initializer': {'class_name': 'GlorotUniform',
                             'config': {'seed': None}},
      'kernel_regularizer': None,
      'name': 'dense',
      'trainable': True,
      'units': 32,
      'use_bias': True
    }
  }
  dense_layer = tf.keras.layers.deserialize(config)
  ```
  """
  # Make sure the registry of built-in layers matches the current TF
  # (v1/v2) mode before looking up `class_name`.
  populate_deserializable_objects()
  return generic_utils.deserialize_keras_object(
      config,
      module_objects=LOCAL.ALL_OBJECTS,
      custom_objects=custom_objects,
      printable_module_name='layer')
| 8,241 | 37.877358 | 114 | py |
keras | keras-master/keras/layers/merge_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merge layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import backend
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
@keras_parameterized.run_all_keras_modes
class MergeLayersTest(keras_parameterized.TestCase):
  """Execution tests for merge layers (Add, Subtract, Multiply, ...)."""

  def test_merge_add(self):
    """`Add` sums its inputs; masks are validated and propagated."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    i3 = keras.layers.Input(shape=(4, 5))

    add_layer = keras.layers.Add()
    o = add_layer([i1, i2, i3])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2, i3], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)

    # No per-input masks -> no output mask.
    self.assertEqual(
        add_layer.compute_mask([i1, i2, i3], [None, None, None]), None)
    self.assertTrue(
        np.all(
            backend.eval(
                add_layer.compute_mask(
                    [i1, i2], [backend.variable(x1), backend.variable(x2)]))))

    # Malformed inputs/mask arguments must raise.
    with self.assertRaisesRegex(ValueError, '`mask` should be a list.'):
      add_layer.compute_mask([i1, i2, i3], x1)
    with self.assertRaisesRegex(ValueError, '`inputs` should be a list.'):
      add_layer.compute_mask(i1, [None, None, None])
    with self.assertRaisesRegex(ValueError, ' should have the same length.'):
      add_layer.compute_mask([i1, i2, i3], [None, None])

  def test_merge_subtract(self):
    """`Subtract` computes i1 - i2 and requires exactly two inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    i3 = keras.layers.Input(shape=(4, 5))

    subtract_layer = keras.layers.Subtract()
    o = subtract_layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, x1 - x2, atol=1e-4)

    self.assertEqual(subtract_layer.compute_mask([i1, i2], [None, None]), None)
    self.assertTrue(
        np.all(
            backend.eval(
                subtract_layer.compute_mask(
                    [i1, i2], [backend.variable(x1), backend.variable(x2)]))))

    with self.assertRaisesRegex(ValueError, '`mask` should be a list.'):
      subtract_layer.compute_mask([i1, i2], x1)
    with self.assertRaisesRegex(ValueError, '`inputs` should be a list.'):
      subtract_layer.compute_mask(i1, [None, None])

    # Subtract is defined only for exactly two inputs.
    with self.assertRaisesRegex(ValueError,
                                'layer should be called on exactly 2 inputs'):
      subtract_layer([i1, i2, i3])
    with self.assertRaisesRegex(ValueError,
                                'layer should be called on exactly 2 inputs'):
      subtract_layer([i1])

  def test_merge_multiply(self):
    """`multiply` computes the elementwise product of all inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    i3 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.multiply([i1, i2, i3])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2, i3], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)

  def test_merge_average(self):
    """`average` computes the elementwise mean of its inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.average([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)

  def test_merge_maximum(self):
    """`maximum` computes the elementwise maximum of its inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.maximum([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)

  def test_merge_minimum(self):
    """`minimum` computes the elementwise minimum of its inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.minimum([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)

  def test_merge_concatenate(self):
    """`Concatenate` joins inputs along the given axis and handles masks."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    concat_layer = keras.layers.Concatenate(axis=1)
    o = concat_layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 8, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 8, 5))
    self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)

    self.assertEqual(concat_layer.compute_mask([i1, i2], [None, None]), None)
    self.assertTrue(
        np.all(
            backend.eval(
                concat_layer.compute_mask(
                    [i1, i2], [backend.variable(x1), backend.variable(x2)]))))

    # Should work with unit-length input.
    unit_length_o = concat_layer([i1])
    self.assertListEqual(unit_length_o.shape.as_list(), i1.shape.as_list())

    with self.assertRaisesRegex(ValueError, '`mask` should be a list.'):
      concat_layer.compute_mask([i1, i2], x1)
    with self.assertRaisesRegex(ValueError, '`inputs` should be a list.'):
      concat_layer.compute_mask(i1, [None, None])
    with self.assertRaisesRegex(ValueError, 'should have the same length'):
      concat_layer.compute_mask([i1, i2], [None])
    with self.assertRaisesRegex(ValueError,
                                'layer should be called on a list of inputs'):
      concat_layer(i1)

  def test_merge_dot(self):
    """`Dot` computes per-sample dot products for both axis conventions."""
    i1 = keras.layers.Input(shape=(4,))
    i2 = keras.layers.Input(shape=(4,))
    o = keras.layers.dot([i1, i2], axes=1)
    self.assertListEqual(o.shape.as_list(), [None, 1])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    _ = keras.layers.Dot(axes=1).get_config()

    x1 = np.random.random((2, 4))
    x2 = np.random.random((2, 4))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 1))
    expected = np.zeros((2, 1))
    expected[0, 0] = np.dot(x1[0], x2[0])
    expected[1, 0] = np.dot(x1[1], x2[1])
    self.assertAllClose(out, expected, atol=1e-4)

    # Test with negative tuple of axes.
    o = keras.layers.dot([i1, i2], axes=(-1, -1))
    self.assertListEqual(o.shape.as_list(), [None, 1])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 1))
    self.assertAllClose(out, expected, atol=1e-4)

    # test compute_output_shape
    layer = keras.layers.Dot(axes=-1)
    self.assertEqual(layer.compute_output_shape([(4, 5), (4, 5)]), (4, 1))

  @parameterized.named_parameters(
      *testing_utils.generate_combinations_with_testcase_name(
          layer=[keras.layers.Add, keras.layers.Subtract,
                 keras.layers.Multiply, keras.layers.Minimum,
                 keras.layers.Maximum, keras.layers.Average,
                 keras.layers.Concatenate]))
  def test_merge_with_ragged_input(self, layer):
    """Ragged `Add` result matches the dense equivalent after padding.

    NOTE(review): the parameterized `layer` argument is unused below; every
    case exercises `Add` only. Substituting `layer()` naively would break the
    `Concatenate` case (padding positions differ between dense-concat and
    ragged-concat-then-pad), so this needs a per-layer expected value to fix.
    """
    ragged_data = tf.ragged.constant(
        [[1., 1., 1.], [1., 1.], [1., 1., 1., 1.]], ragged_rank=1)
    dense_data = ragged_data.to_tensor()
    input1 = keras.Input(shape=(None,), ragged=True)
    input2 = keras.Input(shape=(None,), ragged=True)
    out = keras.layers.Add()([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=out)
    out_ragged = model.predict([ragged_data, ragged_data], steps=1)
    out_ragged = convert_ragged_tensor_value(out_ragged).to_tensor()

    input1 = keras.Input(shape=(None,))
    input2 = keras.Input(shape=(None,))
    out = keras.layers.Add()([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=out)
    out_dense = model.predict([dense_data, dense_data], steps=1)

    self.assertAllEqual(out_dense, out_ragged)

  @parameterized.named_parameters(
      *testing_utils.generate_combinations_with_testcase_name(
          layer=[keras.layers.Add, keras.layers.Subtract,
                 keras.layers.Multiply, keras.layers.Minimum,
                 keras.layers.Maximum, keras.layers.Average]))
  def test_merge_with_scalar_input(self, layer):
    """Each elementwise merge layer accepts 0-d (scalar) inputs."""
    x1 = np.array((1))
    x2 = np.array((2))
    out = layer()([x1, x2])
    self.assertEqual(out.shape, ())
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MergeLayersTestNoExecution(tf.test.TestCase):
  """Shape-inference and error-path tests that never execute the layers."""

  def test_merge_elementwise_errors(self):
    """Elementwise merges reject shape mismatches and non-list inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.add([i1, i2])
    with self.assertRaises(ValueError):
      keras.layers.add([i1])
    with self.assertRaises(ValueError):
      keras.layers.add(i1)
    # NOTE(review): duplicate of the single-element check above.
    with self.assertRaises(ValueError):
      keras.layers.add([i1])

  def test_concatenate_errors(self):
    """`concatenate` rejects incompatible shapes and non-list inputs."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
      keras.layers.concatenate([i1, i2], axis=-1)
    with self.assertRaisesRegex(ValueError, 'called on a list'):
      keras.layers.concatenate(i1, axis=-1)

  def test_concatenate_with_partial_shape(self):
    """Partially-known dims are accepted when they can still be compatible."""
    i1 = keras.layers.Input(shape=(5,), batch_size=32)
    i2 = keras.layers.Input(shape=(5,))
    i3 = keras.layers.Input(shape=(4, 5), batch_size=32)
    i4 = keras.layers.Input(shape=(None,), batch_size=64)
    i5 = keras.layers.Input(shape=(7,))

    # Valid case since the i2 has a dynamic batch size.
    keras.layers.concatenate([i1, i2], axis=-1)

    # Different rank
    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
      keras.layers.concatenate([i1, i3], axis=-1)

    # Valid case with partial dimension information
    keras.layers.concatenate([i1, i4], axis=0)
    keras.layers.concatenate([i2, i4], axis=0)
    keras.layers.concatenate([i2, i4], axis=1)
    keras.layers.concatenate([i1, i2, i4], axis=0)
    keras.layers.concatenate([i1, i5], axis=1)

    # Mismatch in batch dimension.
    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
      keras.layers.concatenate([i1, i4], axis=-1)
    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
      keras.layers.concatenate([i1, i2, i4], axis=-1)

  def test_dot_errors(self):
    """`dot` rejects bad axes, non-lists, and input counts other than two."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    i3 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot(i1, axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2, i3], axes=-1)
    with self.assertRaises(ValueError):
      dot = keras.layers.Dot(1)
      dot.compute_output_shape(1)

  def test_merge_subtract(self):
    """`subtract` infers output shape; invalid shapes/arity raise."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    y = keras.layers.subtract([i1, i2])
    self.assertEqual(y.shape.as_list(), [None, 4, 5])

    # Test invalid use cases
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaises(ValueError):
      keras.layers.subtract([i1, i2])
    with self.assertRaises(ValueError):
      keras.layers.subtract([i1, i1, i1])

  def test_merge_add_masking(self):
    """A mask on one `Add` input propagates to the output mask."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    m1 = keras.layers.Masking()(i1)
    layer = keras.layers.Add()
    o = layer([m1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    mask = layer.output_mask
    self.assertListEqual(mask.shape.as_list(), [None, 4])

  def test_merge_add_dynamic_shape(self):
    """`Add` broadcasts a dynamic dim against a static one in its output."""
    i1 = keras.Input(batch_shape=(4, None), dtype='float32')
    i2 = keras.Input(batch_shape=(4, 5), dtype='float32')
    layer = keras.layers.Add()
    o = layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [4, 5])

  def test_merge_concatenate_masking(self):
    """A mask on one `Concatenate` input propagates to the output mask."""
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    m1 = keras.layers.Masking()(i1)
    layer = keras.layers.Concatenate()
    o = layer([m1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 10])
    mask = layer.output_mask
    self.assertListEqual(mask.shape.as_list(), [None, 4])

  def test_merge_concatenate_sparse_shape(self):
    """`Concatenate` infers shapes for sparse inputs, with/without batch."""
    i1 = keras.layers.Input(shape=(1,), batch_size=2, sparse=True)
    i2 = keras.layers.Input(shape=(2,), batch_size=2, sparse=True)
    layer = keras.layers.Concatenate(axis=1)
    o = layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [2, 3])

    # Make sure it also respect None as the batch size
    i1 = keras.layers.Input(shape=(1,), sparse=True)
    i2 = keras.layers.Input(shape=(2,), sparse=True)
    layer = keras.layers.Concatenate(axis=1)
    o = layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 3])

  def test_user_changes_to_input_structure(self):
    """Mutating the Python list passed to a layer must not corrupt its node."""
    a = keras.layers.Input(shape=(4, 5))
    struct = [a, a]
    concat1 = keras.layers.Concatenate(1)
    b = concat1(struct)
    struct.append(b)
    concat2 = keras.layers.Concatenate(1)
    c = concat2(struct)

    # Checks that the append to `struct` doesn't affect `concat1`s
    # node data.
    self.assertLen(concat1.inbound_nodes[0].input_tensors, 2)
    self.assertLen(concat2.inbound_nodes[0].input_tensors, 3)
    keras.Model(a, c)  # Ensure model can be built.
def convert_ragged_tensor_value(inputs):
  """Converts a v1 `RaggedTensorValue` into a `tf.RaggedTensor`.

  Anything that is not a `RaggedTensorValue` is passed through unchanged.
  """
  if not isinstance(inputs, tf.compat.v1.ragged.RaggedTensorValue):
    return inputs
  dense_values = tf.convert_to_tensor(
      value=inputs.flat_values, name='flat_values')
  return tf.RaggedTensor.from_nested_row_splits(
      dense_values, inputs.nested_row_splits, validate=False)
# Run all test cases in this module under the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
| 15,914 | 37.535109 | 80 | py |
keras | keras-master/keras/layers/core/dense.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Dense layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import activations
from keras import backend as K
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.

  `Dense` implements the operation:
  `output = activation(dot(input, kernel) + bias)`
  where `activation` is the element-wise activation function
  passed as the `activation` argument, `kernel` is a weights matrix
  created by the layer, and `bias` is a bias vector created by the layer
  (only applicable if `use_bias` is `True`). These are all attributes of
  `Dense`.

  Note: If the input to the layer has a rank greater than 2, then `Dense`
  computes the dot product between the `inputs` and the `kernel` along the
  last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).
  For example, if input has dimensions `(batch_size, d0, d1)`,
  then we create a `kernel` with shape `(d1, units)`, and the `kernel` operates
  along axis 2 of the `input`, on every sub-tensor of shape `(1, 1, d1)`
  (there are `batch_size * d0` such sub-tensors).
  The output in this case will have shape `(batch_size, d0, units)`.

  Besides, layer attributes cannot be modified after the layer has been called
  once (except the `trainable` attribute).

  When a popular kwarg `input_shape` is passed, then keras will create
  an input layer to insert before the current layer. This can be treated
  equivalent to explicitly defining an `InputLayer`.

  Example:

  >>> # Create a `Sequential` model and add a Dense layer as the first layer.
  >>> model = tf.keras.models.Sequential()
  >>> model.add(tf.keras.Input(shape=(16,)))
  >>> model.add(tf.keras.layers.Dense(32, activation='relu'))
  >>> # Now the model will take as input arrays of shape (None, 16)
  >>> # and output arrays of shape (None, 32).
  >>> # Note that after the first layer, you don't need to specify
  >>> # the size of the input anymore:
  >>> model.add(tf.keras.layers.Dense(32))
  >>> model.output_shape
  (None, 32)

  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.

  Input shape:
    N-D tensor with shape: `(batch_size, ..., input_dim)`.
    The most common situation would be
    a 2D input with shape `(batch_size, input_dim)`.

  Output shape:
    N-D tensor with shape: `(batch_size, ..., units)`.
    For instance, for a 2D input with shape `(batch_size, input_dim)`,
    the output would have shape `(batch_size, units)`.
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(Dense, self).__init__(
        activity_regularizer=activity_regularizer, **kwargs)

    # Coerce non-int `units` (e.g. numpy ints, floats) to a plain int.
    self.units = int(units) if not isinstance(units, int) else units
    if self.units < 0:
      raise ValueError(f'Received an invalid value for `units`, expected '
                       f'a positive integer. Received: units={units}')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True

  def build(self, input_shape):
    # Creates `kernel` (and optionally `bias`) once the input's last
    # dimension is known.
    dtype = tf.as_dtype(self.dtype or K.floatx())
    if not (dtype.is_floating or dtype.is_complex):
      raise TypeError('A Dense layer can only be built with a floating-point '
                      f'dtype. Received: dtype={dtype}')

    input_shape = tf.TensorShape(input_shape)
    last_dim = tf.compat.dimension_value(input_shape[-1])
    if last_dim is None:
      raise ValueError('The last dimension of the inputs to a Dense layer '
                       'should be defined. Found None. '
                       f'Full input shape received: {input_shape}')
    # Pin the last input dimension so future calls must match it.
    self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    if inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:
      inputs = tf.cast(inputs, dtype=self._compute_dtype_object)

    if isinstance(inputs, tf.RaggedTensor) and inputs.shape[-1] is not None:
      # In case we encounter a RaggedTensor with a fixed last dimension (last
      # dimension not ragged), we can map the call method to the flat values.
      return tf.ragged.map_flat_values(self.call, inputs)

    rank = inputs.shape.rank
    if rank == 2 or rank is None:
      # We use embedding_lookup_sparse as a more efficient matmul operation for
      # large sparse input tensors. The op will result in a sparse gradient, as
      # opposed to sparse_ops.sparse_tensor_dense_matmul which results in dense
      # gradients. This can lead to significant speedups, see b/171762937.
      if isinstance(inputs, tf.SparseTensor):
        # We need to fill empty rows, as the op assumes at least one id per row.
        inputs, _ = tf.sparse.fill_empty_rows(inputs, 0)
        # We need to do some munging of our input to use the embedding lookup as
        # a matrix multiply. We split our input matrix into separate ids and
        # weights tensors. The values of the ids tensor should be the column
        # indices of our input matrix and the values of the weights tensor
        # can continue to be the actual matrix weights.
        # The column arrangement of ids and weights
        # will be summed over and does not matter. See the documentation for
        # sparse_ops.sparse_tensor_dense_matmul for a more detailed explanation
        # of the inputs to both ops.
        ids = tf.SparseTensor(
            indices=inputs.indices,
            values=inputs.indices[:, 1],
            dense_shape=inputs.dense_shape)
        weights = inputs
        outputs = tf.nn.embedding_lookup_sparse(
            self.kernel, ids, weights, combiner='sum')
      else:
        outputs = tf.raw_ops.MatMul(a=inputs, b=self.kernel)
    # Broadcast kernel to inputs.
    else:
      outputs = tf.tensordot(inputs, self.kernel, [[rank - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      if not tf.executing_eagerly():
        shape = inputs.shape.as_list()
        output_shape = shape[:-1] + [self.kernel.shape[-1]]
        outputs.set_shape(output_shape)

    if self.use_bias:
      outputs = tf.nn.bias_add(outputs, self.bias)

    if self.activation is not None:
      outputs = self.activation(outputs)
    return outputs

  def compute_output_shape(self, input_shape):
    # Output shape is the input shape with the last dimension replaced
    # by `units`.
    input_shape = tf.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if tf.compat.dimension_value(input_shape[-1]) is None:
      raise ValueError('The last dimension of the input shape of a Dense layer '
                       'should be defined. Found None. '
                       f'Received: input_shape={input_shape}')
    return input_shape[:-1].concatenate(self.units)

  def get_config(self):
    config = super(Dense, self).get_config()
    config.update({
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    })
    return config
| 10,585 | 42.925311 | 80 | py |
keras | keras-master/keras/layers/core/activation.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Activation layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import activations
from keras.engine.base_layer import Layer
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Activation')
class Activation(Layer):
  """Applies an activation function to an output.

  Args:
    activation: Activation function, such as `tf.nn.relu`, or string name of
      built-in activation function, such as "relu".

  Usage:

  >>> layer = tf.keras.layers.Activation('relu')
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.Activation(tf.nn.relu)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, activation, **kwargs):
    super(Activation, self).__init__(**kwargs)
    # Masks pass through unchanged since the op is elementwise.
    self.supports_masking = True
    # Resolve string names (e.g. 'relu') into the callable activation.
    self.activation = activations.get(activation)

  def call(self, inputs):
    # Apply the activation elementwise.
    return self.activation(inputs)

  def compute_output_shape(self, input_shape):
    # Elementwise op: output shape equals input shape.
    return input_shape

  def get_config(self):
    base_config = super(Activation, self).get_config()
    base_config.update(
        {'activation': activations.serialize(self.activation)})
    return base_config
| 2,242 | 32.477612 | 80 | py |
keras | keras-master/keras/layers/core/tf_op_layer.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the TFOpLambda layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import,g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from keras import backend as K
from keras.engine import keras_tensor
from keras.engine.base_layer import Layer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
from tensorflow.python.util.tf_export import get_symbol_from_name
class ClassMethod(Layer):
  """Wraps a TF API Class's class method in a `Layer` object.

  It is inserted by the Functional API construction whenever users call
  a supported TF Class's class method on KerasTensors.

  This is useful in the case where users do something like:
  x = keras.Input(...)
  y = keras.Input(...)
  out = tf.RaggedTensor.from_row_splits(x, y)
  """

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, cls_ref, method_name, **kwargs):
    # `cls_ref` is the class whose classmethod is wrapped; `method_name`
    # is looked up on it with getattr at call time.
    self.cls_ref = cls_ref
    self.method_name = method_name
    # Resolve the public API symbol name for the class (TF namespace first,
    # then the `keras` namespace); needed to serialize the layer.
    self.cls_symbol = (
        get_canonical_name_for_symbol(
            self.cls_ref, add_prefix_to_v1_names=True) or
        get_canonical_name_for_symbol(
            self.cls_ref, api_name='keras', add_prefix_to_v1_names=True))
    if 'name' not in kwargs:
      kwargs['name'] = K.unique_object_name(
          'tf.' + self.cls_symbol + '.' + self.method_name,
          zero_based=True,
          avoid_observed_names=True)
    kwargs['autocast'] = False

    # Do not individually trace op layers in the SavedModel.
    self._must_restore_from_config = True

    super(ClassMethod, self).__init__(**kwargs)

    # Preserve all argument data structures when saving/loading a config
    # (e.g., don't unnest lists that contain one element)
    self._preserve_input_structure_in_config = True

    self._expects_training_arg = False
    self._expects_mask_arg = False

  def call(self, args, kwargs):
    # Dispatch to the wrapped classmethod with the captured args/kwargs.
    return getattr(self.cls_ref, self.method_name)(*args, **kwargs)

  def get_config(self):
    if not self.cls_symbol:
      # Only classes with a public TF API symbol can be serialized portably.
      raise ValueError(
          'This Keras class method conversion tried to convert '
          f'a method belonging to class {self.cls_symbol}, a class '
          'that is not publicly exposed in the TensorFlow API. '
          'To ensure cross-version compatibility of Keras models '
          'that use op layers, only op layers produced from '
          'public TensorFlow API symbols can be serialized.')
    config = {'cls_symbol': self.cls_symbol, 'method_name': self.method_name}

    base_config = super(ClassMethod, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Rebuild the class reference from its serialized public symbol name.
    config = config.copy()
    symbol_name = config.pop('cls_symbol')
    cls_ref = get_symbol_from_name(symbol_name)
    if not cls_ref:
      raise ValueError(f'TensorFlow symbol `{symbol_name}` could not be found.')

    config['cls_ref'] = cls_ref
    return cls(**config)
class KerasOpDispatcher(tf.__internal__.dispatch.GlobalOpDispatcher):
  """A global dispatcher that allows building a functional model with TF Ops."""

  def handle(self, op, args, kwargs):
    """Handle the specified operation with the specified arguments."""
    # Intercept only calls that involve at least one symbolic KerasTensor;
    # everything else falls through to the regular TF op implementation.
    if any(
        isinstance(x, keras_tensor.KerasTensor)
        for x in tf.nest.flatten([args, kwargs])):
      # Wrap the op in a TFOpLambda layer so the call becomes a node in the
      # functional graph being built.
      return TFOpLambda(op)(*args, **kwargs)
    else:
      return self.NOT_SUPPORTED


# Install the dispatcher globally at import time.
KerasOpDispatcher().register()
class InstanceProperty(Layer):
  """Wraps an instance property access (e.g. `x.foo`) in a Keras Layer.

  This layer takes an attribute name `attr_name` in the constructor and,
  when called on input tensor `obj` returns `obj.attr_name`.

  KerasTensors specialized for specific extension types use it to
  represent instance property accesses on the represented object in the
  case where the property needs to be dynamically accessed as opposed to
  being statically computed from the typespec, e.g.

  x = keras.Input(..., ragged=True)
  out = x.flat_values
  """

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, attr_name, **kwargs):
    # Name of the attribute read off the represented value in `call`.
    self.attr_name = attr_name

    if 'name' not in kwargs:
      # Auto-generate a unique name like 'input.flat_values'; avoid observed
      # names because users cannot easily control auto-generated names.
      kwargs['name'] = K.unique_object_name(
          'input.' + self.attr_name, zero_based=True, avoid_observed_names=True)
    # Disable automatic dtype casting of this layer's inputs.
    kwargs['autocast'] = False

    # Do not individually trace op layers in the SavedModel.
    self._must_restore_from_config = True

    super(InstanceProperty, self).__init__(**kwargs)

    # Preserve all argument data structures when saving/loading a config
    # (e.g., don't unnest lists that contain one element)
    self._preserve_input_structure_in_config = True

  def call(self, obj):
    # Dynamically read the delegated attribute off the represented value.
    return getattr(obj, self.attr_name)

  def get_config(self):
    config = {'attr_name': self.attr_name}
    base_config = super(InstanceProperty, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
class InstanceMethod(InstanceProperty):
  """Wraps an instance method access (e.g. `x.foo(arg)`) in a Keras Layer.

  This layer takes an attribute name `attr_name` in the constructor and,
  when called on input tensor `obj` with additional arguments `args` and
  `kwargs` returns `obj.attr_name(*args, **kwargs)`.

  KerasTensors specialized for specific extension types use it to
  represent dynamic instance method calls on the represented object, e.g.

  x = keras.Input(..., ragged=True)
  new_values = keras.Input(...)
  out = x.with_values(new_values)
  """

  def call(self, obj, args, kwargs):
    # Look up the bound method on the represented value and invoke it with
    # the packed positional and keyword arguments.
    return getattr(obj, self.attr_name)(*args, **kwargs)
class TFOpLambda(Layer):
  """Wraps TF API symbols in a `Layer` object.

  It is inserted by the Functional API construction whenever users call
  a supported TF symbol on KerasTensors.

  Like Lambda layers, this layer tries to raise warnings when it detects users
  explicitly use variables in the call. (To let them know
  that the layer will not capture the variables).

  This is useful in the case where users do something like:
  x = keras.Input(...)
  y = tf.Variable(...)
  out = x * y
  """

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, function, **kwargs):
    # The wrapped TF API function that this layer's `call` delegates to.
    self.function = function
    # Canonical public API name of the function, or None when the function is
    # not exposed in the public TF/Keras API (then serialization is refused
    # in `get_config`).
    self.symbol = (
        get_canonical_name_for_symbol(
            self.function, add_prefix_to_v1_names=True) or
        get_canonical_name_for_symbol(
            self.function, api_name='keras', add_prefix_to_v1_names=True))
    if 'name' not in kwargs:
      # Generate a name.
      # TFOpLambda layers avoid already-observed names,
      # because users cannot easily control the generated names.
      # Without this avoidance, users would be more likely to run
      # into unavoidable duplicate layer name collisions.
      # (For standard layers users could just set `name` when creating the
      # layer to work around a collision, but they can't do that for
      # auto-generated layers)
      if self.symbol:
        name = 'tf.' + self.symbol
      else:
        name = self.function.__name__
      kwargs['name'] = K.unique_object_name(
          name, zero_based=True, avoid_observed_names=True)
    # Disable automatic dtype casting of this layer's inputs.
    kwargs['autocast'] = False

    # Decorate the function to produce this layer's call method: the
    # decorator keeps the wrapped function's signature/docs while routing
    # execution through `_call_wrapper` (variable-usage checks).
    def _call_wrapper(*args, **kwargs):
      return self._call_wrapper(*args, **kwargs)
    self.call = tf.__internal__.decorator.make_decorator(
        function, _call_wrapper)

    # Do not individually trace op layers in the SavedModel.
    self._must_restore_from_config = True

    super(TFOpLambda, self).__init__(**kwargs)

    # Preserve all argument data structures when saving/loading a config
    # (e.g., don't unnest lists that contain one element)
    self._preserve_input_structure_in_config = True

    # Warning on every invocation will be quite irksome in Eager mode.
    self._already_warned = False

    self._expects_training_arg = False
    self._expects_mask_arg = False

  def _call_wrapper(self, *args, **kwargs):
    """Runs `self.function` while recording created/accessed Variables."""
    created_variables = []

    def _variable_creator(next_creator, **creator_kwargs):
      # Record every Variable created during the call so untracked state
      # can be detected and rejected below.
      var = next_creator(**creator_kwargs)
      created_variables.append(var)
      return var

    with tf.GradientTape(watch_accessed_variables=True) as tape, \
        tf.variable_creator_scope(_variable_creator):
      # We explicitly drop `name` arguments here,
      # to guard against the case where an op explicitly has a
      # `name` passed (which is susceptible to producing
      # multiple ops w/ the same name when the layer is reused)
      kwargs.pop('name', None)
      result = self.function(*args, **kwargs)
    self._check_variables(created_variables, tape.watched_variables())
    return result

  def _check_variables(self, created_variables, accessed_variables):
    """Raises/warns when the call touched Variables this layer won't track."""
    if not created_variables and not accessed_variables:
      # In the common case that a Lambda layer does not touch a Variable, we
      # don't want to incur the runtime cost of assembling any state used for
      # checking only to immediately discard it.
      return

    tracked_weights = set(v.ref() for v in self.weights)
    untracked_new_vars = [
        v for v in created_variables if v.ref() not in tracked_weights
    ]
    if untracked_new_vars:
      variable_str = '\n'.join(' {}'.format(i) for i in untracked_new_vars)
      # Fixed typos in the user-facing message:
      # 'consquently' -> 'consequently', 'recommend way' -> 'recommended way'.
      raise ValueError(
          'The following Variables were created within a Lambda layer '
          f'({self.name}) but are not tracked by said layer: {variable_str}\n'
          'The layer cannot safely ensure proper Variable reuse '
          'across multiple calls, and consequently this behavior is '
          'disallowed for safety reasons. Lambda layers are not well suited '
          'for stateful computation; instead, writing a subclassed Layer is '
          'the recommended way to define layers with Variables.')

    untracked_used_vars = [
        v for v in accessed_variables if v.ref() not in tracked_weights
    ]
    if untracked_used_vars and not self._already_warned:
      variable_str = '\n'.join(' {}'.format(i) for i in untracked_used_vars)
      self._warn(
          'The following Variables were used in a Lambda layer\'s call '
          f'({self.name}), but are not present in its tracked objects: '
          f'{variable_str}. This is a strong indication that the Lambda layer '
          'should be rewritten as a subclassed Layer.')
      self._already_warned = True

  def _warn(self, msg):
    # This method will be overridden in a unit test to raise an error, because
    # self.assertWarns is not universally implemented.
    return tf_logging.warning(msg)

  def get_config(self):
    if not self.symbol:
      # Only functions with a public symbol name can be re-resolved at load
      # time, so anything else is not serializable.
      raise ValueError(
          f'This Keras op layer was generated from {self.function}, a method '
          'that is not publicly exposed in the TensorFlow API. This '
          'may have happened if the method was explicitly '
          'decorated to add dispatching support, and it was used '
          'during Functional model construction. '
          'To ensure cross-version compatibility of Keras models '
          'that use op layers, only op layers produced from '
          'public TensorFlow API symbols can be serialized.')
    config = {'function': self.symbol}

    base_config = super(TFOpLambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Resolve the serialized public symbol name back to the function object.
    config = config.copy()
    symbol_name = config['function']
    function = get_symbol_from_name(symbol_name)
    if not function:
      raise ValueError(f'TF symbol `{symbol_name}` could not be found.')

    config['function'] = function
    return cls(**config)
def _delegate_property(keras_tensor_cls, property_name):  # pylint: disable=invalid-name
  """Register property on a KerasTensor class.

  Calling this multiple times with the same arguments should be a no-op.

  This method exposes a property on the KerasTensor class that will use an
  `InstanceProperty` layer to access the property on the represented
  intermediate values in the model.

  Args:
    keras_tensor_cls: The KerasTensor subclass that should expose the property.
    property_name: The name of the property to expose and delegate to the
      represented (Composite)Tensor.
  """

  def _accessor(self):
    # The InstanceProperty layer is constructed lazily at access time rather
    # than at import time, because layer classes are versioned dynamically.
    return InstanceProperty(property_name)(self)

  setattr(keras_tensor_cls, property_name, property(_accessor))
def _delegate_method(keras_tensor_cls, method_name):  # pylint: disable=invalid-name
  """Register method on a KerasTensor class.

  Calling this multiple times with the same arguments should be a no-op.

  This method exposes an instance method on the KerasTensor class that will
  use an `InstanceMethod` layer to run the desired method on the represented
  intermediate values in the model.

  Args:
    keras_tensor_cls: The KerasTensor subclass that should expose the method.
    method_name: The name of the method to expose and delegate to the
      represented (Composite)Tensor.
  """

  def _delegate(self, *args, **kwargs):
    # The InstanceMethod layer is constructed lazily at call time rather
    # than at import time, because layer classes are versioned dynamically.
    layer = InstanceMethod(method_name)
    return layer(self, args, kwargs)

  setattr(keras_tensor_cls, method_name, _delegate)
# We do not support the `uniform_row_length` property because it
# returns either `None` or an int tensor, and code that relies on it tends
# to check `is None` directly. Delegating it here would always return a
# `KerasTensor`, regardless of what can be statically inferred. This would
# never equal `None`, breaking code that expects it to be partially-static
# in unpredictable ways.
# Expose dynamic RaggedTensor properties on RaggedKerasTensor via
# InstanceProperty layers.
for ragged_property in [
    'values', 'flat_values', 'row_splits', 'nested_row_splits'
]:
  _delegate_property(keras_tensor.RaggedKerasTensor, ragged_property)

# Expose RaggedTensor instance methods on RaggedKerasTensor via
# InstanceMethod layers.
for ragged_method_name in [
    'value_rowids',
    'nested_value_rowids',
    'nrows',
    'row_starts',
    'row_limits',
    'row_lengths',
    'nested_row_lengths',
    'bounding_shape',
    'with_values',
    'with_flat_values',
    'with_row_splits_dtype',
    'merge_dims',
    'to_tensor',
    'to_sparse',
]:
  _delegate_method(keras_tensor.RaggedKerasTensor, ragged_method_name)

# Same delegation scheme for SparseTensor-backed KerasTensors.
for sparse_property in [
    'indices',
    'values',
]:
  _delegate_property(keras_tensor.SparseKerasTensor, sparse_property)

for sparse_method in [
    'with_values',
]:
  _delegate_method(keras_tensor.SparseKerasTensor, sparse_method)
class TFClassMethodDispatcher(tf.__internal__.dispatch.OpDispatcher):
  """A class method dispatcher that allows building a functional model with TF class methods."""

  def __init__(self, cls, method_name):
    # The class and class-method name this dispatcher intercepts.
    self.cls = cls
    self.method_name = method_name

  def handle(self, args, kwargs):
    """Handle the specified operation with the specified arguments."""
    # Intercept only calls involving a KerasTensor. args[0] appears to be the
    # class itself (it is dropped via args[1:]); the ClassMethod layer rebinds
    # the class from self.cls instead.
    if any(
        isinstance(x, keras_tensor.KerasTensor)
        for x in tf.nest.flatten([args, kwargs])):
      return ClassMethod(self.cls, self.method_name)(args[1:], kwargs)
    else:
      return self.NOT_SUPPORTED
# Register dispatchers for the public RaggedTensor factory class methods so
# that calling them on KerasTensors builds ClassMethod layers in the
# functional graph.
for ragged_class_method in [
    'from_value_rowids',
    'from_row_splits',
    'from_row_lengths',
    'from_row_starts',
    'from_row_limits',
    'from_uniform_row_length',
    'from_nested_value_rowids',
    'from_nested_row_splits',
    'from_nested_row_lengths',
    'from_tensor',
    'from_sparse',
]:
  TFClassMethodDispatcher(tf.RaggedTensor, ragged_class_method).register(
      getattr(tf.RaggedTensor, ragged_class_method))
class SlicingOpLambda(TFOpLambda):
  """Wraps TF slicing API symbols in a `Layer` object.

  It is inserted by the Functional API construction whenever users call
  a supported TF symbol on KerasTensors.

  Like Lambda layers, this layer tries to raise warnings when it detects users
  explicitly use variables in the call. (To let them know
  that the layer will not capture the variables).

  This is useful in the case where users do something like:
  x = keras.Input(...)
  y = tf.Variable(...)
  out = x * y
  """

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, function, **kwargs):
    super(SlicingOpLambda, self).__init__(function, **kwargs)

    original_call = self.call

    # Decorate the function to produce this layer's call method
    def _call_wrapper(*args, **kwargs):
      # Turn any slice dicts in the args back into `slice` objects.
      # This conversion cannot use nest.flatten/map_structure,
      # because dicts are flattened by nest while slices aren't.
      # So, map_structure would only see the individual elements in the
      # dict.
      # This can't use map_structure_up_to either because the 'shallowness' of
      # the shallow tree would have to vary depending on if only one dim or
      # multiple are being sliced.
      new_args = []
      for arg in args:
        arg = _dict_to_slice(arg)
        if isinstance(arg, (list, tuple)):
          # Also decode one level of nesting (multi-dimension slicing).
          new_arg = []
          for sub_arg in arg:
            new_arg.append(_dict_to_slice(sub_arg))
          arg = new_arg
        new_args.append(arg)

      # Handle the kwargs too.
      new_kwargs = {}
      for key, value in kwargs.items():
        value = _dict_to_slice(value)
        if isinstance(value, (list, tuple)):
          new_value = []
          for v in value:
            new_value.append(_dict_to_slice(v))
          value = new_value
        new_kwargs[key] = value

      return original_call(*new_args, **new_kwargs)
    self.call = tf.__internal__.decorator.make_decorator(
        original_call, _call_wrapper)
def _slice_to_dict(x):
  """Encodes a `slice` as an equivalent dict; passes anything else through."""
  if not isinstance(x, slice):
    return x
  return {'start': x.start, 'stop': x.stop, 'step': x.step}
def _dict_to_slice(x):
  """Rebuilds a `slice` from its dict encoding; passes anything else through."""
  if not isinstance(x, dict):
    return x
  return slice(x['start'], x['stop'], x['step'])
class TFSlicingOpDispatcher(tf.__internal__.dispatch.OpDispatcher):
  """A global dispatcher that allows building a functional model with TF Ops."""

  def __init__(self, op):
    # The slicing op (e.g. `tf.__operators__.getitem`) being intercepted.
    self.op = op

  def handle(self, args, kwargs):
    """Handle the specified operation with the specified arguments."""
    # Encode `slice` objects as dicts so tf.nest can traverse them (nest
    # flattens dicts but treats slices as opaque leaves).
    args = tf.nest.map_structure(_slice_to_dict, args)
    kwargs = tf.nest.map_structure(_slice_to_dict, kwargs)
    if any(
        isinstance(x, keras_tensor.KerasTensor)
        for x in tf.nest.flatten([args, kwargs])):
      # SlicingOpLambda decodes the dicts back into slices before running op.
      return SlicingOpLambda(self.op)(*args, **kwargs)
    else:
      return self.NOT_SUPPORTED
# Register dispatchers for the TF slicing/masking ops so that applying them
# to KerasTensors builds SlicingOpLambda layers.
for slicing_op in [
    tf.__operators__.getitem,  # pylint: disable=protected-access
    tf.compat.v1.boolean_mask,
    tf.boolean_mask,
    tf.__operators__.ragged_getitem
]:
  TFSlicingOpDispatcher(slicing_op).register(slicing_op)
| 19,624 | 35.009174 | 118 | py |
keras | keras-master/keras/layers/core/activity_regularization.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the ActivityRegularization layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import regularizers
from keras.engine.base_layer import Layer
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
  """Layer that applies an update to the cost function based input activity.

  Args:
    l1: L1 regularization factor (positive float).
    l2: L2 regularization factor (positive float).

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, l1=0., l2=0., **kwargs):
    # Delegate the actual penalty computation to an L1L2 activity regularizer
    # installed on the base Layer.
    super(ActivityRegularization, self).__init__(
        activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
    self.supports_masking = True
    # Keep the raw factors around for serialization in `get_config`.
    self.l1 = l1
    self.l2 = l2

  def compute_output_shape(self, input_shape):
    # The layer is an identity transform shape-wise.
    return input_shape

  def get_config(self):
    base_config = super(ActivityRegularization, self).get_config()
    return {**base_config, 'l1': self.l1, 'l2': self.l2}
| 1,964 | 35.388889 | 80 | py |
keras | keras-master/keras/layers/core/reshape.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Reshape layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras.engine.base_layer import Layer
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Reshape')
class Reshape(Layer):
  """Layer that reshapes inputs into the given shape.

  Input shape:
    Arbitrary, although all dimensions in the input shape must be known/fixed.
    Use the keyword argument `input_shape` (tuple of integers, does not include
    the samples/batch size axis) when using this layer as the first layer
    in a model.

  Output shape:
    `(batch_size,) + target_shape`

  Example:

  >>> # as first layer in a Sequential model
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Reshape((3, 4), input_shape=(12,)))
  >>> # model.output_shape == (None, 3, 4), `None` is the batch size.
  >>> model.output_shape
  (None, 3, 4)

  >>> # as intermediate layer in a Sequential model
  >>> model.add(tf.keras.layers.Reshape((6, 2)))
  >>> model.output_shape
  (None, 6, 2)

  >>> # also supports shape inference using `-1` as dimension
  >>> model.add(tf.keras.layers.Reshape((-1, 2, 2)))
  >>> model.output_shape
  (None, 3, 2, 2)
  """

  def __init__(self, target_shape, **kwargs):
    """Creates a `tf.keras.layers.Reshape` layer instance.

    Args:
      target_shape: Target shape. Tuple of integers, does not include the
        samples dimension (batch size).
      **kwargs: Any additional layer keyword arguments.
    """
    super(Reshape, self).__init__(**kwargs)
    self.target_shape = tuple(target_shape)

  def _fix_unknown_dimension(self, input_shape, output_shape):
    """Find and replace a missing dimension in an output shape.

    This is a near direct port of the internal Numpy function
    `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`

    Args:
      input_shape: Shape of array being reshaped
      output_shape: Desired shape of the array with at most a single -1 which
        indicates a dimension that should be derived from the input shape.

    Returns:
      The new output shape with a -1 replaced with its computed value.

    Raises:
      ValueError: If the total array size of the output_shape is
      different than the input_shape, or more than one unknown dimension
      is specified.
    """
    output_shape = list(output_shape)

    # Product of the known output dims, plus the index of the (at most one)
    # unknown dim.
    known_product, unknown_index = 1, None
    for idx, dim in enumerate(output_shape):
      if dim < 0:
        if unknown_index is not None:
          raise ValueError('Can only specify one unknown dimension.')
        unknown_index = idx
      else:
        known_product *= dim

    total = np.prod(input_shape, dtype=int)
    # Note: `output_shape` still contains the -1 placeholder here, so the
    # message shows exactly what the caller requested.
    msg = ('total size of new array must be unchanged, '
           f'input_shape = {input_shape}, output_shape = {output_shape}')

    if unknown_index is None:
      if total != known_product:
        raise ValueError(msg)
    else:
      if known_product == 0 or total % known_product != 0:
        raise ValueError(msg)
      output_shape[unknown_index] = total // known_product

    return output_shape

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    output_shape = [input_shape[0]]
    if None in input_shape[1:]:
      # input shape (partially) unknown? replace -1's with None's
      output_shape += tuple(s if s != -1 else None for s in self.target_shape)
    else:
      output_shape += self._fix_unknown_dimension(input_shape[1:],
                                                  self.target_shape)
    return tf.TensorShape(output_shape)

  def call(self, inputs):
    result = tf.reshape(inputs, (tf.shape(inputs)[0],) + self.target_shape)
    if not tf.executing_eagerly():
      # Set the static shape for the result since it might lost during array_ops
      # reshape, eg, some `None` dim in the result could be inferred.
      result.set_shape(self.compute_output_shape(inputs.shape))
    return result

  def get_config(self):
    base_config = super(Reshape, self).get_config()
    return {**base_config, 'target_shape': self.target_shape}
| 4,967 | 35.8 | 80 | py |
keras | keras-master/keras/layers/core/lambda_layer.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Lambda layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
import sys
import textwrap
import types as python_types
import warnings
from keras.engine.base_layer import Layer
from keras.utils import generic_utils
from keras.utils import tf_inspect
from keras.utils import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Lambda')
class Lambda(Layer):
"""Wraps arbitrary expressions as a `Layer` object.
The `Lambda` layer exists so that arbitrary expressions can be used
as a `Layer` when constructing `Sequential`
and Functional API models. `Lambda` layers are best suited for simple
operations or quick experimentation. For more advanced use cases, follow
[this guide](https://www.tensorflow.org/guide/keras/custom_layers_and_models)
for subclassing `tf.keras.layers.Layer`.
WARNING: `tf.keras.layers.Lambda` layers have (de)serialization limitations!
The main reason to subclass `tf.keras.layers.Layer` instead of using a
`Lambda` layer is saving and inspecting a Model. `Lambda` layers
are saved by serializing the Python bytecode, which is fundamentally
non-portable. They should only be loaded in the same environment where
they were saved. Subclassed layers can be saved in a more portable way
by overriding their `get_config` method. Models that rely on
subclassed Layers are also often easier to visualize and reason about.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Variables:
While it is possible to use Variables with Lambda layers, this practice is
discouraged as it can easily lead to bugs. For instance, consider the
following layer:
```python
scale = tf.Variable(1.)
scale_layer = tf.keras.layers.Lambda(lambda x: x * scale)
```
Because scale_layer does not directly track the `scale` variable, it will
not appear in `scale_layer.trainable_weights` and will therefore not be
trained if `scale_layer` is used in a Model.
A better pattern is to write a subclassed Layer:
```python
class ScaleLayer(tf.keras.layers.Layer):
def __init__(self):
super(ScaleLayer, self).__init__()
self.scale = tf.Variable(1.)
def call(self, inputs):
return inputs * self.scale
```
In general, Lambda layers can be convenient for simple stateless
computation, but anything more complex should use a subclass Layer instead.
Args:
function: The function to be evaluated. Takes input tensor as first
argument.
output_shape: Expected output shape from function. This argument can be
inferred if not explicitly provided. Can be a tuple or function. If a
tuple, it only specifies the first dimension onward;
sample dimension is assumed either the same as the input: `output_shape =
(input_shape[0], ) + output_shape` or, the input is `None` and
the sample dimension is also `None`: `output_shape = (None, ) +
output_shape` If a function, it specifies the entire shape as a function
of the
input shape: `output_shape = f(input_shape)`
mask: Either None (indicating no masking) or a callable with the same
signature as the `compute_mask` layer method, or a tensor that will be
returned as output mask regardless of what the input is.
arguments: Optional dictionary of keyword arguments to be passed to the
function.
Input shape: Arbitrary. Use the keyword argument input_shape (tuple of
integers, does not include the samples axis) when using this layer as the
first layer in a model.
Output shape: Specified by `output_shape` argument
"""
@tf.__internal__.tracking.no_automatic_dependency_tracking
def __init__(self,
function,
output_shape=None,
mask=None,
arguments=None,
**kwargs):
super(Lambda, self).__init__(**kwargs)
self.arguments = arguments or {}
self.function = function
if mask is not None:
self.supports_masking = True
self.mask = mask
self._output_shape = output_shape
# Warning on every invocation will be quite irksome in Eager mode.
self._already_warned = False
function_args = tf_inspect.getfullargspec(function).args
self._fn_expects_training_arg = 'training' in function_args
self._fn_expects_mask_arg = 'mask' in function_args
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# Make use of existing autocomputation but provide Lambda-specific
# error message. This is always safe to run even when the outer context
# is Graph mode because Lambda layers don't have side effects such as
# `add_loss`.
with tf.__internal__.eager_context.eager_mode():
try:
return super(Lambda, self).compute_output_shape(input_shape)
except NotImplementedError:
raise NotImplementedError(
'We could not automatically infer the shape of the Lambda\'s '
'output. Please specify `output_shape` for this Lambda.')
if callable(self._output_shape):
output_shapes = self._output_shape(input_shape)
return tf_utils.convert_shapes(output_shapes, to_tuples=False)
# Output shapes are passed directly and don't include batch dimension.
input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
batch_size = tf.nest.flatten(
input_tensor_shape)[0][0] if input_shape else None
def _add_batch(shape):
return tf.TensorShape([batch_size] + shape.as_list())
output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
return tf.nest.map_structure(_add_batch, output_shapes)
def call(self, inputs, mask=None, training=None):
# We must copy for thread safety, but it only needs to be a shallow copy.
kwargs = {k: v for k, v in self.arguments.items()}
if self._fn_expects_mask_arg:
kwargs['mask'] = mask
if self._fn_expects_training_arg:
kwargs['training'] = training
created_variables = []
def _variable_creator(next_creator, **kwargs):
var = next_creator(**kwargs)
created_variables.append(var)
return var
with tf.GradientTape(watch_accessed_variables=True) as tape,\
tf.variable_creator_scope(_variable_creator):
result = self.function(inputs, **kwargs)
self._check_variables(created_variables, tape.watched_variables())
return result
def _check_variables(self, created_variables, accessed_variables):
if not created_variables and not accessed_variables:
# In the common case that a Lambda layer does not touch a Variable, we
# don't want to incur the runtime cost of assembling any state used for
# checking only to immediately discard it.
return
tracked_weights = set(v.ref() for v in self.weights)
untracked_new_vars = [
v for v in created_variables if v.ref() not in tracked_weights
]
if untracked_new_vars:
variable_str = '\n'.join(' {}'.format(i) for i in untracked_new_vars)
error_str = textwrap.dedent("""
The following Variables were created within a Lambda layer ({name})
but are not tracked by said layer:
{variable_str}
The layer cannot safely ensure proper Variable reuse across multiple
calls, and consquently this behavior is disallowed for safety. Lambda
layers are not well suited to stateful computation; instead, writing a
subclassed Layer is the recommend way to define layers with
Variables.""").format(
name=self.name, variable_str=variable_str)
raise ValueError(error_str)
untracked_used_vars = [
v for v in accessed_variables if v.ref() not in tracked_weights
]
if untracked_used_vars and not self._already_warned:
variable_str = '\n'.join(' {}'.format(i) for i in untracked_used_vars)
self._warn(
textwrap.dedent("""
The following Variables were used a Lambda layer's call ({name}), but
are not present in its tracked objects:
{variable_str}
It is possible that this is intended behavior, but it is more likely
an omission. This is a strong indication that this layer should be
formulated as a subclassed Layer rather than a Lambda layer.""")
.format(name=self.name, variable_str=variable_str))
self._already_warned = True
def _warn(self, msg):
# This method will be overridden in a unit test to raise an error, because
# self.assertWarns is not universally implemented.
return tf_logging.warning(msg)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
function_config = self._serialize_function_to_config(self.function)
output_shape_config = self._serialize_function_to_config(
self._output_shape, allow_raw=True)
config = {
'function': function_config[0],
'function_type': function_config[1],
'module': function_config[2],
'output_shape': output_shape_config[0],
'output_shape_type': output_shape_config[1],
'output_shape_module': output_shape_config[2],
}
if self.mask is not None:
mask_config = self._serialize_function_to_config(self.mask)
config.update({
'mask': mask_config[0],
'mask_type': mask_config[1],
'mask_module': mask_config[2]
})
config['arguments'] = self.arguments
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(self, inputs, allow_raw=False):
if isinstance(inputs, python_types.LambdaType):
output = generic_utils.func_dump(inputs)
output_type = 'lambda'
module = inputs.__module__
elif callable(inputs):
output = inputs.__name__
output_type = 'function'
module = inputs.__module__
elif allow_raw:
output = inputs
output_type = 'raw'
module = None
else:
raise ValueError('Invalid input for serialization, type: %s ' %
type(inputs))
return output, output_type, module
@classmethod
def from_config(cls, config, custom_objects=None):
  """Recreates a `Lambda` layer from its serialized config.

  Args:
    config: Dict, as produced by `get_config`.
    custom_objects: Optional dict mapping names to custom classes or
      functions to consider during deserialization.

  Returns:
    A new `Lambda` layer instance.
  """
  # Copy so the `pop` calls inside `_parse_function_from_config` do not
  # mutate the caller's dict.
  config = config.copy()
  function = cls._parse_function_from_config(config, custom_objects,
                                             'function', 'module',
                                             'function_type')

  output_shape = cls._parse_function_from_config(config, custom_objects,
                                                 'output_shape',
                                                 'output_shape_module',
                                                 'output_shape_type')
  if 'mask' in config:
    mask = cls._parse_function_from_config(config, custom_objects, 'mask',
                                           'mask_module', 'mask_type')
  else:
    mask = None

  config['function'] = function
  config['output_shape'] = output_shape
  config['mask'] = mask

  # If arguments were numpy array, they have been saved as
  # list. We need to recover the ndarray
  if 'arguments' in config:
    for key in config['arguments']:
      if isinstance(config['arguments'][key], dict):
        arg_dict = config['arguments'][key]
        if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
          # Overwrite the argument with its numpy translation
          config['arguments'][key] = np.array(arg_dict['value'])

  return cls(**config)
@classmethod
def _parse_function_from_config(cls, config, custom_objects, func_attr_name,
module_attr_name, func_type_attr_name):
globs = globals().copy()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn(
'{} is not loaded, but a Lambda layer uses it. '
'It may cause errors.'.format(module), UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == 'function':
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(config[func_attr_name], globs=globs)
elif function_type == 'raw':
function = config[func_attr_name]
else:
raise TypeError('Unknown function type:', function_type)
return function
| 14,250 | 39.030899 | 80 | py |
keras | keras-master/keras/layers/core/spatial_dropout.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the spatial dropout layers."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import,g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
import keras.backend as K
from keras.engine.input_spec import InputSpec
from keras.layers.core.dropout import Dropout
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
  """Spatial 1D version of Dropout.

  Behaves like `Dropout` but drops entire 1D feature maps (channels) rather
  than individual elements. When adjacent frames within a feature map are
  strongly correlated (as is common in early convolution layers), regular
  dropout fails to regularize the activations and mostly just lowers the
  effective learning rate; `SpatialDropout1D` instead promotes independence
  between feature maps.

  Args:
    rate: Float between 0 and 1. Fraction of the input units to drop.

  Call arguments:
    inputs: A 3D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    3D tensor with shape: `(samples, timesteps, channels)`

  Output shape: Same as input.

  References: - [Efficient Object Localization Using Convolutional
    Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, **kwargs):
    super(SpatialDropout1D, self).__init__(rate, **kwargs)
    self.input_spec = InputSpec(ndim=3)

  def _get_noise_shape(self, inputs):
    # Broadcast the mask over the timestep axis so each channel is kept or
    # dropped as a whole.
    shape = tf.shape(inputs)
    return (shape[0], 1, shape[2])
@keras_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
  """Spatial 2D version of Dropout.

  This version performs the same function as Dropout, however, it drops
  entire 2D feature maps instead of individual elements. If adjacent pixels
  within feature maps are strongly correlated (as is normally the case in
  early convolution layers) then regular dropout will not regularize the
  activations and will otherwise just result in an effective learning rate
  decrease. In this case, SpatialDropout2D will help promote independence
  between feature maps and should be used instead.

  Args:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    data_format: 'channels_first' or 'channels_last'. In 'channels_first'
      mode, the channels dimension (the depth) is at index 1, in
      'channels_last' mode is it at index 3. It defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`. If you never set it, then it will be
      "channels_last".

  Call arguments:
    inputs: A 4D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    4D tensor with shape: `(samples, channels, rows, cols)` if
      data_format='channels_first'
    or 4D tensor with shape: `(samples, rows, cols, channels)` if
      data_format='channels_last'.

  Output shape: Same as input.

  References: - [Efficient Object Localization Using Convolutional
    Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout2D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in {'channels_last', 'channels_first'}:
      # Improved error: report the invalid value that was received.
      raise ValueError(
          '`data_format` must be "channels_last" or "channels_first". '
          f'Received: data_format={data_format}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=4)

  def _get_noise_shape(self, inputs):
    input_shape = tf.shape(inputs)
    if self.data_format == 'channels_first':
      return (input_shape[0], input_shape[1], 1, 1)
    # `__init__` guarantees data_format is one of the two valid values, so
    # this branch is 'channels_last' (previously an implicit `return None`).
    return (input_shape[0], 1, 1, input_shape[3])
@keras_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
  """Spatial 3D version of Dropout.

  This version performs the same function as Dropout, however, it drops
  entire 3D feature maps instead of individual elements. If adjacent voxels
  within feature maps are strongly correlated (as is normally the case in
  early convolution layers) then regular dropout will not regularize the
  activations and will otherwise just result in an effective learning rate
  decrease. In this case, SpatialDropout3D will help promote independence
  between feature maps and should be used instead.

  Args:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    data_format: 'channels_first' or 'channels_last'. In 'channels_first'
      mode, the channels dimension (the depth) is at index 1, in
      'channels_last' mode is it at index 4. It defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`. If you never set it, then it will be
      "channels_last".

  Call arguments:
    inputs: A 5D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Input shape:
    5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if
      data_format='channels_first'
    or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if
      data_format='channels_last'.

  Output shape: Same as input.

  References: - [Efficient Object Localization Using Convolutional
    Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout3D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in {'channels_last', 'channels_first'}:
      # Improved error: report the invalid value that was received.
      raise ValueError(
          '`data_format` must be "channels_last" or "channels_first". '
          f'Received: data_format={data_format}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=5)

  def _get_noise_shape(self, inputs):
    input_shape = tf.shape(inputs)
    if self.data_format == 'channels_first':
      return (input_shape[0], input_shape[1], 1, 1, 1)
    # `__init__` guarantees data_format is one of the two valid values, so
    # this branch is 'channels_last' (previously an implicit `return None`).
    return (input_shape[0], 1, 1, 1, input_shape[4])
| 7,111 | 42.631902 | 89 | py |
keras | keras-master/keras/layers/core/flatten.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the flatten layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
import functools
import operator
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Flatten')
class Flatten(Layer):
  """Flattens the input. Does not affect the batch size.

  Note: If inputs are shaped `(batch,)` without a feature axis, then
  flattening adds an extra channel dimension and output shape is `(batch, 1)`.

  Args:
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
      It defaults to the `image_data_format` value found in your
      Keras config file at `~/.keras/keras.json`.
      If you never set it, then it will be "channels_last".

  Example:

  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Conv2D(64, 3, 3, input_shape=(3, 32, 32)))
  >>> model.output_shape
  (None, 1, 10, 64)

  >>> model.add(Flatten())
  >>> model.output_shape
  (None, 640)

  """

  def __init__(self, data_format=None, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(min_ndim=1)
    # Cached so `call` performs a boolean check instead of a string compare.
    self._channels_first = self.data_format == 'channels_first'

  def call(self, inputs):
    if self._channels_first:
      rank = inputs.shape.rank
      if rank and rank > 1:
        # Switch to channels-last format: move axis 1 (channels) to the end
        # so the flattened ordering matches the channels_last layout.
        permutation = [0]
        permutation.extend(range(2, rank))
        permutation.append(1)
        inputs = tf.transpose(inputs, perm=permutation)

    if tf.executing_eagerly():
      # Full static shape is guaranteed to be available.
      # Performance: Using `constant_op` is much faster than passing a list.
      flattened_shape = tf.constant([inputs.shape[0], -1])
      return tf.reshape(inputs, flattened_shape)
    else:
      # Graph mode: the static shape may be only partially known, so pick
      # the reshape target that preserves the most static shape information.
      input_shape = inputs.shape
      rank = input_shape.rank
      if rank == 1:
        # A `(batch,)` input gains a feature axis -> `(batch, 1)`.
        return tf.expand_dims(inputs, axis=1)
      else:
        batch_dim = tf.compat.dimension_value(input_shape[0])
        non_batch_dims = input_shape[1:]
        # Reshape in a way that preserves as much shape info as possible.
        if non_batch_dims.is_fully_defined():
          last_dim = int(functools.reduce(operator.mul, non_batch_dims))
          flattened_shape = tf.constant([-1, last_dim])
        elif batch_dim is not None:
          flattened_shape = tf.constant([int(batch_dim), -1])
        else:
          # Fully dynamic: compute the batch dimension at runtime.
          flattened_shape = [tf.shape(inputs)[0], -1]
        return tf.reshape(inputs, flattened_shape)

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    if not input_shape:
      output_shape = tf.TensorShape([1])
    else:
      output_shape = [input_shape[0]]
    # Product of the non-batch dims if they are all known, else unknown.
    if np.all(input_shape[1:]):
      output_shape += [np.prod(input_shape[1:], dtype=int)]
    else:
      output_shape += [None]
    return tf.TensorShape(output_shape)

  def get_config(self):
    config = super(Flatten, self).get_config()
    config.update({'data_format': self.data_format})
    return config
| 4,179 | 35.666667 | 80 | py |
keras | keras-master/keras/layers/core/dropout.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the dropout layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend as K
from keras.engine.base_layer import Layer
from keras.utils import control_flow_util
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
# TODO(b/168039935): track dropout rate to decide whether/how to make a
# dropout rate fastpath.
# Gauge recording whether a `Dropout` layer was constructed with a constant
# rate of exactly 0 (a candidate for a no-op fastpath); set in
# `Dropout.__init__`.
keras_temporary_dropout_rate = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/dropout/temp_rate_is_zero',
    'Temporarily record if Keras dropout layer was created w/'
    'constant rate = 0')
@keras_export('keras.layers.Dropout')
class Dropout(Layer):
  """Applies Dropout to the input.

  During training, randomly zeroes a fraction `rate` of the input units at
  each step, which helps prevent overfitting. Units that are kept are scaled
  up by `1 / (1 - rate)` so that the expected sum over all inputs is
  unchanged.

  Dropout only takes effect when `training` is True, so no values are
  dropped at inference time. `model.fit` sets `training` appropriately for
  you; in other contexts pass `training=True` explicitly when calling the
  layer. (This is distinct from `trainable=False`: Dropout has no
  variables/weights to freeze, so `trainable` does not affect its behavior.)

  Args:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    noise_shape: 1D integer tensor representing the shape of the binary
      dropout mask that will be multiplied with the input. For instance, if
      your inputs have shape `(batch_size, timesteps, features)` and you
      want the dropout mask to be the same for all timesteps, you can use
      `noise_shape=(batch_size, 1, features)`.
    seed: A Python integer to use as random seed.

  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).
  """

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super(Dropout, self).__init__(**kwargs)
    if isinstance(rate, (int, float)) and not 0 <= rate <= 1:
      raise ValueError(f'Invalid value {rate} received for '
                       f'`rate`, expected a value between 0 and 1.')
    self.rate = rate
    # Report to the monitoring gauge whether this layer is a constant-zero
    # (i.e. no-op) dropout.
    rate_is_constant_zero = isinstance(rate, (int, float)) and not rate
    keras_temporary_dropout_rate.get_cell().set(bool(rate_is_constant_zero))
    self.noise_shape = noise_shape
    self.seed = seed
    self.supports_masking = True

  def _get_noise_shape(self, inputs):
    # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
    # which will override `self.noise_shape`, and allows for custom noise
    # shapes with dynamically sized inputs.
    if self.noise_shape is None:
      return None

    concrete_shape = tf.shape(inputs)
    # Fill any `None` entries with the concrete input dimension.
    resolved = [
        concrete_shape[axis] if dim is None else dim
        for axis, dim in enumerate(self.noise_shape)
    ]
    return tf.convert_to_tensor(resolved)

  def call(self, inputs, training=None):
    if training is None:
      training = K.learning_phase()

    def dropped_inputs():
      return tf.nn.dropout(
          inputs,
          noise_shape=self._get_noise_shape(inputs),
          seed=self.seed,
          rate=self.rate)

    return control_flow_util.smart_cond(training, dropped_inputs,
                                        lambda: tf.identity(inputs))

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = super(Dropout, self).get_config()
    config.update({
        'rate': self.rate,
        'noise_shape': self.noise_shape,
        'seed': self.seed
    })
    return config
| 5,167 | 36.449275 | 80 | py |
keras | keras-master/keras/layers/core/permute.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Permute layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
import copy
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Permute')
class Permute(Layer):
  """Permutes the dimensions of the input according to a given pattern.

  Useful e.g. for connecting RNNs and convnets.

  Example:

  ```python
  model = Sequential()
  model.add(Permute((2, 1), input_shape=(10, 64)))
  # now: model.output_shape == (None, 64, 10)
  # note: `None` is the batch dimension
  ```

  Args:
    dims: Tuple of integers. Permutation pattern does not include the
      samples dimension. Indexing starts at 1. For instance, `(2, 1)`
      permutes the first and second dimensions of the input.

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same as the input shape, but with the dimensions re-ordered according
    to the specified pattern.
  """

  def __init__(self, dims, **kwargs):
    super(Permute, self).__init__(**kwargs)
    self.dims = tuple(dims)
    # `dims` must be a permutation of 1..len(dims) (axis 0, the batch
    # dimension, is never permuted).
    if sorted(dims) != list(range(1, len(dims) + 1)):
      raise ValueError(
          'Invalid permutation argument `dims` for Permute Layer. '
          'The set of indices in `dims` must be consecutive and start from 1. '
          f'Received dims={dims}')
    self.input_spec = InputSpec(ndim=len(self.dims) + 1)

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    output_shape = list(input_shape)
    for target_pos, source_axis in enumerate(self.dims, start=1):
      output_shape[target_pos] = input_shape[source_axis]
    return tf.TensorShape(output_shape)

  def call(self, inputs):
    permutation = (0,) + self.dims
    return tf.transpose(inputs, perm=permutation)

  def get_config(self):
    config = super(Permute, self).get_config()
    config.update({'dims': self.dims})
    return config
| 2,874 | 34.493827 | 80 | py |
keras | keras-master/keras/layers/core/__init__.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers."""
from keras.layers.core.activation import Activation
from keras.layers.core.activity_regularization import ActivityRegularization
from keras.layers.core.dense import Dense
from keras.layers.core.dropout import Dropout
from keras.layers.core.flatten import Flatten
from keras.layers.core.lambda_layer import Lambda
from keras.layers.core.masking import Masking
from keras.layers.core.permute import Permute
from keras.layers.core.repeat_vector import RepeatVector
from keras.layers.core.reshape import Reshape
from keras.layers.core.spatial_dropout import SpatialDropout1D
from keras.layers.core.spatial_dropout import SpatialDropout2D
from keras.layers.core.spatial_dropout import SpatialDropout3D
# Required by third_party/py/tensorflow_gnn/graph/keras/keras_tensors.py
from keras.layers.core.tf_op_layer import _delegate_method
from keras.layers.core.tf_op_layer import _delegate_property
from keras.layers.core.tf_op_layer import ClassMethod
from keras.layers.core.tf_op_layer import InstanceMethod
from keras.layers.core.tf_op_layer import InstanceProperty
from keras.layers.core.tf_op_layer import SlicingOpLambda
from keras.layers.core.tf_op_layer import TFOpLambda
| 1,888 | 46.225 | 80 | py |
keras | keras-master/keras/layers/core/core_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
# pylint: disable=g-bad-import-order
import tensorflow.compat.v2 as tf
import textwrap
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers import core
from keras.mixed_precision import policy
import numpy as np
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
  """Tests for `Dropout` and the `SpatialDropout{1,2,3}D` layers."""

  def test_dropout(self):
    # Basic rate-only dropout, plus an explicit broadcastable `noise_shape`.
    testing_utils.layer_test(
        keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))

    testing_utils.layer_test(
        keras.layers.Dropout,
        kwargs={
            'rate': 0.5,
            'noise_shape': [3, 1]
        },
        input_shape=(3, 2))

  def test_dropout_supports_masking(self):
    dropout = keras.layers.Dropout(0.5)
    self.assertEqual(True, dropout.supports_masking)

  def test_spatial_dropout_1d(self):
    testing_utils.layer_test(
        keras.layers.SpatialDropout1D,
        kwargs={'rate': 0.5},
        input_shape=(2, 3, 4))

  def test_spatial_dropout_2d(self):
    # Default (channels_last) and explicit channels_first layouts.
    testing_utils.layer_test(
        keras.layers.SpatialDropout2D,
        kwargs={'rate': 0.5},
        input_shape=(2, 3, 4, 5))

    testing_utils.layer_test(
        keras.layers.SpatialDropout2D,
        kwargs={
            'rate': 0.5,
            'data_format': 'channels_first'
        },
        input_shape=(2, 3, 4, 5))

  def test_spatial_dropout_3d(self):
    # Default (channels_last) and explicit channels_first layouts.
    testing_utils.layer_test(
        keras.layers.SpatialDropout3D,
        kwargs={'rate': 0.5},
        input_shape=(2, 3, 4, 4, 5))

    testing_utils.layer_test(
        keras.layers.SpatialDropout3D,
        kwargs={
            'rate': 0.5,
            'data_format': 'channels_first'
        },
        input_shape=(2, 3, 4, 4, 5))

  def test_dropout_partial_noise_shape(self):
    inputs = keras.Input(shape=(5, 10))
    layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None))
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    out = model(np.ones((20, 5, 10)), training=True)
    out_np = keras.backend.get_value(out)
    # Test that dropout mask is shared across second dim.
    self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :])
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
  """Tests for the `Lambda` layer: calling, shapes, masking, serialization."""

  def test_lambda(self):
    testing_utils.layer_test(
        keras.layers.Lambda,
        kwargs={'function': lambda x: x + 1},
        input_shape=(3, 2))

    # Extra keyword arguments are forwarded to the wrapped function.
    testing_utils.layer_test(
        keras.layers.Lambda,
        kwargs={
            'function': lambda x, a, b: x * a + b,
            'arguments': {
                'a': 0.6,
                'b': 0.4
            }
        },
        input_shape=(3, 2))

    # test serialization with function
    def f(x):
      return x + 1

    ld = keras.layers.Lambda(f)
    config = ld.get_config()
    ld = keras.layers.deserialize({'class_name': 'Lambda', 'config': config})
    self.assertEqual(ld.function(3), 4)

    # test with lambda
    ld = keras.layers.Lambda(
        lambda x: keras.backend.concatenate([tf.square(x), x]))
    config = ld.get_config()
    ld = keras.layers.Lambda.from_config(config)
    self.assertAllEqual(self.evaluate(ld.function([3])), [9, 3])

  def test_lambda_multiple_inputs(self):
    ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
    x1 = np.ones([3, 2], np.float32)
    x2 = np.ones([3, 5], np.float32)
    out = ld([x1, x2])
    self.assertAllEqual(out.shape, [3, 2])

  def test_lambda_output_shape(self):
    # `output_shape` given as a static tuple.
    l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
    l(keras.backend.variable(np.ones((1, 1))))
    self.assertEqual((1, 1), l.get_config()['output_shape'])

  def test_lambda_output_shape_function(self):
    # `output_shape` given as a callable is serialized with type 'lambda'.
    def get_output_shape(input_shape):
      return 1 * input_shape

    l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
    l(keras.backend.variable(np.ones((1, 1))))
    self.assertEqual('lambda', l.get_config()['output_shape_type'])

  def test_lambda_output_shape_autocalculate_multiple_inputs(self):
    # With no `output_shape` the layer infers shapes/dtypes by tracing.
    def lambda_fn(x):
      return tf.matmul(x[0], x[1])

    l = keras.layers.Lambda(lambda_fn, dtype=tf.float64)
    output_shape = l.compute_output_shape([(10, 10), (10, 20)])
    self.assertAllEqual((10, 20), output_shape)
    output_signature = l.compute_output_signature([
        tf.TensorSpec(dtype=tf.float64, shape=(10, 10)),
        tf.TensorSpec(dtype=tf.float64, shape=(10, 20))
    ])
    self.assertAllEqual((10, 20), output_signature.shape)
    self.assertAllEqual(tf.float64, output_signature.dtype)

  def test_lambda_output_shape_list_multiple_outputs(self):

    def lambda_fn(x):
      return x

    l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
    output_shape = l.compute_output_shape([(10, 10), (10, 20)])
    self.assertAllEqual([(10, 10), (10, 20)], output_shape)

  def test_lambda_output_shape_tuple_with_none(self):

    def lambda_fn(x):
      return x

    l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
    output_shape = l.compute_output_shape((5, 10, 20))
    self.assertAllEqual([5, None, 10], output_shape.as_list())

  def test_lambda_output_shape_function_multiple_outputs(self):

    def lambda_fn(x):
      return x

    def output_shape_fn(input_shape):
      return input_shape

    l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
    output_shape = l.compute_output_shape([(10, 10), (10, 20)])
    self.assertAllEqual([(10, 10), (10, 20)], output_shape)

  def test_lambda_output_shape_nested(self):
    # Nested structures (tuples/dicts) of shapes are handled too.
    def lambda_fn(inputs):
      return (inputs[1]['a'], {'b': inputs[0]})

    l = keras.layers.Lambda(lambda_fn)
    output_shape = l.compute_output_shape(((10, 20), {'a': (10, 5)}))
    self.assertAllEqual(((10, 5), {'b': (10, 20)}), output_shape)

  def test_lambda_config_serialization(self):
    # Test serialization with output_shape and output_shape_type
    layer = keras.layers.Lambda(
        lambda x: x + 1, output_shape=(1, 1), mask=lambda i, m: m)
    layer(keras.backend.variable(np.ones((1, 1))))
    config = layer.get_config()

    layer = keras.layers.deserialize({'class_name': 'Lambda', 'config': config})
    self.assertAllEqual(layer.function(1), 2)
    self.assertAllEqual(layer._output_shape, (1, 1))
    self.assertAllEqual(layer.mask(1, True), True)

    layer = keras.layers.Lambda.from_config(config)
    self.assertAllEqual(layer.function(1), 2)
    self.assertAllEqual(layer._output_shape, (1, 1))
    self.assertAllEqual(layer.mask(1, True), True)

  def test_lambda_with_training_arg(self):
    # The `training` flag is forwarded to functions that accept it.
    def fn(x, training=True):
      return keras.backend.in_train_phase(x, 2 * x, training=training)

    layer = keras.layers.Lambda(fn)
    x = keras.backend.ones(())
    train_out = layer(x, training=True)
    eval_out = layer(x, training=False)

    self.assertEqual(keras.backend.get_value(train_out), 1.)
    self.assertEqual(keras.backend.get_value(eval_out), 2.)

  def test_lambda_with_mask(self):

    def add_one(inputs):
      return inputs + 1.0

    def mask(unused_inputs, previous_mask):
      return previous_mask

    layer = keras.layers.Lambda(add_one, mask=mask)
    x = np.ones([5, 4, 3])
    x[:, -1, :] = 0
    masking = keras.layers.Masking()
    out = layer(masking(x))

    expected_out = np.full([5, 4, 3], 2.0)
    expected_out[:, -1, :] = 1.0
    expected_mask = np.ones([5, 4])
    expected_mask[:, -1] = 0.0

    self.assertAllClose(self.evaluate(out), expected_out)
    self.assertIsNotNone(out._keras_mask)
    self.assertAllClose(self.evaluate(out._keras_mask), expected_mask)

  def test_lambda_with_ragged_input(self):

    def add_one(inputs):
      return inputs + 1.0

    layer = keras.layers.Lambda(add_one)

    ragged_input = tf.ragged.constant([[1.0], [2.0, 3.0]])
    out = layer(ragged_input)
    expected_out = tf.ragged.constant([[2.0], [3.0, 4.0]])
    self.assertAllClose(out, expected_out)

  def test_lambda_deserialization_does_not_pollute_core(self):
    # Deserializing a Lambda must not leak names into the `core` module.
    layer = keras.layers.Lambda(lambda x: x + 1)
    config = layer.get_config()
    keras.layers.Lambda.from_config(config)
    self.assertNotIn(self.__class__.__name__, dir(core))
class TestStatefulLambda(keras_parameterized.TestCase):
  """Tests for `Lambda` layers that touch `tf.Variable` state."""

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_lambda_with_variable_in_model(self):
    v = tf.Variable(1., trainable=True)

    def lambda_fn(x, v):
      return x * v

    # While it is generally not advised to mix Variables with Lambda layers, if
    # the variables are explicitly set as attributes then they are still
    # tracked. This is consistent with the base Layer behavior.
    layer = keras.layers.Lambda(lambda_fn, arguments={'v': v})
    self.assertLen(layer.trainable_weights, 0)
    layer.v = v
    self.assertLen(layer.trainable_weights, 1)

    model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
    model.compile(
        keras.optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        run_eagerly=testing_utils.should_run_eagerly())
    x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
    model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
    self.assertLen(model.trainable_weights, 1)
    self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.)

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_creation_inside_lambda(self):
    # Creating Variables inside the Lambda's function is an error because the
    # layer cannot guarantee they are reused across calls.
    def lambda_fn(x):
      scale = tf.Variable(1., trainable=True, name='scale')
      shift = tf.Variable(1., trainable=True, name='shift')
      return x * scale + shift

    expected_error = textwrap.dedent(r"""
        ( )?The following Variables were created within a Lambda layer \(shift_and_scale\)
        ( )?but are not tracked by said layer:
        ( )? <tf.Variable \'.*shift_and_scale/scale:0\'.+
        ( )? <tf.Variable \'.*shift_and_scale/shift:0\'.+
        ( )?The layer cannot safely ensure proper Variable reuse.+""")

    with self.assertRaisesRegex(ValueError, expected_error):
      layer = keras.layers.Lambda(lambda_fn, name='shift_and_scale')
      model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_transitive_variable_creation(self):
    dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones')

    # Variables created by a layer invoked inside the Lambda (here, `dense`
    # being built on first call) are also untracked and must error.
    def bad_lambda_fn(x):
      return dense(x + 1)  # Dense layer is built on first call

    expected_error = textwrap.dedent(r"""
        ( )?The following Variables were created within a Lambda layer \(bias_dense\)
        ( )?but are not tracked by said layer:
        ( )? <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
        ( )?The layer cannot safely ensure proper Variable reuse.+""")

    with self.assertRaisesRegex(ValueError, expected_error):
      layer = keras.layers.Lambda(bad_lambda_fn, name='bias_dense')
      model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_warns_on_variable_capture(self):
    v = tf.Variable(1., trainable=True)

    # Merely *capturing* an outside Variable is allowed but warns.
    def lambda_fn(x):
      return x * v

    expected_warning = textwrap.dedent(r"""
        ( )?The following Variables were used a Lambda layer\'s call \(lambda\), but
        ( )?are not present in its tracked objects:
        ( )? <tf.Variable \'.*Variable:0\'.+
        ( )?It is possible that this is intended behavior.+""")

    layer = keras.layers.Lambda(lambda_fn)

    def patched_warn(msg):
      raise ValueError(msg)

    # Replace `_warn` so the warning surfaces as an error we can assert on.
    layer._warn = patched_warn

    with self.assertRaisesRegex(ValueError, expected_warning):
      model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
  """Tests for the core layers: Masking, Activation, Reshape, Permute,
  Flatten, RepeatVector, Dense and ActivityRegularization."""
  def test_masking(self):
    testing_utils.layer_test(
        keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
  def test_keras_mask(self):
    """Masking attaches a `_keras_mask` to its output."""
    x = np.ones((10, 10))
    y = keras.layers.Masking(1.)(x)
    self.assertTrue(hasattr(y, '_keras_mask'))
    self.assertIsNotNone(y._keras_mask)
    # Every row is all-ones (== mask_value), so every timestep is masked out.
    self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
  def test_compute_mask_with_positional_mask_arg(self):
    class MyLayer(keras.layers.Layer):
      def call(self, inputs, mask=None):
        return inputs
      def compute_mask(self, inputs, mask=None):
        if mask is not None:
          return tf.ones(())
        else:
          return tf.zeros(())
    x, mask = tf.ones((1, 1)), tf.ones((1, 1))
    layer = MyLayer()
    y = layer(x, mask)
    # Check that `mask` was correctly sent to `compute_mask`.
    self.assertEqual(keras.backend.get_value(y._keras_mask), 1)
  def test_activation(self):
    # with string argument
    testing_utils.layer_test(
        keras.layers.Activation,
        kwargs={'activation': 'relu'},
        input_shape=(3, 2))
    # with function argument
    testing_utils.layer_test(
        keras.layers.Activation,
        kwargs={'activation': keras.backend.relu},
        input_shape=(3, 2))
  def test_reshape(self):
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (8, 1)},
        input_shape=(3, 2, 4))
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (-1, 1)},
        input_shape=(3, 2, 4))
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (1, -1)},
        input_shape=(3, 2, 4))
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (-1, 1)},
        input_shape=(None, None, 2))
  def test_reshape_set_static_shape(self):
    input_layer = keras.Input(batch_shape=(1, None))
    reshaped = keras.layers.Reshape((1, 100))(input_layer)
    # Make sure the batch dim is not lost after array_ops.reshape.
    self.assertEqual(reshaped.shape, [1, 1, 100])
  def test_permute(self):
    testing_utils.layer_test(
        keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
  def test_permute_errors_on_invalid_starting_dims_index(self):
    # `dims` indexing starts at 1; 0 (the batch dim) is invalid.
    with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
      testing_utils.layer_test(
          keras.layers.Permute,
          kwargs={'dims': (0, 1, 2)},
          input_shape=(3, 2, 4))
  def test_permute_errors_on_invalid_set_of_dims_indices(self):
    # `dims` must be a permutation of 1..rank-1; 4 is out of range here.
    with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
      testing_utils.layer_test(
          keras.layers.Permute,
          kwargs={'dims': (1, 4, 2)},
          input_shape=(3, 2, 4))
  def test_flatten(self):
    testing_utils.layer_test(
        keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
    # Test channels_first
    inputs = np.random.random((10, 3, 5, 5)).astype('float32')
    outputs = testing_utils.layer_test(
        keras.layers.Flatten,
        kwargs={'data_format': 'channels_first'},
        input_data=inputs)
    # channels_first inputs are transposed to channels_last before flattening.
    target_outputs = np.reshape(
        np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
    self.assertAllClose(outputs, target_outputs)
  def test_flatten_scalar_channels(self):
    testing_utils.layer_test(keras.layers.Flatten, kwargs={}, input_shape=(3,))
    # Test channels_first
    inputs = np.random.random((10,)).astype('float32')
    outputs = testing_utils.layer_test(
        keras.layers.Flatten,
        kwargs={'data_format': 'channels_first'},
        input_data=inputs)
    target_outputs = np.expand_dims(inputs, -1)
    self.assertAllClose(outputs, target_outputs)
  def test_repeat_vector(self):
    testing_utils.layer_test(
        keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
  def test_dense(self):
    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
  def test_dense_output(self):
    """Dense produces matching results for dense, sparse and ragged inputs."""
    dense_inputs = tf.convert_to_tensor(
        np.random.uniform(size=(10, 10)).astype('f'))
    # Create some sparse data where multiple rows and columns are missing.
    sparse_inputs = tf.SparseTensor(
        indices=np.random.randint(low=0, high=10, size=(5, 2)),
        values=np.random.uniform(size=(5,)).astype('f'),
        dense_shape=[10, 10])
    sparse_inputs = tf.sparse.reorder(sparse_inputs)
    # Create some ragged data.
    ragged_inputs = tf.RaggedTensor.from_row_splits(
        np.random.uniform(size=(10, 10)).astype('f'),
        row_splits=[0, 4, 6, 6, 9, 10])
    layer = keras.layers.Dense(
        5,
        kernel_initializer=keras.initializers.RandomUniform(),
        bias_initializer=keras.initializers.RandomUniform(),
        dtype='float32')
    dense_outputs = layer(dense_inputs)
    sparse_outputs = layer(sparse_inputs)  # fixed misspelled local (was `sparse_outpus`)
    ragged_outputs = layer(ragged_inputs)
    # Reference results computed directly from the layer's weights.
    expected_dense = tf.add(
        tf.matmul(dense_inputs, keras.backend.get_value(layer.kernel)),
        keras.backend.get_value(layer.bias))
    expected_sparse = tf.add(
        tf.matmul(
            tf.sparse.to_dense(sparse_inputs),
            keras.backend.get_value(layer.kernel)),
        keras.backend.get_value(layer.bias))
    expected_ragged_values = tf.add(
        tf.matmul(ragged_inputs.flat_values,
                  keras.backend.get_value(layer.kernel)),
        keras.backend.get_value(layer.bias))
    expected_ragged = tf.RaggedTensor.from_row_splits(
        expected_ragged_values, row_splits=[0, 4, 6, 6, 9, 10])
    self.assertAllClose(dense_outputs, expected_dense)
    self.assertAllClose(sparse_outputs, expected_sparse)
    self.assertAllClose(ragged_outputs, expected_ragged)
  def test_dense_dtype(self):
    inputs = tf.convert_to_tensor(np.random.randint(low=0, high=7, size=(2, 2)))
    layer = keras.layers.Dense(5, dtype='float32')
    outputs = layer(inputs)
    self.assertEqual(outputs.dtype, 'float32')
  def test_dense_with_policy(self):
    """Under mixed_float16 the output is float16 but the kernel stays float32."""
    inputs = tf.convert_to_tensor(np.random.randint(low=0, high=7, size=(2, 2)))
    layer = keras.layers.Dense(5, dtype=policy.Policy('mixed_float16'))
    outputs = layer(inputs)
    output_signature = layer.compute_output_signature(
        tf.TensorSpec(dtype='float16', shape=(2, 2)))
    self.assertEqual(output_signature.dtype, tf.float16)
    self.assertEqual(output_signature.shape, (2, 5))
    self.assertEqual(outputs.dtype, 'float16')
    self.assertEqual(layer.kernel.dtype, 'float32')
  def test_dense_regularization(self):
    layer = keras.layers.Dense(
        3,
        kernel_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l1',
        activity_regularizer='l2',
        name='dense_reg')
    layer(keras.backend.variable(np.ones((2, 4))))
    # One loss per regularizer: kernel, bias, activity.
    self.assertLen(layer.losses, 3)
  def test_dense_constraints(self):
    k_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = keras.layers.Dense(
        3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
    layer(keras.backend.variable(np.ones((2, 4))))
    self.assertEqual(layer.kernel.constraint, k_constraint)
    self.assertEqual(layer.bias.constraint, b_constraint)
  def test_activity_regularization(self):
    layer = keras.layers.ActivityRegularization(l1=0.1)
    layer(keras.backend.variable(np.ones((2, 4))))
    self.assertLen(layer.losses, 1)
    config = layer.get_config()
    self.assertEqual(config.pop('l1'), 0.1)
  def test_numpy_inputs(self):
    """Layers accept raw NumPy arrays when executing eagerly."""
    if tf.executing_eagerly():
      layer = keras.layers.RepeatVector(2)
      x = np.ones((10, 10))
      self.assertAllEqual(np.ones((10, 2, 10)), layer(x))
      layer = keras.layers.Concatenate()
      x, y = np.ones((10, 10)), np.ones((10, 10))
      self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
@keras_parameterized.run_all_keras_modes
class TFOpLambdaTest(keras_parameterized.TestCase):
  def test_non_tf_symbol(self):
    """A TFOpLambda built from a non-TF callable has no API symbol and
    therefore cannot be serialized via `get_config`."""
    def dummy_func(a, b):
      return a + b
    op_layer = core.TFOpLambda(dummy_func)
    # The layer is named after the wrapped callable, but no TF symbol
    # could be resolved for it.
    self.assertEqual(op_layer.name, 'dummy_func')
    self.assertIsNone(op_layer.symbol)
    # With no symbol, serialization must fail loudly.
    with self.assertRaisesRegex(ValueError, 'was generated from .*dummy_func'):
      op_layer.get_config()
if __name__ == '__main__':
  # Discover and run all test cases above via the TensorFlow test runner.
  tf.test.main()
| 21,332 | 33.519417 | 89 | py |
keras | keras-master/keras/layers/core/masking.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Masking layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend as K
from keras.engine.base_layer import Layer
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Masking')
class Masking(Layer):
  """Masks a sequence by using a mask value to skip timesteps.

  A timestep (dimension #1 of the input tensor) is masked — i.e. skipped by
  all downstream layers that support masking — whenever every value of the
  input at that timestep equals `mask_value`. If a downstream layer does not
  support masking but receives such a mask, an exception will be raised.

  Example:

  Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
  to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you
  lack data for these timesteps. You can:

  - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
  - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

  ```python
  samples, timesteps, features = 32, 10, 8
  inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
  inputs[:, 3, :] = 0.
  inputs[:, 5, :] = 0.

  model = tf.keras.models.Sequential()
  model.add(tf.keras.layers.Masking(mask_value=0.,
                                    input_shape=(timesteps, features)))
  model.add(tf.keras.layers.LSTM(32))
  output = model(inputs)
  # The time step 3 and 5 will be skipped from LSTM calculation.
  ```

  See [the masking and padding guide](
    https://www.tensorflow.org/guide/keras/masking_and_padding)
  for more details.
  """

  def __init__(self, mask_value=0., **kwargs):
    super(Masking, self).__init__(**kwargs)
    self.mask_value = mask_value
    # Output and mask are produced together in `call` via `_keras_mask`.
    self._compute_output_and_mask_jointly = True
    self.supports_masking = True

  def compute_mask(self, inputs, mask=None):
    # A timestep is kept iff any of its features differs from `mask_value`.
    return K.any(tf.not_equal(inputs, self.mask_value), axis=-1)

  def call(self, inputs):
    keep = K.any(
        tf.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
    outputs = tf.cast(keep, inputs.dtype) * inputs
    # Attach the mask so downstream layers receive output and mask jointly.
    outputs._keras_mask = tf.squeeze(keep, axis=-1)  # pylint: disable=protected-access
    return outputs

  def compute_output_shape(self, input_shape):
    # Masking never changes the shape of its input.
    return input_shape

  def get_config(self):
    base_config = super(Masking, self).get_config()
    return {**base_config, 'mask_value': self.mask_value}
| 3,326 | 36.382022 | 95 | py |
keras | keras-master/keras/layers/core/repeat_vector.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the RepeatVector layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend as K
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
  """Repeats the input n times.

  Example:

  ```python
  model = Sequential()
  model.add(Dense(32, input_dim=32))
  # now: model.output_shape == (None, 32)
  # note: `None` is the batch dimension
  model.add(RepeatVector(3))
  # now: model.output_shape == (None, 3, 32)
  ```

  Args:
    n: Integer, repetition factor.

  Input shape: 2D tensor of shape `(num_samples, features)`.

  Output shape: 3D tensor of shape `(num_samples, n, features)`.
  """

  def __init__(self, n, **kwargs):
    super(RepeatVector, self).__init__(**kwargs)
    # Fix: validate `n` *before* storing it, so a failed construction can
    # never leave the layer holding a non-integer repetition factor.
    if not isinstance(n, int):
      raise TypeError(f'Expected an integer value for `n`, got {type(n)}.')
    self.n = n
    self.input_spec = InputSpec(ndim=2)

  def compute_output_shape(self, input_shape):
    # (batch, features) -> (batch, n, features): insert the repetition axis.
    input_shape = tf.TensorShape(input_shape).as_list()
    return tf.TensorShape([input_shape[0], self.n, input_shape[1]])

  def call(self, inputs):
    # Repeat the 2D input n times along a new axis 1.
    return K.repeat(inputs, self.n)

  def get_config(self):
    config = {'n': self.n}
    base_config = super(RepeatVector, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| 2,197 | 32.30303 | 80 | py |
keras | keras-master/keras/layers/normalization/layer_normalization_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.normalization import layer_normalization
def _run_layernorm_correctness_test(layer, dtype='float32'):
  """Fits `layer` on normally-distributed data and checks it normalizes.

  After training, the layer output — with the learned beta/gamma affine
  transform removed — should have approximately zero mean and unit std.

  Args:
    layer: the layer-normalization layer class to instantiate.
    dtype: dtype for the normalization layer and the generated data.
  """
  model = keras.models.Sequential()
  # NOTE(review): the input is always cast to float16 here, regardless of
  # `dtype` — presumably to exercise lower-precision input handling; confirm
  # this is intentional for the float32 path.
  model.add(keras.layers.Lambda(lambda x: tf.cast(x, dtype='float16')))
  norm = layer(input_shape=(2, 2, 2), dtype=dtype)
  model.add(norm)
  model.compile(
      loss='mse',
      optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
      run_eagerly=testing_utils.should_run_eagerly())
  # centered on 5.0, variance 10.0
  x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
       .astype(dtype))
  model.fit(x, x, epochs=4, verbose=0)
  out = model.predict(x)
  # Undo the learned affine transform before checking the statistics.
  out -= keras.backend.eval(norm.beta)
  out /= keras.backend.eval(norm.gamma)
  np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
  np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class LayerNormalizationTest(keras_parameterized.TestCase):
  """API-level tests for LayerNormalization: config options, weights,
  regularization, training behavior, and argument validation."""
  @keras_parameterized.run_all_keras_modes
  def test_basic_layernorm(self):
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            'gamma_regularizer': keras.regularizers.l2(0.01),
            'beta_regularizer': keras.regularizers.l2(0.01)
        },
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            'gamma_initializer': 'ones',
            'beta_initializer': 'ones',
        },
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'scale': False,
                'center': False},
        input_shape=(3, 3))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -2, -1)},
        input_shape=(2, 8, 8, 3))
    # Degenerate case: one dimension is empty.
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        input_shape=(1, 0, 10))
  @keras_parameterized.run_all_keras_modes
  def test_non_fused_layernorm(self):
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': -2},
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -2)},
        input_shape=(2, 8, 8, 3))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -1)},
        input_shape=(2, 8, 8, 3))
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_layernorm_weights(self):
    # Without scale/center, no weights are created at all.
    layer = keras.layers.LayerNormalization(scale=False, center=False)
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.weights), 0)
    # Defaults create gamma and beta, both trainable.
    layer = keras.layers.LayerNormalization()
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.trainable_weights), 2)
    self.assertEqual(len(layer.weights), 2)
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_layernorm_regularization(self):
    layer = keras.layers.LayerNormalization(
        gamma_regularizer='l1', beta_regularizer='l1')
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.losses), 2)
    max_norm = keras.constraints.max_norm
    layer = keras.layers.LayerNormalization(
        gamma_constraint=max_norm, beta_constraint=max_norm)
    layer.build((None, 3, 4))
    self.assertEqual(layer.gamma.constraint, max_norm)
    self.assertEqual(layer.beta.constraint, max_norm)
  @keras_parameterized.run_all_keras_modes
  def test_layernorm_convnet_channel_last(self):
    model = keras.models.Sequential()
    norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
    model.add(norm)
    model.compile(
        loss='mse',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
        run_eagerly=testing_utils.should_run_eagerly())
    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # Remove the learned affine transform, then check normalization.
    out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
    out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
    np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
    np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
  @keras_parameterized.run_all_keras_modes
  def test_layernorm_correctness(self):
    _run_layernorm_correctness_test(
        layer_normalization.LayerNormalization, dtype='float32')
  @keras_parameterized.run_all_keras_modes
  def test_layernorm_mixed_precision(self):
    _run_layernorm_correctness_test(
        layer_normalization.LayerNormalization, dtype='float16')
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testIncorrectAxisType(self):
    with self.assertRaisesRegex(TypeError,
                                r'Expected an int or a list/tuple of ints'):
      _ = layer_normalization.LayerNormalization(axis={'axis': -1})
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testInvalidAxis(self):
    # axis=3 is out of range for a rank-3 input.
    with self.assertRaisesRegex(ValueError, r'Invalid axis: 3'):
      layer_norm = layer_normalization.LayerNormalization(axis=3)
      layer_norm.build(input_shape=(2, 2, 2))
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testDuplicateAxis(self):
    with self.assertRaisesRegex(ValueError, r'Duplicate axis:'):
      layer_norm = layer_normalization.LayerNormalization(axis=[-1, -1])
      layer_norm.build(input_shape=(2, 2, 2))
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testFusedAttr(self):
    # Normalizing over the trailing axes allows the fused implementation.
    layer_norm = layer_normalization.LayerNormalization(axis=[-2, -1])
    layer_norm.build(input_shape=(2, 2, 2))
    self.assertEqual(layer_norm._fused, True)
class LayerNormalizationNumericsTest(keras_parameterized.TestCase):
  """Tests LayerNormalization has correct and numerically stable outputs."""
  def _expected_layer_norm(self, x, beta, gamma, batch_input_shape, axis,
                           epsilon):
    """Returns the layer norm, which is computed using NumPy."""
    # Shape that broadcasts beta/gamma over the non-normalized dimensions.
    broadcast_shape = [batch_input_shape[i] if i in axis else 1
                       for i in range(len(batch_input_shape))]
    mean = np.mean(x, axis=axis, keepdims=True)
    var = np.var(x, axis=axis, keepdims=True)
    expected = (x - mean) / np.sqrt(var + epsilon)
    expected *= np.reshape(gamma, broadcast_shape)
    expected += np.reshape(beta, broadcast_shape)
    return expected
  def _test_forward_pass(self, batch_input_shape, axis, fp64_tol=1e-14,
                         fp32_tol=1e-6, fp16_tol=1e-2):
    """Tests the forward pass of layer layer_normalization.

    Args:
      batch_input_shape: The input shape that will be used to test, including
        the batch dimension.
      axis: A list of axes to normalize. Will be passed to the `axis` argument
        of Layerlayer_normalization.
      fp64_tol: The relative and absolute tolerance for float64.
      fp32_tol: The relative and absolute tolerance for float32.
      fp16_tol: The relative and absolute tolerance for float16.
    """
    param_shape = [batch_input_shape[i] for i in axis]
    param_elems = 1
    for dim in param_shape:
      param_elems *= dim
    # Distinct, deterministic beta/gamma so the affine transform is exercised.
    beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
    gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
    x = np.random.normal(size=batch_input_shape)
    for epsilon in 1e-12, 1e-3:
      expected = self._expected_layer_norm(x, beta, gamma, batch_input_shape,
                                           axis, epsilon)
      for dtype in 'float64', 'float32', 'float16':
        norm = layer_normalization.LayerNormalization(
            axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
            epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
            gamma_initializer=keras.initializers.constant(gamma))
        y = norm(keras.backend.cast(x, dtype))
        actual = keras.backend.eval(y)
        if dtype == 'float64':
          tol = fp64_tol
        elif dtype == 'float32':
          tol = fp32_tol
        else:
          assert dtype == 'float16'
          tol = fp16_tol
        # We use absolute tolerances in addition to relative tolerances, because
        # some of the values are very close to zero.
        self.assertAllClose(expected, actual, rtol=tol, atol=tol)
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_forward(self):
    # For numeric stability, we ensure the axis's dimension(s) have at least 4
    # elements.
    self._test_forward_pass((4, 3), (0,))
    self._test_forward_pass((3, 4), (1,))
    self._test_forward_pass((4, 3, 2), (0,))
    self._test_forward_pass((2, 4, 2), (1,))
    self._test_forward_pass((2, 3, 4), (2,), fp16_tol=5e-2)
    self._test_forward_pass((2, 3, 2), (0, 2))
    self._test_forward_pass((2, 2, 2, 2), (1, 3))
    self._test_forward_pass((2, 2, 2, 2), (2, 3))
    self._test_forward_pass((2, 3, 4, 5), (3,))
  def _test_backward_pass(self, batch_input_shape, axis, fp64_tol=1e-5,
                          fp32_tol=1e-5, fp16_tol=2e-2):
    """Tests the backwards pass of layer layer_normalization.

    Args:
      batch_input_shape: The input shape that will be used to test, including
        the batch dimension.
      axis: A list of axes to normalize. Will be passed to the `axis` argument
        of Layerlayer_normalization.
      fp64_tol: The relative and absolute tolerance for float64.
      fp32_tol: The relative and absolute tolerance for float32.
      fp16_tol: The relative and absolute tolerance for float16.
    """
    param_shape = [batch_input_shape[i] for i in axis]
    param_elems = 1
    for dim in param_shape:
      param_elems *= dim
    beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
    gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
    x = np.random.normal(size=batch_input_shape)
    for epsilon in 1e-12, 1e-3:
      # Float64 must come first in this list, as we use the float64 numerical
      # gradients to compare to the float32 and float16 symbolic gradients as
      # well. Computing float32/float16 numerical gradients is too numerically
      # unstable.
      for dtype in 'float64', 'float32', 'float16':
        norm = layer_normalization.LayerNormalization(
            axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
            epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
            gamma_initializer=keras.initializers.constant(gamma))
        norm.build(x.shape)
        # pylint: disable=cell-var-from-loop
        def forward_fn(x, beta, gamma):
          # We must monkey-patch the attributes of `norm` with the function
          # arguments, so that the gradient checker will properly compute their
          # gradients. The gradient checker computes gradients with respect to
          # the input arguments of `f`.
          with tf.compat.v1.test.mock.patch.object(norm, 'beta', beta):
            with tf.compat.v1.test.mock.patch.object(norm, 'gamma', gamma):
              return norm(x)
        # pylint: enable=cell-var-from-loop
        results = tf.test.compute_gradient(
            forward_fn, [keras.backend.cast(x, dtype), norm.beta, norm.gamma])
        ([x_grad_t, beta_grad_t, gamma_grad_t],
         [x_grad_n, beta_grad_n, gamma_grad_n]) = results
        if dtype == 'float64':
          # We use the float64 numeric gradients as the reference, to compare
          # against the symbolic gradients for all dtypes.
          x_grad_ref = x_grad_n
          beta_grad_ref = beta_grad_n
          gamma_grad_ref = gamma_grad_n
          tol = fp64_tol
        elif dtype == 'float32':
          tol = fp32_tol
        else:
          assert dtype == 'float16'
          tol = fp16_tol
        # We use absolute tolerances in addition to relative tolerances, because
        # some of the values are very close to zero.
        self.assertAllClose(x_grad_t, x_grad_ref, rtol=tol, atol=tol)
        self.assertAllClose(beta_grad_t, beta_grad_ref, rtol=tol, atol=tol)
        self.assertAllClose(gamma_grad_t, gamma_grad_ref, rtol=tol, atol=tol)
  # The gradient_checker_v2 does not work properly with LayerNorm in graph mode.
  @testing_utils.run_v2_only
  def test_backward(self):
    # For numeric stability, we ensure the axis's dimension(s) have at least 4
    # elements.
    self._test_backward_pass((4, 3), (0,))
    self._test_backward_pass((2, 4, 2), (1,))
    self._test_backward_pass((2, 3, 4), (2,))
    self._test_backward_pass((2, 3, 2), (0, 2), fp64_tol=5e-4, fp32_tol=5e-4)
    self._test_backward_pass((2, 2, 2, 2), (1, 3))
    self._test_backward_pass((2, 2, 2, 2), (2, 3))
if __name__ == '__main__':
  # Discover and run all test cases above via the TensorFlow test runner.
  tf.test.main()
| 13,718 | 40.44713 | 80 | py |
keras | keras-master/keras/layers/normalization/batch_normalization_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.normalization import batch_normalization
from keras.layers.normalization import batch_normalization_v1
class BatchNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm(self):
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 2, 4, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet(self):
if tf.test.is_gpu_available(cuda_only=True):
with self.session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_correctness(self):
_run_batchnorm_correctness_test(
batch_normalization_v1.BatchNormalization, dtype='float32')
_run_batchnorm_correctness_test(
batch_normalization.BatchNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_batchnorm_float16(self):
_run_batchnorm_correctness_test(
batch_normalization_v1.BatchNormalization, dtype='float16')
_run_batchnorm_correctness_test(
batch_normalization.BatchNormalization, dtype='float16')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision(self):
  """Under mixed_float16, outputs are float16 but variables stay float32."""
  layer = keras.layers.BatchNormalization(
      axis=-1,
      input_shape=(4, 4, 3),
      momentum=0.8,
      dtype='mixed_float16')
  inputs = np.random.normal(size=(10, 4, 4, 3))
  outputs = layer(inputs)
  # Compute dtype of the policy applies to the output...
  self.assertEqual(outputs.dtype, 'float16')
  # ...while the layer's weights are kept in the float32 variable dtype.
  self.assertEqual(layer.beta.dtype.base_dtype, 'float32')
  self.assertEqual(layer.gamma.dtype.base_dtype, 'float32')
@combinations.generate(combinations.combine(mode=['graph', 'eager'],
                                            fused=[True, False]))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision_does_not_overflow(self, fused):
  """Large-magnitude inputs must normalize without inf/nan in float16."""
  layer = keras.layers.BatchNormalization(
      axis=-1,
      input_shape=(1, 1, 1),
      fused=fused,
      dtype='mixed_float16')
  # Squaring +/-1000 overflows float16, so this only passes if the layer
  # computes its statistics in float32.
  inputs = np.array([-1000., 1000.]).reshape((2, 1, 1, 1))
  outputs = layer(inputs, training=True)
  expected = np.array([-1.0, 1.0]).reshape((2, 1, 1, 1))
  self.assertAllClose(keras.backend.eval(outputs), expected)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_non_trainable_with_fit(self):
  """Freezing a BN layer and recompiling must switch training to inference."""
  # We use the same data shape for all the data we use in this test.
  # This will prevent any used tf.functions from retracing.
  # This helps us verify that changing trainable and recompiling really
  # does update the training loop, rather than a different data shape
  # triggering a retrace.
  data_shape = (100, 3)

  inputs = keras.Input((3,))
  bn = batch_normalization.BatchNormalization()
  outputs = bn(inputs)
  model = keras.Model(inputs, outputs)
  model.compile(
      'rmsprop',
      'mse',
      run_eagerly=testing_utils.should_run_eagerly())
  # One fit() call so the moving statistics are no longer at their initial
  # values before we measure the evaluation loss.
  model.fit(np.random.random(data_shape), np.random.random(data_shape))

  test_data = np.random.random(data_shape)
  test_targets = np.random.random(data_shape)
  test_loss = model.evaluate(test_data, test_targets)

  # With the layer frozen and the model recompiled, a training step should
  # behave exactly like evaluation (moving statistics used, not updated),
  # so the losses must match.
  bn.trainable = False
  model.compile(
      'rmsprop',
      'mse',
      run_eagerly=testing_utils.should_run_eagerly())
  train_loss = model.train_on_batch(test_data, test_targets)
  self.assertAlmostEqual(test_loss, train_loss)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_eager_batchnorm_in_custom_model_call_with_tf_function(self):
  """BN moving statistics must still update when call() is a tf.function."""

  class MyModel(keras.Model):

    def __init__(self):
      super(MyModel, self).__init__()
      self.bn = keras.layers.BatchNormalization()

    @tf.function()
    def call(self, x, training):
      return self.bn(x, training=training)

  model = MyModel()

  # Repeatedly feed a constant input in training mode so the moving
  # statistics converge towards mean=0.5, variance=0.
  for _ in range(10):
    x = tf.constant(0.5, shape=[1, 1])
    model(x, training=True)

  # Make sure the moving mean and variance have been updated
  self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3)
  self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
@combinations.generate(combinations.combine(mode=['eager']))
def test_bessels_correction(self):
  # Bessel's correction is currently only used in the fused case. In the
  # future, it may be used in the nonfused case as well.

  # 4D input selects the fused implementation.
  x = tf.constant([0., 2.], shape=[2, 1, 1, 1])
  layer = batch_normalization.BatchNormalization(
      momentum=0.5, moving_variance_initializer='zeros')
  layer(x, training=True)
  self.assertTrue(layer.fused)
  # Since fused is used, Bessel's correction is used. The variance of [0, 2]
  # is 2 with Bessel's correction. Since the momentum is 0.5, the variance is
  # 2 * 0.5 == 1.
  self.assertAllEqual(self.evaluate(layer.moving_variance), [1.])

  # 5D input also selects the fused implementation.
  x = tf.constant([0., 2.], shape=[2, 1, 1, 1, 1])
  layer = batch_normalization.BatchNormalization(
      momentum=0.5, moving_variance_initializer='zeros')
  layer(x, training=True)
  self.assertTrue(layer.fused)
  # Since fused is used, Bessel's correction is used. The variance of [0, 2]
  # is 2 with Bessel's correction. Since the momentum is 0.5, the variance is
  # 2 * 0.5 == 1.
  self.assertAllEqual(self.evaluate(layer.moving_variance), [1.])
class BatchNormalizationV1Test(keras_parameterized.TestCase):
  """Tests specific to the V1 BatchNormalization layer."""

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_v1_fused_attribute(self):
    # With the V1 default, `fused` resolves to True for 4D inputs at build.
    norm = batch_normalization_v1.BatchNormalization()
    inp = keras.layers.Input((4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # An explicit fused=False is preserved through build.
    norm = batch_normalization_v1.BatchNormalization(fused=False)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # virtual_batch_size is incompatible with the fused implementation: the
    # V1 layer initially reports fused=True but falls back to False on build.
    norm = batch_normalization_v1.BatchNormalization(virtual_batch_size=2)
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(2, 2, 2))
    norm(inp)
    self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(keras_parameterized.TestCase):
  """Tests specific to the V2 BatchNormalization layer."""

  @keras_parameterized.run_all_keras_modes
  def test_basic_batchnorm_v2(self):
    # Generic layer contract check for both an explicitly fused layer (4D
    # input) and the default fused=None resolution (3D input).
    testing_utils.layer_test(
        batch_normalization.BatchNormalization,
        kwargs={'fused': True},
        input_shape=(3, 3, 3, 3))
    testing_utils.layer_test(
        batch_normalization.BatchNormalization,
        kwargs={'fused': None},
        input_shape=(3, 3, 3))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_v2_fused_attribute(self):
    # With the V2 default, `fused` stays None until build, then resolves
    # based on the input rank: 4D input -> fused implementation.
    norm = batch_normalization.BatchNormalization()
    self.assertIsNone(norm.fused)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # 3D input -> non-fused.
    norm = batch_normalization.BatchNormalization()
    self.assertIsNone(norm.fused)
    inp = keras.layers.Input(shape=(4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # 5D input -> fused.
    norm = batch_normalization.BatchNormalization()
    self.assertIsNone(norm.fused)
    inp = keras.layers.Input(shape=(4, 4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # virtual_batch_size disables the fused implementation up front.
    norm = batch_normalization.BatchNormalization(virtual_batch_size=2)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # An explicit fused=False is preserved through build.
    norm = batch_normalization.BatchNormalization(fused=False)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # fused=True with a compatible axis is kept.
    norm = batch_normalization.BatchNormalization(fused=True, axis=[3])
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # Constructor arguments incompatible with the fused implementation are
    # rejected eagerly when fused=True is requested explicitly.
    with self.assertRaisesRegex(ValueError, 'fused.*renorm'):
      batch_normalization.BatchNormalization(fused=True, renorm=True)

    with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
      batch_normalization.BatchNormalization(fused=True, axis=2)

    with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
      batch_normalization.BatchNormalization(fused=True, axis=[1, 3])

    with self.assertRaisesRegex(ValueError, 'fused.*virtual_batch_size'):
      batch_normalization.BatchNormalization(fused=True, virtual_batch_size=2)

    with self.assertRaisesRegex(ValueError, 'fused.*adjustment'):
      batch_normalization.BatchNormalization(
          fused=True, adjustment=lambda _: (1, 0))

    # An input rank the fused op cannot handle fails at build time.
    norm = batch_normalization.BatchNormalization(fused=True)
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(4, 4))
    with self.assertRaisesRegex(ValueError, '4D or 5D input tensors'):
      norm(inp)

  def test_updates_in_wrap_function(self):

    def my_func():
      layer = batch_normalization_v1.BatchNormalization()
      x = tf.ones((10, 1))
      y = layer(x, training=True)
      # Updates should be tracked in a `wrap_function`.
      self.assertLen(layer.updates, 2)
      return y

    wrapped_fn = tf.compat.v1.wrap_function(my_func, [])
    wrapped_fn()

  @keras_parameterized.run_all_keras_modes
  def test_basic_batchnorm_v2_none_shape_and_virtual_batch_size(self):
    # Test case for GitHub issue for 32380
    # Building with unknown spatial dimensions plus virtual_batch_size must
    # not raise.
    norm = batch_normalization.BatchNormalization(virtual_batch_size=8)
    inp = keras.layers.Input(shape=(None, None, 3))
    _ = norm(inp)
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
  """Fits a single-BN-layer model on offset data and checks normalization.

  Args:
    layer: BatchNormalization class (V1 or V2) to instantiate.
    dtype: Input/layer dtype; 'float16' adds a float32 cast for the loss.
    fused: Passed through to the layer's `fused` argument.
  """
  model = keras.models.Sequential()
  model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
  bn = layer(momentum=0.8, fused=fused)
  model.add(bn)
  if dtype == 'float16':
    # Keras models require float32 losses.
    model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))
  model.compile(
      loss='mse',
      optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
      run_eagerly=testing_utils.should_run_eagerly())

  # Data centered on 5.0 with stddev 10.0; a trained BN layer should map it
  # back to roughly zero mean / unit variance.
  data = np.random.normal(
      loc=5.0, scale=10.0, size=(1000, 2, 2, 2)).astype(dtype)
  model.fit(data, data, epochs=4, verbose=0)
  predictions = model.predict(data)

  # Strip off the learned affine transform before checking the statistics.
  normalized = (predictions - keras.backend.eval(bn.beta)) / keras.backend.eval(
      bn.gamma)
  np.testing.assert_allclose(normalized.mean(), 0.0, atol=2e-1)
  np.testing.assert_allclose(normalized.std(), 1.0, atol=2e-1)
@parameterized.parameters([
    batch_normalization_v1.BatchNormalization,
    batch_normalization.BatchNormalization
])
class NormalizationLayersGraphModeOnlyTest(
    tf.test.TestCase, parameterized.TestCase):
  """Graph-mode-only checks, parameterized over the V1 and V2 BN layers."""

  def test_shared_batchnorm(self, layer):
    """Test that a BN layer can be shared across different data streams."""
    with self.cached_session():
      # Test single layer reuse
      bn = layer()
      x1 = keras.layers.Input(shape=(10,))
      _ = bn(x1)

      x2 = keras.layers.Input(shape=(10,))
      y2 = bn(x2)

      x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
      model = keras.models.Model(x2, y2)

      model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.01), 'mse')
      model.train_on_batch(x, x)

      # Test model-level reuse
      x3 = keras.layers.Input(shape=(10,))
      y3 = model(x3)
      new_model = keras.models.Model(x3, y3, name='new_model')

      new_model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.01),
                        'mse')
      new_model.train_on_batch(x, x)

  def test_that_trainable_disables_updates(self, layer):
    """Toggling `trainable` must enable/disable the BN statistics updates."""
    with self.cached_session():
      val_a = np.random.random((10, 4))
      val_out = np.random.random((10, 4))

      a = keras.layers.Input(shape=(4,))
      # NOTE: rebinds the parameter — `layer` is the class above and the
      # instance from here on.
      layer = layer(input_shape=(4,))
      b = layer(a)
      model = keras.models.Model(a, b)

      # Frozen model: a training step must not change predictions.
      model.trainable = False
      model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.01), 'mse')

      x1 = model.predict(val_a)
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      self.assertAllClose(x1, x2, atol=1e-7)

      # Trainable again: a training step must change predictions.
      model.trainable = True
      model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.01), 'mse')

      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      assert np.abs(np.sum(x1 - x2)) > 1e-5

      # Freezing just the layer has the same effect as freezing the model.
      layer.trainable = False
      model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.01), 'mse')

      x1 = model.predict(val_a)
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      self.assertAllClose(x1, x2, atol=1e-7)

  def test_batchnorm_trainable(self, layer):
    """Tests that batchnorm layer is trainable when learning phase is enabled.

    Computes mean and std for current inputs then
    applies batch normalization using them.

    Args:
      layer: Either V1 or V2 of BatchNormalization layer.
    """
    # TODO(fchollet): enable in all execution modes when issue with
    # learning phase setting is resolved.
    with tf.Graph().as_default(), self.cached_session():
      bn_mean = 0.5
      bn_std = 10.
      val_a = np.expand_dims(np.arange(10.), axis=1)

      def get_model(bn_mean, bn_std):
        inp = keras.layers.Input(shape=(1,))
        x = layer()(inp)
        model1 = keras.models.Model(inp, x)
        # Weight order here appears to be [gamma, beta, moving_mean,
        # moving_variance]: identity affine transform plus the given moving
        # statistics.
        model1.set_weights([
            np.array([1.]),
            np.array([0.]),
            np.array([bn_mean]),
            np.array([bn_std**2])
        ])
        return model1

      # Simulates training-mode with trainable layer.
      # Should use mini-batch statistics.
      with keras.backend.learning_phase_scope(1):
        model = get_model(bn_mean, bn_std)
        model.compile(loss='mse', optimizer='rmsprop')
        out = model.predict(val_a)
        self.assertAllClose(
            (val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| 18,451 | 35.611111 | 81 | py |
keras | keras-master/keras/layers/normalization/batch_normalization_v1.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch Normalization V1 layer."""
# pylint: disable=g-classes-have-attributes
from keras.layers.normalization import batch_normalization
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=missing-docstring
@keras_export(v1=['keras.layers.BatchNormalization'])
class BatchNormalization(batch_normalization.BatchNormalizationBase):
  # The V1 layer is the shared base with V2 behavior switched off; see
  # BatchNormalizationBase for everything this flag controls.
  _USE_V2_BEHAVIOR = False
| 1,076 | 40.423077 | 80 | py |
keras | keras-master/keras/layers/normalization/layer_normalization.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer Normalization layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LayerNormalization')
class LayerNormalization(Layer):
  """Layer normalization layer (Ba et al., 2016).

  Normalize the activations of the previous layer for each given example in a
  batch independently, rather than across a batch like Batch Normalization.
  i.e. applies a transformation that maintains the mean activation within each
  example close to 0 and the activation standard deviation close to 1.

  Given a tensor `inputs`, moments are calculated and normalization
  is performed across the axes specified in `axis`.

  Example:

  >>> data = tf.constant(np.arange(10).reshape(5, 2) * 10, dtype=tf.float32)
  >>> print(data)
  tf.Tensor(
  [[ 0. 10.]
   [20. 30.]
   [40. 50.]
   [60. 70.]
   [80. 90.]], shape=(5, 2), dtype=float32)

  >>> layer = tf.keras.layers.LayerNormalization(axis=1)
  >>> output = layer(data)
  >>> print(output)
  tf.Tensor(
  [[-1. 1.]
   [-1. 1.]
   [-1. 1.]
   [-1. 1.]
   [-1. 1.]], shape=(5, 2), dtype=float32)

  Notice that with Layer Normalization the normalization happens across the
  axes *within* each example, rather than across different examples in the
  batch.

  If `scale` or `center` are enabled, the layer will scale the normalized
  outputs by broadcasting them with a trainable variable `gamma`, and center
  the outputs by broadcasting with a trainable variable `beta`. `gamma` will
  default to a ones tensor and `beta` will default to a zeros tensor, so that
  centering and scaling are no-ops before training has begun.

  So, with scaling and centering enabled the normalization equations
  are as follows:

  Let the intermediate activations for a mini-batch to be the `inputs`.

  For each sample `x_i` in `inputs` with `k` features, we compute the mean and
  variance of the sample:

  ```python
  mean_i = sum(x_i[j] for j in range(k)) / k
  var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k
  ```

  and then compute a normalized `x_i_normalized`, including a small factor
  `epsilon` for numerical stability.

  ```python
  x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)
  ```

  And finally `x_i_normalized` is linearly transformed by `gamma` and `beta`,
  which are learned parameters:

  ```python
  output_i = x_i_normalized * gamma + beta
  ```

  `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and
  this part of the inputs' shape must be fully defined.

  For example:

  >>> layer = tf.keras.layers.LayerNormalization(axis=[1, 2, 3])
  >>> layer.build([5, 20, 30, 40])
  >>> print(layer.beta.shape)
  (20, 30, 40)
  >>> print(layer.gamma.shape)
  (20, 30, 40)

  Note that other implementations of layer normalization may choose to define
  `gamma` and `beta` over a separate set of axes from the axes being
  normalized across. For example, Group Normalization
  ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1
  corresponds to a Layer Normalization that normalizes across height, width,
  and channel and has `gamma` and `beta` span only the channel dimension.
  So, this Layer Normalization implementation will not match a Group
  Normalization layer with group size set to 1.

  Args:
    axis: Integer or List/Tuple. The axis or axes to normalize across.
      Typically this is the features axis/axes. The left-out axes are
      typically the batch axis/axes. This argument defaults to `-1`, the last
      dimension in the input.
    epsilon: Small float added to variance to avoid dividing by zero. Defaults
      to 1e-3.
    center: If True, add offset of `beta` to normalized tensor. If False,
      `beta` is ignored. Defaults to True.
    scale: If True, multiply by `gamma`. If False, `gamma` is not used.
      Defaults to True. When the next layer is linear (also e.g. `nn.relu`),
      this can be disabled since the scaling will be done by the next layer.
    beta_initializer: Initializer for the beta weight. Defaults to zeros.
    gamma_initializer: Initializer for the gamma weight. Defaults to ones.
    beta_regularizer: Optional regularizer for the beta weight. None by
      default.
    gamma_regularizer: Optional regularizer for the gamma weight. None by
      default.
    beta_constraint: Optional constraint for the beta weight. None by default.
    gamma_constraint: Optional constraint for the gamma weight. None by
      default.

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of
    integers, does not include the samples axis) when using this layer as the
    first layer in a model.

  Output shape:
    Same shape as input.

  Reference:
    - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).
  """

  def __init__(self,
               axis=-1,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               **kwargs):
    super(LayerNormalization, self).__init__(**kwargs)
    # `axis` is kept as-is here (int, or a copy of the list); it is converted
    # to a resolved list of non-negative ints in build().
    if isinstance(axis, (list, tuple)):
      self.axis = axis[:]
    elif isinstance(axis, int):
      self.axis = axis
    else:
      raise TypeError('Expected an int or a list/tuple of ints for the '
                      'argument \'axis\', but received: %r' % axis)

    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

    self.supports_masking = True

    # Indicates whether a faster fused implementation can be used. This will
    # be set to True or False in build().
    self._fused = None

  def _fused_can_be_used(self, ndims):
    """Returns false if fused implementation cannot be used.

    Check if the axis is contiguous and can be collapsed into the last axis.
    The self.axis is assumed to have no duplicates.
    """
    axis = sorted(self.axis)
    can_use_fused = False

    # Fused requires the normalized axes to be a contiguous run ending at
    # the last dimension.
    if axis[-1] == ndims - 1 and axis[-1] - axis[0] == len(axis) - 1:
      can_use_fused = True

    # fused_batch_norm will silently raise epsilon to be at least 1.001e-5,
    # so we cannot use the fused version if epsilon is below that value.
    # Also, the variable dtype must be float32, as fused_batch_norm only
    # supports float32 variables.
    if self.epsilon < 1.001e-5 or self.dtype != 'float32':
      can_use_fused = False

    return can_use_fused

  def build(self, input_shape):
    ndims = len(input_shape)
    if ndims is None:
      raise ValueError('Input shape %s has undefined rank.' % input_shape)

    # Convert axis to list and resolve negatives
    if isinstance(self.axis, int):
      self.axis = [self.axis]
    elif isinstance(self.axis, tuple):
      self.axis = list(self.axis)
    for idx, x in enumerate(self.axis):
      if x < 0:
        self.axis[idx] = ndims + x

    # Validate axes
    for x in self.axis:
      if x < 0 or x >= ndims:
        raise ValueError('Invalid axis: %d' % x)
    if len(self.axis) != len(set(self.axis)):
      raise ValueError('Duplicate axis: {}'.format(tuple(self.axis)))

    # gamma/beta span exactly the normalized axes, so those dimensions must
    # be statically known.
    param_shape = [input_shape[dim] for dim in self.axis]
    if self.scale:
      self.gamma = self.add_weight(
          name='gamma',
          shape=param_shape,
          initializer=self.gamma_initializer,
          regularizer=self.gamma_regularizer,
          constraint=self.gamma_constraint,
          trainable=True,
          experimental_autocast=False)
    else:
      self.gamma = None

    if self.center:
      self.beta = self.add_weight(
          name='beta',
          shape=param_shape,
          initializer=self.beta_initializer,
          regularizer=self.beta_regularizer,
          constraint=self.beta_constraint,
          trainable=True,
          experimental_autocast=False)
    else:
      self.beta = None

    self._fused = self._fused_can_be_used(ndims)

    self.built = True

  def call(self, inputs):
    # Compute the axes along which to reduce the mean / variance
    input_shape = inputs.shape
    ndims = len(input_shape)

    # Broadcasting only necessary for norm when the axis is not just
    # the last dimension
    broadcast_shape = [1] * ndims
    for dim in self.axis:
      broadcast_shape[dim] = input_shape.dims[dim].value

    def _broadcast(v):
      if (v is not None and len(v.shape) != ndims and self.axis != [ndims - 1]):
        return tf.reshape(v, broadcast_shape)
      return v

    if not self._fused:
      input_dtype = inputs.dtype
      if input_dtype in ('float16', 'bfloat16') and self.dtype == 'float32':
        # If mixed precision is used, cast inputs to float32 so that this is at
        # least as numerically stable as the fused version.
        inputs = tf.cast(inputs, 'float32')

      # Calculate the moments on the last axis (layer activations).
      mean, variance = tf.nn.moments(inputs, self.axis, keepdims=True)

      scale, offset = _broadcast(self.gamma), _broadcast(self.beta)

      # Compute layer normalization using the batch_normalization function.
      outputs = tf.nn.batch_normalization(
          inputs,
          mean,
          variance,
          offset=offset,
          scale=scale,
          variance_epsilon=self.epsilon)
      # Cast back to the caller's dtype (no-op when no cast happened above).
      outputs = tf.cast(outputs, input_dtype)
    else:
      # Collapse dims before self.axis, and dims in self.axis
      pre_dim, in_dim = (1, 1)
      axis = sorted(self.axis)
      tensor_shape = tf.shape(inputs)
      for dim in range(0, ndims):
        dim_tensor = tensor_shape[dim]
        if dim < axis[0]:
          pre_dim = pre_dim * dim_tensor
        else:
          assert dim in axis
          in_dim = in_dim * dim_tensor

      squeezed_shape = [1, pre_dim, in_dim, 1]
      # This fused operation requires reshaped inputs to be NCHW.
      data_format = 'NCHW'

      inputs = tf.reshape(inputs, squeezed_shape)

      # self.gamma and self.beta have the wrong shape for fused_batch_norm, so
      # we cannot pass them as the scale and offset parameters. Therefore, we
      # create two constant tensors in correct shapes for fused_batch_norm and
      # later construct a separate calculation on the scale and offset.
      scale = tf.ones([pre_dim], dtype=self.dtype)
      offset = tf.zeros([pre_dim], dtype=self.dtype)

      # Compute layer normalization using the fused_batch_norm function.
      outputs, _, _ = tf.compat.v1.nn.fused_batch_norm(
          inputs,
          scale=scale,
          offset=offset,
          epsilon=self.epsilon,
          data_format=data_format)

      outputs = tf.reshape(outputs, tensor_shape)

      scale, offset = _broadcast(self.gamma), _broadcast(self.beta)

      # Apply the real gamma/beta outside the fused op.
      if scale is not None:
        outputs = outputs * tf.cast(scale, outputs.dtype)
      if offset is not None:
        outputs = outputs + tf.cast(offset, outputs.dtype)

    # If some components of the shape got lost due to adjustments, fix that.
    outputs.set_shape(input_shape)

    return outputs

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'axis': self.axis,
        'epsilon': self.epsilon,
        'center': self.center,
        'scale': self.scale,
        'beta_initializer': initializers.serialize(self.beta_initializer),
        'gamma_initializer': initializers.serialize(self.gamma_initializer),
        'beta_regularizer': regularizers.serialize(self.beta_regularizer),
        'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
        'beta_constraint': constraints.serialize(self.beta_constraint),
        'gamma_constraint': constraints.serialize(self.gamma_constraint)
    }
    base_config = super(LayerNormalization, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| 13,113 | 35.226519 | 80 | py |
keras | keras-master/keras/layers/normalization/__init__.py | 1 | 0 | 0 | py | |
keras | keras-master/keras/layers/normalization/batch_normalization.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The V2 implementation of Normalization layers."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import control_flow_util
from tensorflow.python.ops.control_flow_ops import get_enclosing_xla_context
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
class BatchNormalizationBase(Layer):
r"""Layer that normalizes its inputs.
Batch normalization applies a transformation that maintains the mean output
close to 0 and the output standard deviation close to 1.
Importantly, batch normalization works differently during training and
during inference.
**During training** (i.e. when using `fit()` or when calling the layer/model
with the argument `training=True`), the layer normalizes its output using
the mean and standard deviation of the current batch of inputs. That is to
say, for each channel being normalized, the layer returns
`gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:
- `epsilon` is small constant (configurable as part of the constructor
arguments)
- `gamma` is a learned scaling factor (initialized as 1), which
can be disabled by passing `scale=False` to the constructor.
- `beta` is a learned offset factor (initialized as 0), which
can be disabled by passing `center=False` to the constructor.
**During inference** (i.e. when using `evaluate()` or `predict()`) or when
calling the layer/model with the argument `training=False` (which is the
default), the layer normalizes its output using a moving average of the
mean and standard deviation of the batches it has seen during training. That
is to say, it returns
`gamma * (batch - self.moving_mean) / sqrt(self.moving_var + epsilon) + beta`.
`self.moving_mean` and `self.moving_var` are non-trainable variables that
are updated each time the layer in called in training mode, as such:
- `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`
- `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`
As such, the layer will only normalize its inputs during inference
*after having been trained on data that has similar statistics as the
inference data*.
Args:
axis: Integer or a list of integers, the axis that should be normalized
(typically the features axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
next layer is linear (also e.g. `nn.relu`), this can be disabled since the
scaling will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
renorm: Whether to use [Batch Renormalization](
https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction `(r,
d)` is used as `corrected_value = normalized_value * r + d`, with `r`
clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training and
should be neither too small (which would add noise) nor too large (which
would give stale estimates). Note that `momentum` is still applied to get
the means and variances for inference.
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
implementation if possible. If False, do not used the fused
implementation.
Note that in TensorFlow 1.x, the meaning of `fused=True` is different: if
`False`, the layer uses the system-recommended implementation.
trainable: Boolean, if `True` the variables will be marked as trainable.
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if `axis=-1`,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized
value by up to 7% up or down, then shift the result by up to 0.1
(with independent scaling and bias for each feature but shared
across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
- `training=True`: The layer will normalize its inputs using the mean and
variance of the current batch of inputs.
- `training=False`: The layer will normalize its inputs using the mean and
variance of its moving statistics, learned during training.
Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of
integers, does not include the samples axis) when using this layer as the
first layer in a model.
Output shape: Same shape as input.
Reference:
- [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).
"""
# By default, the base class uses V2 behavior. The BatchNormalization V1
# subclass sets this to False to use the V1 behavior.
_USE_V2_BEHAVIOR = True
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               moving_mean_initializer='zeros',
               moving_variance_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    """Initializes the batch normalization layer.

    See the class docstring for the meaning of each argument.

    Raises:
      TypeError: if `axis` is neither an int nor a list/tuple of ints.
      ValueError: if `fused=True` is requested but cannot be used, or if
        `renorm_clipping` contains keys other than 'rmax', 'rmin', 'dmax'.
    """
    super(BatchNormalizationBase, self).__init__(name=name, **kwargs)
    if isinstance(axis, (list, tuple)):
      # Copy the list so the in-place normalization done in build() does not
      # mutate the caller's argument.
      self.axis = axis[:]
    elif isinstance(axis, int):
      self.axis = axis
    else:
      raise TypeError('Expected an int or a list/tuple of ints for the '
                      'argument \'axis\', but received: %r' % axis)
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    # Resolve the `fused` default where possible. The input-rank-dependent
    # part of the decision is completed in build().
    if self._USE_V2_BEHAVIOR:
      if fused:
        self._raise_if_fused_cannot_be_used()
      # We leave fused as None if self._fused_can_be_used()==True, since we
      # still may set it to False in self.build() if the input rank is not 4.
      elif fused is None and not self._fused_can_be_used():
        fused = False
    elif fused is None:
      fused = True
    self.supports_masking = True
    self.fused = fused
    # Toggled only by legacy V1 tests; see _fused_batch_norm.
    self._bessels_correction_test_only = True
    self.trainable = trainable
    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
def _raise_if_fused_cannot_be_used(self):
"""Raises a ValueError if fused implementation cannot be used.
In addition to the checks done in this function, the input tensors rank must
be 4 or 5. The input rank check can only be done once the input shape is
known.
"""
# Note the ValueErrors in this function are caught and not reraised in
# _fused_can_be_used(). No other exception besides ValueError should be
# raised here.
# Currently fused batch norm doesn't support renorm. It also only supports a
# channel dimension on axis 1 or 3 (rank=4) / 1 or 4 (rank5), when no
# virtual batch size or adjustment is used.
if self.renorm:
raise ValueError('Passing both `fused=True` and `renorm=True` is '
'not supported')
axis = [self.axis] if isinstance(self.axis, int) else self.axis
# Axis -3 is equivalent to 1, and axis -1 is equivalent to 3, when the
# input rank is 4. Similarly, the valid axis is -4, -1, 1, 4 when the rank
# is 5. The combination of ranks and axes will be checked later.
if len(axis) > 1 or axis[0] not in (-4, -3, -1, 1, 3, 4):
raise ValueError('Passing `fused=True` is only supported when axis is 1 '
'or 3 for input rank = 4 or 1 or 4 for input rank = 5. '
'Got axis %s' % (axis,))
if self.virtual_batch_size is not None:
raise ValueError('Passing `fused=True` is not supported when '
'`virtual_batch_size` is specified.')
if self.adjustment is not None:
raise ValueError('Passing `fused=True` is not supported when '
'`adjustment` is specified.')
# TODO(reedwm): Support fp64 in FusedBatchNorm then remove this check.
if self._compute_dtype not in ('float16', 'bfloat16', 'float32', None):
raise ValueError(
'Passing `fused=True` is only supported when the compute '
'dtype is float16, bfloat16, or float32. Got dtype: %s' %
(self._compute_dtype,))
def _fused_can_be_used(self):
try:
self._raise_if_fused_cannot_be_used()
return True
except ValueError:
return False
  @property
  def trainable(self):
    # Controls the value returned by _get_training_value(): when False, the
    # layer is forced into inference mode regardless of the `training`
    # argument passed to call().
    return self._trainable
  @trainable.setter
  def trainable(self, value):
    # Only records the flag; no variables are modified here.
    self._trainable = value
@property
def _param_dtype(self):
# Raise parameters of fp16 batch norm to fp32
if self.dtype == tf.float16 or self.dtype == tf.bfloat16:
return tf.float32
else:
return self.dtype or tf.float32
def _support_zero_size_input(self):
if not tf.distribute.has_strategy():
return False
strategy = tf.distribute.get_strategy()
# TODO(b/195085185): remove experimental_enable_get_next_as_optional after
# migrating all users.
return getattr(
strategy.extended, 'enable_partial_batch_handling',
getattr(strategy.extended, 'experimental_enable_get_next_as_optional',
False))
  def build(self, input_shape):
    """Creates the layer weights and finalizes `self.fused`.

    Normalizes `self.axis` to a list of non-negative ints, validates it
    against the input rank, decides whether the fused kernel can actually be
    used for this input, and creates gamma/beta plus the moving mean and
    variance (and, for renorm, the moving stddev and renorm statistics).

    Args:
      input_shape: `TensorShape` (or value convertible to one) of the input.

    Raises:
      ValueError: if the input rank is undefined, an axis is out of range or
        duplicated, `virtual_batch_size` is misconfigured, a normalized axis
        has an undefined static dimension, or `fused=True` cannot be honored
        for this input rank/axis.
    """
    input_shape = tf.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank.')
    ndims = len(input_shape)
    # Convert axis to list and resolve negatives
    if isinstance(self.axis, int):
      self.axis = [self.axis]
    for idx, x in enumerate(self.axis):
      if x < 0:
        self.axis[idx] = ndims + x
    # Validate axes
    for x in self.axis:
      if x < 0 or x >= ndims:
        raise ValueError('Invalid axis: %s' % (self.axis,))
    if len(self.axis) != len(set(self.axis)):
      raise ValueError('Duplicate axis: %s' % (self.axis,))
    if self.virtual_batch_size is not None:
      if self.virtual_batch_size <= 0:
        raise ValueError('virtual_batch_size must be a positive integer that '
                         'divides the true batch size of the input tensor')
      # If using virtual batches, the first dimension must be the batch
      # dimension and cannot be the batch norm axis
      if 0 in self.axis:
        raise ValueError('When using virtual_batch_size, the batch dimension '
                         'must be 0 and thus axis cannot include 0. '
                         'Received axis=%s' % (self.axis,))
      if self.adjustment is not None:
        raise ValueError('When using virtual_batch_size, adjustment cannot '
                         'be specified')
    if self.fused in (None, True):
      # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
      # output back to its original shape accordingly.
      if self._USE_V2_BEHAVIOR:
        if self.fused is None:
          self.fused = ndims in (4, 5)
        elif self.fused and ndims not in (4, 5):
          raise ValueError('Batch normalization layers with `fused=True` only '
                           'support 4D or 5D input tensors. '
                           'Received tensor with shape: %s' %
                           (tuple(input_shape),))
      else:
        assert self.fused is not None
        self.fused = (ndims in (4, 5) and self._fused_can_be_used())
      # TODO(chrisying): fused batch norm is currently not supported for
      # multi-axis batch norm and by extension virtual batches. In some cases,
      # it might be possible to use fused batch norm but would require reshaping
      # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
      # particularly tricky. A compromise might be to just support the most
      # common use case (turning 5D w/ virtual batch to NCHW)
      if self.fused:
        # Map the (axis, rank) pair to the layout string the fused kernel
        # expects, or disable/reject fused mode if there is no match.
        if self.axis == [1] and ndims == 4:
          self._data_format = 'NCHW'
        elif self.axis == [1] and ndims == 5:
          self._data_format = 'NCDHW'
        elif self.axis == [3] and ndims == 4:
          self._data_format = 'NHWC'
        elif self.axis == [4] and ndims == 5:
          self._data_format = 'NDHWC'
        elif ndims == 5:
          # 5D tensors that can be passed in but should not use fused batch norm
          # due to unsupported axis.
          self.fused = False
        else:
          if ndims == 4:
            raise ValueError(
                'Unsupported axis. The use of `fused=True` is only possible with '
                '`axis=1` or `axis=3` for 4D input tensors. Received '
                'axis=%s' % (self.axis,))
          else:
            raise ValueError(
                'Unsupported axis. The use of `fused=True` is only possible with '
                '`axis=1` or `axis=4` for 5D input tensors. Received '
                'axis=%s' % (self.axis,))
    # Each normalized axis must have a statically known dimension, since it
    # determines the parameter shapes created below.
    axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
    for x in axis_to_dim:
      if axis_to_dim[x] is None:
        raise ValueError('Input has undefined `axis` dimension. Received input '
                         'with shape %s. Axis value: %s' %
                         (tuple(input_shape), self.axis))
    self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)
    if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
      # Single axis batch norm (most common/default use-case)
      param_shape = (list(axis_to_dim.values())[0],)
    else:
      # Parameter shape is the original shape but with 1 in all non-axis dims
      param_shape = [
          axis_to_dim[i] if i in axis_to_dim else 1 for i in range(ndims)
      ]
      if self.virtual_batch_size is not None:
        # When using virtual batches, add an extra dim at index 1
        param_shape.insert(1, 1)
        for idx, x in enumerate(self.axis):
          self.axis[idx] = x + 1  # Account for added dimension
    if self.scale:
      self.gamma = self.add_weight(
          name='gamma',
          shape=param_shape,
          dtype=self._param_dtype,
          initializer=self.gamma_initializer,
          regularizer=self.gamma_regularizer,
          constraint=self.gamma_constraint,
          trainable=True,
          experimental_autocast=False)
    else:
      self.gamma = None
      if self.fused:
        # The fused kernel still needs a gamma tensor; substitute a constant 1.
        self._gamma_const = backend.constant(
            1.0, dtype=self._param_dtype, shape=param_shape)
    if self.center:
      self.beta = self.add_weight(
          name='beta',
          shape=param_shape,
          dtype=self._param_dtype,
          initializer=self.beta_initializer,
          regularizer=self.beta_regularizer,
          constraint=self.beta_constraint,
          trainable=True,
          experimental_autocast=False)
    else:
      self.beta = None
      if self.fused:
        # The fused kernel still needs a beta tensor; substitute a constant 0.
        self._beta_const = backend.constant(
            0.0, dtype=self._param_dtype, shape=param_shape)
    try:
      # Disable variable partitioning when creating the moving mean and variance
      if hasattr(self, '_scope') and self._scope:
        partitioner = self._scope.partitioner
        self._scope.set_partitioner(None)
      else:
        partitioner = None
      self.moving_mean = self.add_weight(
          name='moving_mean',
          shape=param_shape,
          dtype=self._param_dtype,
          initializer=self.moving_mean_initializer,
          synchronization=tf.VariableSynchronization.ON_READ,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN,
          experimental_autocast=False)
      self.moving_variance = self.add_weight(
          name='moving_variance',
          shape=param_shape,
          dtype=self._param_dtype,
          initializer=self.moving_variance_initializer,
          synchronization=tf.VariableSynchronization.ON_READ,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN,
          experimental_autocast=False)
      if self.renorm:
        # In batch renormalization we track the inference moving stddev instead
        # of the moving variance to more closely align with the paper.
        def moving_stddev_initializer(*args, **kwargs):
          return tf.sqrt(
              self.moving_variance_initializer(*args, **kwargs))
        with tf.distribute.get_strategy(
        ).extended.colocate_vars_with(self.moving_variance):
          self.moving_stddev = self.add_weight(
              name='moving_stddev',
              shape=param_shape,
              dtype=self._param_dtype,
              initializer=moving_stddev_initializer,
              synchronization=tf.VariableSynchronization.ON_READ,
              trainable=False,
              aggregation=tf.VariableAggregation.MEAN,
              experimental_autocast=False)
        # Create variables to maintain the moving mean and standard deviation.
        # These are used in training and thus are different from the moving
        # averages above. The renorm variables are colocated with moving_mean
        # and moving_stddev.
        # NOTE: below, the outer `with device` block causes the current device
        # stack to be cleared. The nested ones use a `lambda` to set the desired
        # device and ignore any devices that may be set by the custom getter.
        def _renorm_variable(name,
                             shape,
                             initializer=tf.compat.v1.zeros_initializer()):
          """Create a renorm variable."""
          var = self.add_weight(
              name=name,
              shape=shape,
              dtype=self._param_dtype,
              initializer=initializer,
              synchronization=tf.VariableSynchronization.ON_READ,
              trainable=False,
              aggregation=tf.VariableAggregation.MEAN,
              experimental_autocast=False)
          return var
        with tf.distribute.get_strategy(
        ).extended.colocate_vars_with(self.moving_mean):
          self.renorm_mean = _renorm_variable('renorm_mean', param_shape,
                                              self.moving_mean_initializer)
        with tf.distribute.get_strategy(
        ).extended.colocate_vars_with(self.moving_stddev):
          self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape,
                                                moving_stddev_initializer)
    finally:
      # Restore the partitioner that was disabled above, even on error.
      if partitioner:
        self._scope.set_partitioner(partitioner)
    self.built = True
def _assign_moving_average(self, variable, value, momentum, inputs_size):
def calculate_update_delta():
decay = tf.convert_to_tensor(
1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = tf.cast(decay, variable.dtype.base_dtype)
update_delta = (variable - tf.cast(value, variable.dtype)) * decay
if inputs_size is not None:
update_delta = tf.where(inputs_size > 0, update_delta,
backend.zeros_like(update_delta))
return update_delta
with backend.name_scope('AssignMovingAvg') as scope:
if tf.compat.v1.executing_eagerly_outside_functions():
return variable.assign_sub(calculate_update_delta(), name=scope)
else:
with tf.compat.v1.colocate_with(variable): # pylint: disable=protected-access
return tf.compat.v1.assign_sub(
variable, calculate_update_delta(), name=scope)
def _assign_new_value(self, variable, value):
with backend.name_scope('AssignNewValue') as scope:
if tf.compat.v1.executing_eagerly_outside_functions():
return variable.assign(value, name=scope)
else:
with tf.compat.v1.colocate_with(variable): # pylint: disable=protected-access
return tf.compat.v1.assign(variable, value, name=scope)
  def _fused_batch_norm(self, inputs, training):
    """Returns the output of fused batch norm.

    Dispatches to `tf.compat.v1.nn.fused_batch_norm` for both training and
    inference, and registers updates to the moving statistics when
    (possibly) training.

    Args:
      inputs: 4D or 5D input tensor; layout is given by `self._data_format`
        (resolved in build()).
      training: Python bool or bool tensor selecting training vs inference.

    Returns:
      The normalized output tensor.
    """
    beta = self.beta if self.center else self._beta_const
    gamma = self.gamma if self.scale else self._gamma_const
    # TODO(b/129279393): Support zero batch input in non DistributionStrategy
    # code as well.
    if self._support_zero_size_input():
      # Keras assumes that batch dimension is the first dimension for Batch
      # Normalization.
      input_batch_size = tf.shape(inputs)[0]
    else:
      input_batch_size = None
    # TODO(rmlarsen): Support using fused avg updates for non-eager execution
    # after fixing graph pattern matching and enabling fused_batch_norm to
    # take exponential_avg_factor as a tensor input.
    use_fused_avg_updates = (
        tf.compat.v1.executing_eagerly_outside_functions() and
        isinstance(self.momentum,
                   (float, int)) and get_enclosing_xla_context() is None)
    if use_fused_avg_updates:
      # The fused kernel updates the moving averages itself with this factor.
      exponential_avg_factor = 1.0 - self.momentum
    else:
      exponential_avg_factor = None
    def _maybe_add_or_remove_bessels_correction(variance, remove=True):
      r"""Add or remove Bessel's correction."""
      # Removes Bessel's correction if remove == True, adds it otherwise.
      # This is to be consistent with non-fused batch norm. Note that the
      # variance computed by fused batch norm is with Bessel's correction.
      # This is only used in legacy V1 batch norm tests.
      if self._bessels_correction_test_only:
        return variance
      sample_size = tf.cast(
          tf.size(inputs) / tf.size(variance), variance.dtype)
      if remove:
        factor = (sample_size -
                  tf.cast(1.0, variance.dtype)) / sample_size
      else:
        factor = sample_size / (
            sample_size - tf.cast(1.0, variance.dtype))
      return variance * factor
    def _fused_batch_norm_training():
      return tf.compat.v1.nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=self.moving_mean,
          variance=_maybe_add_or_remove_bessels_correction(
              self.moving_variance, remove=False),
          epsilon=self.epsilon,
          is_training=True,
          data_format=self._data_format,
          exponential_avg_factor=exponential_avg_factor)
    def _fused_batch_norm_training_empty():
      # Empty (zero-sized) batch: pass inputs through, keep moving statistics.
      return inputs, self.moving_mean, self.moving_variance
    def _fused_batch_norm_inference():
      return tf.compat.v1.nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=self.moving_mean,
          variance=self.moving_variance,
          epsilon=self.epsilon,
          is_training=False,
          data_format=self._data_format)
    train_op = _fused_batch_norm_training
    if use_fused_avg_updates and input_batch_size is not None:
      # pylint: disable=g-long-lambda
      train_op = lambda: control_flow_util.smart_cond(
          input_batch_size > 0, _fused_batch_norm_training,
          _fused_batch_norm_training_empty)
      # pylint: enable=g-long-lambda
    output, mean, variance = control_flow_util.smart_cond(
        training, train_op, _fused_batch_norm_inference)
    variance = _maybe_add_or_remove_bessels_correction(variance, remove=True)
    training_value = control_flow_util.constant_value(training)
    # Moving statistics only need updating when training is True or unknown.
    if training_value or training_value is None:
      if not use_fused_avg_updates:
        if training_value is None:
          # `training` is a tensor: pick momentum at run time (1.0 == no-op).
          momentum = control_flow_util.smart_cond(training,
                                                  lambda: self.momentum,
                                                  lambda: 1.0)
        else:
          momentum = tf.convert_to_tensor(self.momentum)
      def mean_update():
        """Update self.moving_mean with the most recent data point."""
        if use_fused_avg_updates:
          return self._assign_new_value(self.moving_mean, mean)
        else:
          return self._assign_moving_average(self.moving_mean, mean, momentum,
                                             input_batch_size)
      def variance_update():
        """Update self.moving_variance with the most recent data point."""
        if use_fused_avg_updates:
          return self._assign_new_value(self.moving_variance, variance)
        else:
          return self._assign_moving_average(self.moving_variance, variance,
                                             momentum, input_batch_size)
      self.add_update(mean_update)
      self.add_update(variance_update)
    return output
  def _renorm_correction_and_moments(self, mean, variance, training,
                                     inputs_size):
    """Returns the correction and update values for renorm.

    Computes the batch-renorm corrections r = batch_stddev / moving_stddev
    and d = (batch_mean - moving_mean) / moving_stddev (clipped per
    `self.renorm_clipping`), and registers updates of the renorm moving
    statistics.

    Args:
      mean: batch mean tensor.
      variance: batch variance tensor.
      training: Python bool or bool tensor; when False, r=1 and d=0 so the
        correction is a no-op.
      inputs_size: scalar batch-size tensor or None; used to skip moving
        average updates for empty batches.

    Returns:
      Tuple (r, d, out_mean, out_variance) where out_mean/out_variance are
      the input moments gated behind the renorm-statistics updates.
    """
    stddev = tf.sqrt(variance + self.epsilon)
    # Compute the average mean and standard deviation, as if they were
    # initialized with this batch's moments.
    renorm_mean = self.renorm_mean
    # Avoid divide by zero early on in training.
    renorm_stddev = tf.maximum(self.renorm_stddev, tf.sqrt(self.epsilon))
    # Compute the corrections for batch renorm.
    r = stddev / renorm_stddev
    d = (mean - renorm_mean) / renorm_stddev
    # Ensure the corrections use pre-update moving averages.
    with tf.control_dependencies([r, d]):
      mean = tf.identity(mean)
      stddev = tf.identity(stddev)
    rmin, rmax, dmax = [
        self.renorm_clipping.get(key) for key in ['rmin', 'rmax', 'dmax']
    ]
    if rmin is not None:
      r = tf.maximum(r, rmin)
    if rmax is not None:
      r = tf.minimum(r, rmax)
    if dmax is not None:
      # d is clipped symmetrically to [-dmax, dmax].
      d = tf.maximum(d, -dmax)
      d = tf.minimum(d, dmax)
    # When not training, use r=1, d=0.
    r = control_flow_util.smart_cond(training, lambda: r,
                                     lambda: tf.ones_like(r))
    d = control_flow_util.smart_cond(training, lambda: d,
                                     lambda: tf.zeros_like(d))
    def _update_renorm_variable(var, value, inputs_size):
      """Updates a moving average and weight, returns the unbiased value."""
      value = tf.identity(value)
      def _do_update():
        """Updates the var, returns the updated value."""
        new_var = self._assign_moving_average(var, value, self.renorm_momentum,
                                              inputs_size)
        return new_var
      def _fake_update():
        return tf.identity(var)
      return control_flow_util.smart_cond(training, _do_update, _fake_update)
    # TODO(yuefengz): colocate the operations
    update_new_mean = _update_renorm_variable(self.renorm_mean, mean,
                                              inputs_size)
    update_new_stddev = _update_renorm_variable(self.renorm_stddev, stddev,
                                                inputs_size)
    # Update the inference mode moving averages with the batch value.
    with tf.control_dependencies([update_new_mean, update_new_stddev]):
      out_mean = tf.identity(mean)
      out_variance = tf.identity(variance)
    return (r, d, out_mean, out_variance)
def _calculate_mean_and_var(self, inputs, reduction_axes, keep_dims):
return tf.nn.moments(inputs, reduction_axes, keepdims=keep_dims)
def _moments(self, inputs, reduction_axes, keep_dims):
mean, variance = self._calculate_mean_and_var(inputs, reduction_axes,
keep_dims)
# TODO(b/129279393): Support zero batch input in non DistributionStrategy
# code as well.
if self._support_zero_size_input():
input_batch_size = tf.shape(inputs)[0]
mean = tf.where(input_batch_size > 0, mean, backend.zeros_like(mean))
variance = tf.where(input_batch_size > 0, variance,
backend.zeros_like(variance))
return mean, variance
def _get_training_value(self, training=None):
if training is None:
training = backend.learning_phase()
if self._USE_V2_BEHAVIOR:
if isinstance(training, int):
training = bool(training)
if not self.trainable:
# When the layer is not trainable, it overrides the value passed from
# model.
training = False
return training
  def call(self, inputs, training=None):
    """Applies batch normalization to `inputs`.

    Args:
      inputs: input tensor of any rank.
      training: Python bool or tensor (or None, in which case the Keras
        learning phase is used) selecting batch statistics (True) vs moving
        statistics (False).

    Returns:
      The normalized tensor, with the same shape as `inputs`.
    """
    training = self._get_training_value(training)
    if self.virtual_batch_size is not None:
      # Virtual batches (aka ghost batches) can be simulated by reshaping the
      # Tensor and reusing the existing batch norm implementation
      original_shape = tf.shape(inputs)
      original_shape = tf.concat(
          [tf.constant([-1]), original_shape[1:]], axis=0)
      expanded_shape = tf.concat([
          tf.constant([self.virtual_batch_size, -1]),
          original_shape[1:]
      ], axis=0)
      # Will cause errors if virtual_batch_size does not divide the batch size
      inputs = tf.reshape(inputs, expanded_shape)
      def undo_virtual_batching(outputs):
        # Collapses the (virtual_batch, sub_batch) dims back to one batch dim.
        outputs = tf.reshape(outputs, original_shape)
        return outputs
    if self.fused:
      outputs = self._fused_batch_norm(inputs, training=training)
      if self.virtual_batch_size is not None:
        # Currently never reaches here since fused_batch_norm does not support
        # virtual batching
        outputs = undo_virtual_batching(outputs)
      return outputs
    inputs_dtype = inputs.dtype.base_dtype
    if inputs_dtype in (tf.float16, tf.bfloat16):
      # Do all math in float32 if given 16-bit inputs for numeric stability.
      # In particular, it's very easy for variance to overflow in float16 and
      # for safety we also choose to cast bfloat16 to float32.
      inputs = tf.cast(inputs, tf.float32)
    # Compute the axes along which to reduce the mean / variance
    input_shape = inputs.shape
    ndims = len(input_shape)
    reduction_axes = [i for i in range(ndims) if i not in self.axis]
    if self.virtual_batch_size is not None:
      del reduction_axes[1]  # Do not reduce along virtual batch dim
    # Broadcasting only necessary for single-axis batch norm where the axis is
    # not the last dimension
    broadcast_shape = [1] * ndims
    broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value
    def _broadcast(v):
      if (v is not None and len(v.shape) != ndims and
          reduction_axes != list(range(ndims - 1))):
        return tf.reshape(v, broadcast_shape)
      return v
    scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
    def _compose_transforms(scale, offset, then_scale, then_offset):
      # Folds applying (scale, offset) and then (then_scale, then_offset)
      # into a single (scale, offset) pair.
      if then_scale is not None:
        scale *= then_scale
        offset *= then_scale
      if then_offset is not None:
        offset += then_offset
      return (scale, offset)
    # Determine a boolean value for `training`: could be True, False, or None.
    training_value = control_flow_util.constant_value(training)
    if training_value == False:  # pylint: disable=singleton-comparison,g-explicit-bool-comparison
      mean, variance = self.moving_mean, self.moving_variance
    else:
      if self.adjustment:
        adj_scale, adj_bias = self.adjustment(tf.shape(inputs))
        # Adjust only during training.
        adj_scale = control_flow_util.smart_cond(
            training, lambda: adj_scale, lambda: tf.ones_like(adj_scale))
        adj_bias = control_flow_util.smart_cond(
            training, lambda: adj_bias, lambda: tf.zeros_like(adj_bias))
        scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
      # Some of the computations here are not necessary when training==False
      # but not a constant. However, this makes the code simpler.
      keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
      mean, variance = self._moments(
          tf.cast(inputs, self._param_dtype),
          reduction_axes,
          keep_dims=keep_dims)
      moving_mean = self.moving_mean
      moving_variance = self.moving_variance
      mean = control_flow_util.smart_cond(
          training, lambda: mean,
          lambda: tf.convert_to_tensor(moving_mean))
      variance = control_flow_util.smart_cond(
          training, lambda: variance,
          lambda: tf.convert_to_tensor(moving_variance))
      if self.virtual_batch_size is not None:
        # This isn't strictly correct since in ghost batch norm, you are
        # supposed to sequentially update the moving_mean and moving_variance
        # with each sub-batch. However, since the moving statistics are only
        # used during evaluation, it is more efficient to just update in one
        # step and should not make a significant difference in the result.
        new_mean = tf.reduce_mean(mean, axis=1, keepdims=True)
        new_variance = tf.reduce_mean(variance, axis=1, keepdims=True)
      else:
        new_mean, new_variance = mean, variance
      if self._support_zero_size_input():
        # Keras assumes that batch dimension is the first dimension for Batch
        # Normalization.
        input_batch_size = tf.shape(inputs)[0]
      else:
        input_batch_size = None
      if self.renorm:
        r, d, new_mean, new_variance = self._renorm_correction_and_moments(
            new_mean, new_variance, training, input_batch_size)
        # When training, the normalized values (say, x) will be transformed as
        # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
        # = x * (r * gamma) + (d * gamma + beta) with renorm.
        r = _broadcast(tf.stop_gradient(r, name='renorm_r'))
        d = _broadcast(tf.stop_gradient(d, name='renorm_d'))
        scale, offset = _compose_transforms(r, d, scale, offset)
      def _do_update(var, value):
        """Compute the updates for mean and variance."""
        return self._assign_moving_average(var, value, self.momentum,
                                           input_batch_size)
      def mean_update():
        true_branch = lambda: _do_update(self.moving_mean, new_mean)
        false_branch = lambda: self.moving_mean
        return control_flow_util.smart_cond(training, true_branch, false_branch)
      def variance_update():
        """Update the moving variance."""
        def true_branch_renorm():
          # We apply epsilon as part of the moving_stddev to mirror the training
          # code path.
          moving_stddev = _do_update(self.moving_stddev,
                                     tf.sqrt(new_variance + self.epsilon))
          return self._assign_new_value(
              self.moving_variance,
              # Apply relu in case floating point rounding causes it to go
              # negative.
              backend.relu(moving_stddev * moving_stddev - self.epsilon))
        if self.renorm:
          true_branch = true_branch_renorm
        else:
          true_branch = lambda: _do_update(self.moving_variance, new_variance)
        false_branch = lambda: self.moving_variance
        return control_flow_util.smart_cond(training, true_branch, false_branch)
      self.add_update(mean_update)
      self.add_update(variance_update)
    mean = tf.cast(mean, inputs.dtype)
    variance = tf.cast(variance, inputs.dtype)
    if offset is not None:
      offset = tf.cast(offset, inputs.dtype)
    if scale is not None:
      scale = tf.cast(scale, inputs.dtype)
    outputs = tf.nn.batch_normalization(inputs, _broadcast(mean),
                                        _broadcast(variance), offset, scale,
                                        self.epsilon)
    if inputs_dtype in (tf.float16, tf.bfloat16):
      outputs = tf.cast(outputs, inputs_dtype)
    # If some components of the shape got lost due to adjustments, fix that.
    outputs.set_shape(input_shape)
    if self.virtual_batch_size is not None:
      outputs = undo_virtual_batching(outputs)
    return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'momentum': self.momentum,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'moving_mean_initializer':
initializers.serialize(self.moving_mean_initializer),
'moving_variance_initializer':
initializers.serialize(self.moving_variance_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
# Only add TensorFlow-specific parameters if they are set, so as to preserve
# model compatibility with external Keras.
if self.renorm:
config['renorm'] = True
config['renorm_clipping'] = self.renorm_clipping
config['renorm_momentum'] = self.renorm_momentum
if self.virtual_batch_size is not None:
config['virtual_batch_size'] = self.virtual_batch_size
# Note: adjustment is not serializable.
if self.adjustment is not None:
logging.warning('The `adjustment` function of this `BatchNormalization` '
'layer cannot be serialized and has been omitted from '
'the layer config. It will not be included when '
're-creating the layer from the saved config.')
base_config = super(BatchNormalizationBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# pylint: disable=g-classes-have-attributes
@keras_export('keras.layers.experimental.SyncBatchNormalization', v1=[])
class SyncBatchNormalization(BatchNormalizationBase):
r"""Normalize and scale inputs or activations synchronously across replicas.
Applies batch normalization to activations of the previous layer at each batch
by synchronizing the global batch statistics across all devices that are
training the model. For specific details about batch normalization please
refer to the `tf.keras.layers.BatchNormalization` layer docs.
If this layer is used when using tf.distribute strategy to train models
across devices/workers, there will be an allreduce call to aggregate batch
statistics across all replicas at every training step. Without tf.distribute
strategy, this layer behaves as a regular `tf.keras.layers.BatchNormalization`
layer.
Example usage:
```python
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(16))
model.add(tf.keras.layers.experimental.SyncBatchNormalization())
```
Args:
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
- `training=True`: The layer will normalize its inputs using the
mean and variance of the current batch of inputs.
- `training=False`: The layer will normalize its inputs using the
mean and variance of its moving statistics, learned during training.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
if kwargs.pop('fused', None):
raise ValueError(
'`fused` argument cannot be True for SyncBatchNormalization.')
# Currently we only support aggregating over the global batch size.
super(SyncBatchNormalization, self).__init__(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
fused=False,
**kwargs)
  def _calculate_mean_and_var(self, x, axes, keep_dims):
    """Computes mean/variance of `x` over `axes`, aggregated across replicas.

    Overrides the base-class moment computation: when executing inside a
    `tf.distribute` replica context, per-replica sums and batch counts are
    all-reduced so every replica sees statistics of the *global* batch.

    Args:
      x: Input tensor.
      axes: Axes to reduce over. `axes[0]` is used as the batch axis when
        computing the global batch size.
      keep_dims: If False, the reduced axes are squeezed out of the results.

    Returns:
      Tuple `(mean, variance)`, cast back to fp16 when `x` is fp16.
    """
    with backend.name_scope('moments'):
      # The dynamic range of fp16 is too limited to support the collection of
      # sufficient statistics. As a workaround we simply perform the operations
      # on 32-bit floats before converting the mean and variance back to fp16
      y = tf.cast(x, tf.float32) if x.dtype == tf.float16 else x
      replica_ctx = tf.distribute.get_replica_context()
      if replica_ctx:
        # Compute local sums first, then all-reduce, so every replica ends up
        # with statistics of the full (global) batch.
        local_sum = tf.reduce_sum(y, axis=axes, keepdims=True)
        local_squared_sum = tf.reduce_sum(tf.square(y), axis=axes,
                                          keepdims=True)
        batch_size = tf.cast(tf.shape(y)[axes[0]],
                             tf.float32)
        # TODO(b/163099951): batch the all-reduces once we sort out the ordering
        # issue for NCCL. We don't have a mechanism to launch NCCL in the same
        # order in each replica nowadays, so we limit NCCL to batch all-reduces.
        y_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_sum)
        y_squared_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM,
                                               local_squared_sum)
        global_batch_size = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM,
                                                   batch_size)
        # Element count per reduced position = product of the remaining
        # reduced axes times the global batch size.
        axes_vals = [(tf.shape(y))[axes[i]]
                     for i in range(1, len(axes))]
        multiplier = tf.cast(tf.reduce_prod(axes_vals),
                             tf.float32)
        multiplier = multiplier * global_batch_size
        mean = y_sum / multiplier
        y_squared_mean = y_squared_sum / multiplier
        # var = E(x^2) - E(x)^2
        variance = y_squared_mean - tf.square(mean)
      else:
        # Compute true mean while keeping the dims for proper broadcasting.
        mean = tf.reduce_mean(y, axes, keepdims=True, name='mean')
        # sample variance, not unbiased variance
        # Note: stop_gradient does not change the gradient that gets
        # backpropagated to the mean from the variance calculation,
        # because that gradient is zero
        variance = tf.reduce_mean(
            tf.math.squared_difference(y, tf.stop_gradient(mean)),
            axes,
            keepdims=True,
            name='variance')
      if not keep_dims:
        mean = tf.squeeze(mean, axes)
        variance = tf.squeeze(variance, axes)
      if x.dtype == tf.float16:
        # Cast the fp32 working values back to the caller's dtype.
        return (tf.cast(mean, tf.float16),
                tf.cast(variance, tf.float16))
      else:
        return (mean, variance)
@keras_export('keras.layers.BatchNormalization', v1=[])
class BatchNormalization(BatchNormalizationBase):
  """Layer that normalizes its inputs.
  Batch normalization applies a transformation that maintains the mean output
  close to 0 and the output standard deviation close to 1.
  Importantly, batch normalization works differently during training and
  during inference.
  **During training** (i.e. when using `fit()` or when calling the layer/model
  with the argument `training=True`), the layer normalizes its output using
  the mean and standard deviation of the current batch of inputs. That is to
  say, for each channel being normalized, the layer returns
  `gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:
  - `epsilon` is small constant (configurable as part of the constructor
  arguments)
  - `gamma` is a learned scaling factor (initialized as 1), which
  can be disabled by passing `scale=False` to the constructor.
  - `beta` is a learned offset factor (initialized as 0), which
  can be disabled by passing `center=False` to the constructor.
  **During inference** (i.e. when using `evaluate()` or `predict()` or when
  calling the layer/model with the argument `training=False` (which is the
  default), the layer normalizes its output using a moving average of the
  mean and standard deviation of the batches it has seen during training. That
  is to say, it returns
  `gamma * (batch - self.moving_mean) / sqrt(self.moving_var + epsilon) + beta`.
  `self.moving_mean` and `self.moving_var` are non-trainable variables that
  are updated each time the layer in called in training mode, as such:
  - `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`
  - `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`
  As such, the layer will only normalize its inputs during inference
  *after having been trained on data that has similar statistics as the
  inference data*.
  Args:
    axis: Integer, the axis that should be normalized (typically the features
      axis). For instance, after a `Conv2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
      next layer is linear (also e.g. `nn.relu`), this can be disabled since the
      scaling will be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    beta_constraint: Optional constraint for the beta weight.
    gamma_constraint: Optional constraint for the gamma weight.
  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode.
      - `training=True`: The layer will normalize its inputs using the mean and
        variance of the current batch of inputs.
      - `training=False`: The layer will normalize its inputs using the mean and
        variance of its moving statistics, learned during training.
  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of
    integers, does not include the samples axis) when using this layer as the
    first layer in a model.
  Output shape:
    Same shape as input.
  Reference:
    - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).
  **About setting `layer.trainable = False` on a `BatchNormalization` layer:**
  The meaning of setting `layer.trainable = False` is to freeze the layer,
  i.e. its internal state will not change during training:
  its trainable weights will not be updated
  during `fit()` or `train_on_batch()`, and its state updates will not be run.
  Usually, this does not necessarily mean that the layer is run in inference
  mode (which is normally controlled by the `training` argument that can
  be passed when calling a layer). "Frozen state" and "inference mode"
  are two separate concepts.
  However, in the case of the `BatchNormalization` layer, **setting
  `trainable = False` on the layer means that the layer will be
  subsequently run in inference mode** (meaning that it will use
  the moving mean and the moving variance to normalize the current batch,
  rather than using the mean and variance of the current batch).
  This behavior has been introduced in TensorFlow 2.0, in order
  to enable `layer.trainable = False` to produce the most commonly
  expected behavior in the convnet fine-tuning use case.
  Note that:
  - Setting `trainable` on an model containing other layers will
    recursively set the `trainable` value of all inner layers.
  - If the value of the `trainable`
    attribute is changed after calling `compile()` on a model,
    the new value doesn't take effect for this model
    until `compile()` is called again.
  """
  # NOTE(review): presumably consumed by BatchNormalizationBase to select its
  # TF2 code paths -- confirm against the base class, which is not visible here.
  _USE_V2_BEHAVIOR = True
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               moving_mean_initializer='zeros',
               moving_variance_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               **kwargs):
    """Initializes the layer; all arguments are forwarded to the base class."""
    super(BatchNormalization, self).__init__(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        **kwargs)
| 54,834 | 42.519841 | 98 | py |
keras | keras-master/keras/layers/legacy_rnn/rnn_cell_impl.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import collections
import warnings
from keras import activations
from keras import backend
from keras import initializers
from keras.engine import base_layer_utils
from keras.engine import input_spec
from keras.layers.legacy_rnn import rnn_cell_wrapper_impl
from keras.legacy_tf_layers import base as base_layer
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# Variable names used by the cells' build() methods when creating weights.
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
# This can be used with self.assertRaisesRegexp for assert_like_rnncell.
ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell"
def _hasattr(obj, attr_name):
try:
getattr(obj, attr_name)
except AttributeError:
return False
else:
return True
def assert_like_rnncell(cell_name, cell):
  """Raises a TypeError if cell is not like an RNNCell.

  NOTE: Do not rely on the error message (in particular in tests) which can be
  subject to change to increase readability. Use
  ASSERT_LIKE_RNNCELL_ERROR_REGEXP.

  Args:
    cell_name: A string to give a meaningful error referencing to the name of
      the function argument.
    cell: The object which should behave like an RNNCell.

  Raises:
    TypeError: A human-friendly exception.
  """
  # Each entry pairs a passing condition with the message emitted on failure.
  checks = [
      (hasattr(cell, "output_size"),
       "'output_size' property is missing"),
      (hasattr(cell, "state_size"),
       "'state_size' property is missing"),
      (hasattr(cell, "get_initial_state") or hasattr(cell, "zero_state"),
       "either 'zero_state' or 'get_initial_state' method is required"),
      (callable(cell),
       "is not callable"),
  ]
  failures = [message for ok, message in checks if not ok]
  if failures:
    raise TypeError("The argument {!r} ({}) is not an RNNCell: {}.".format(
        cell_name, cell, ", ".join(failures)))
def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and asked for dynamic
      Tensors out.
  """
  # Normalize `prefix` into a rank-1 tensor `p` (or None when the shape is not
  # fully defined) plus a static python value `p_static` (or None when the
  # value is not statically known).
  if isinstance(prefix, tf.Tensor):
    p = prefix
    p_static = tf.get_static_value(prefix)
    if p.shape.ndims == 0:
      p = tf.compat.v1.expand_dims(p, 0)
    elif p.shape.ndims != 1:
      raise ValueError(
          "Prefix tensor must be either a scalar or vector, "
          f"but received tensor: {p}")
  else:
    p = tf.TensorShape(prefix)
    p_static = p.as_list() if p.ndims is not None else None
    p = (
        tf.constant(p.as_list(), dtype=tf.int32)
        if p.is_fully_defined() else None)
  # Same normalization for `suffix`.
  if isinstance(suffix, tf.Tensor):
    s = suffix
    s_static = tf.get_static_value(suffix)
    if s.shape.ndims == 0:
      s = tf.compat.v1.expand_dims(s, 0)
    elif s.shape.ndims != 1:
      raise ValueError("suffix tensor must be either a scalar or vector, "
                       f"but received tensor: {s}")
  else:
    s = tf.TensorShape(suffix)
    s_static = s.as_list() if s.ndims is not None else None
    s = (
        tf.constant(s.as_list(), dtype=tf.int32)
        if s.is_fully_defined() else None)
  if static:
    # Static path: return a python list (entries may be None for unknown dims).
    shape = tf.TensorShape(p_static).concatenate(s_static)
    shape = shape.as_list() if shape.ndims is not None else None
  else:
    # Dynamic path: both pieces must have a concrete tensor representation.
    if p is None or s is None:
      raise ValueError(
          "Prefix or suffix can't be None. "
          f"Received prefix = {prefix} and suffix = {suffix}")
    shape = tf.concat((p, s), 0)
  return shape
def _zero_state_tensors(state_size, batch_size, dtype):
  """Create tensors of zeros based on state_size, batch_size, and dtype."""
  def make_zeros(size_spec):
    """Build a zero tensor of shape [batch_size] + size_spec."""
    dynamic_shape = _concat(batch_size, size_spec)
    zeros = tf.zeros(dynamic_shape, dtype=dtype)
    if not tf.executing_eagerly():
      # In graph mode, attach whatever static shape information is available
      # so downstream ops can rely on it.
      static_shape = _concat(batch_size, size_spec, static=True)
      zeros.set_shape(static_shape)
    return zeros
  return tf.nest.map_structure(make_zeros, state_size)
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.RNNCell"])
@tf_export(v1=["nn.rnn_cell.RNNCell"])
class RNNCell(base_layer.Layer):
  """Abstract object representing an RNN cell.
  Every `RNNCell` must have the properties below and implement `call` with
  the signature `(output, next_state) = call(input, state)`. The optional
  third input argument, `scope`, is allowed for backwards compatibility
  purposes; but should be left off for new subclasses.
  This definition of cell differs from the definition used in the literature.
  In the literature, 'cell' refers to an object with a single scalar output.
  This definition refers to a horizontal array of such units.
  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  (possibly nested tuple of) TensorShape object(s), then it should return a
  matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.batch_size`.
  """
  def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
    super(RNNCell, self).__init__(
        trainable=trainable, name=name, dtype=dtype, **kwargs)
    # Attribute that indicates whether the cell is a TF RNN cell, due the slight
    # difference between TF and Keras RNN cell. Notably the state is not wrapped
    # in a list for TF cell where they are single tensor state, whereas keras
    # cell will wrap the state into a list, and call() will have to unwrap them.
    self._is_tf_rnn_cell = True
  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.
    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple with
        shapes `[batch_size, s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.
    Returns:
      A pair containing:
      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    if scope is not None:
      with tf.compat.v1.variable_scope(
          scope, custom_getter=self._rnn_get_variable) as scope:
        return super(RNNCell, self).__call__(inputs, state, scope=scope)
    else:
      # Cache one variable scope per cell instance so that repeated calls go
      # through the same scope (and the custom getter below).
      scope_attrname = "rnncell_scope"
      scope = getattr(self, scope_attrname, None)
      if scope is None:
        scope = tf.compat.v1.variable_scope(
            tf.compat.v1.get_variable_scope(), custom_getter=self._rnn_get_variable)
        setattr(self, scope_attrname, scope)
      with scope:
        return super(RNNCell, self).__call__(inputs, state)
  def _rnn_get_variable(self, getter, *args, **kwargs):
    # Custom variable-scope getter: records every variable obtained via
    # tf.compat.v1.get_variable in this layer's trainable/non-trainable weight
    # lists, using identity comparison so each variable is tracked only once.
    variable = getter(*args, **kwargs)
    if tf.compat.v1.executing_eagerly_outside_functions():
      trainable = variable.trainable
    else:
      # In graph mode, trainability is determined by membership in the graph's
      # trainable-variables collection (including partitioned variables).
      trainable = (
          variable in tf.compat.v1.trainable_variables() or
          (base_layer_utils.is_split_variable(variable) and
           list(variable)[0] in tf.compat.v1.trainable_variables()))
    if trainable and all(variable is not v for v in self._trainable_weights):
      self._trainable_weights.append(variable)
    elif not trainable and all(
        variable is not v for v in self._non_trainable_weights):
      self._non_trainable_weights.append(variable)
    return variable
  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.
    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")
  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")
  def build(self, _):
    # This tells the parent Layer object that it's OK to call
    # self.add_variable() inside the call() method.
    pass
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Returns a zero-filled initial state, inferring batch size and dtype.
    Either `inputs` or both `batch_size` and `dtype` must be provided; when
    `inputs` is given, `batch_size`/`dtype` (if also given) must agree with it.
    """
    if inputs is not None:
      # Validate the given batch_size and dtype against inputs if provided.
      inputs = tf.convert_to_tensor(inputs, name="inputs")
      if batch_size is not None:
        if tf.is_tensor(batch_size):
          static_batch_size = tf.get_static_value(
              batch_size, partial=True)
        else:
          static_batch_size = batch_size
        if inputs.shape.dims[0].value != static_batch_size:
          raise ValueError(
              "batch size from input tensor is different from the "
              f"input param. Input tensor batch: {inputs.shape.dims[0].value}, "
              f"batch_size: {batch_size}")
      if dtype is not None and inputs.dtype != dtype:
        raise ValueError(
            "dtype from input tensor is different from the "
            f"input param. Input tensor dtype: {inputs.dtype}, dtype: {dtype}")
      # Fall back to the dynamic shape when the static batch dim is unknown.
      batch_size = inputs.shape.dims[0].value or tf.compat.v1.shape(inputs)[0]
      dtype = inputs.dtype
    if batch_size is None or dtype is None:
      raise ValueError(
          "batch_size and dtype cannot be None while constructing initial "
          f"state: batch_size={batch_size}, dtype={dtype}")
    return self.zero_state(batch_size, dtype)
  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).
    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.
    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.
      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size, s]` for each s in `state_size`.
    """
    # Try to use the last cached zero_state. This is done to avoid recreating
    # zeros, especially when eager execution is enabled.
    state_size = self.state_size
    is_eager = tf.executing_eagerly()
    if is_eager and _hasattr(self, "_last_zero_state"):
      (last_state_size, last_batch_size, last_dtype,
       last_output) = getattr(self, "_last_zero_state")
      if (last_batch_size == batch_size and last_dtype == dtype and
          last_state_size == state_size):
        return last_output
    with backend.name_scope(type(self).__name__ + "ZeroState"):
      output = _zero_state_tensors(state_size, batch_size, dtype)
    if is_eager:
      # Cache the result for reuse on the next call with identical arguments.
      self._last_zero_state = (state_size, batch_size, dtype, output)
    return output
  # TODO(b/134773139): Remove when contrib RNN cells implement `get_config`
  def get_config(self):  # pylint: disable=useless-super-delegation
    return super(RNNCell, self).get_config()
  @property
  def _use_input_spec_as_call_signature(self):
    # We do not store the shape information for the state argument in the call
    # function for legacy RNN cells, so do not generate an input signature.
    return False
class LayerRNNCell(RNNCell):
  """Subclass of RNNCells that act like proper `tf.Layer` objects.
  For backwards compatibility purposes, most `RNNCell` instances allow their
  `call` methods to instantiate variables via `tf.compat.v1.get_variable`. The
  underlying
  variable scope thus keeps track of any variables, and returning cached
  versions. This is atypical of `tf.layer` objects, which separate this
  part of layer building into a `build` method that is only called once.
  Here we provide a subclass for `RNNCell` objects that act exactly as
  `Layer` objects do. They must provide a `build` method and their
  `call` methods do not access Variables `tf.compat.v1.get_variable`.
  """
  def __call__(self, inputs, state, scope=None, *args, **kwargs):
    """Run this RNN cell on inputs, starting from the given state.
    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple with
        shapes `[batch_size, s] for s in self.state_size`.
      scope: optional cell scope.
      *args: Additional positional arguments.
      **kwargs: Additional keyword arguments.
    Returns:
      A pair containing:
      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    # Bypass RNNCell's variable capturing semantics for LayerRNNCell.
    # Instead, it is up to subclasses to provide a proper build
    # method. See the class docstring for more details.
    # Invoking Layer.__call__ directly skips RNNCell.__call__'s variable-scope
    # and custom-getter machinery.
    return base_layer.Layer.__call__(
        self, inputs, state, scope=scope, *args, **kwargs)
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.BasicRNNCell"])
@tf_export(v1=["nn.rnn_cell.BasicRNNCell"])
class BasicRNNCell(LayerRNNCell):
  """The most basic RNN cell.
  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.
  Args:
    num_units: int, The number of units in the RNN cell.
    activation: Nonlinearity to use. Default: `tanh`. It could also be string
      that is within Keras activation function names.
    reuse: (optional) Python boolean describing whether to reuse variables in an
      existing scope. If not `True`, and the existing scope already has the
      given variables, an error is raised.
    name: String, the name of the layer. Layers with the same name will share
      weights, but to avoid mistakes we require reuse=True in such cases.
    dtype: Default dtype of the layer (default of `None` means use the type of
      the first input). Required when `build` is called before `call`.
    **kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable` etc when constructing the cell from configs of get_config().
  """
  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    warnings.warn("`tf.nn.rnn_cell.BasicRNNCell` is deprecated and will be "
                  "removed in a future version. This class "
                  "is equivalent as `tf.keras.layers.SimpleRNNCell`, "
                  "and will be replaced by that in Tensorflow 2.0.")
    super(BasicRNNCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
      logging.warning(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better "
          "performance on GPU.", self)
    # Inputs must be 2-dimensional.
    self.input_spec = input_spec.InputSpec(ndim=2)
    self._num_units = num_units
    # Accepts either a callable or a Keras activation name; default is tanh.
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = tf.tanh
  @property
  def state_size(self):
    """State is a single tensor with `num_units` columns."""
    return self._num_units
  @property
  def output_size(self):
    """Output width equals `num_units` (output doubles as the new state)."""
    return self._num_units
  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    """Creates the fused kernel and bias used by `call`."""
    if inputs_shape[-1] is None:
      raise ValueError(
          "Expected inputs.shape[-1] to be known, "
          f"received shape: {inputs_shape}")
    _check_supported_dtypes(self.dtype)
    input_depth = inputs_shape[-1]
    # A single kernel multiplies the concatenated [inputs, state], hence its
    # first dimension is input_depth + num_units.
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=tf.compat.v1.zeros_initializer(dtype=self.dtype))
    self.built = True
  def call(self, inputs, state):
    """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
    _check_rnn_cell_input_dtypes([inputs, state])
    gate_inputs = tf.matmul(
        tf.concat([inputs, state], 1), self._kernel)
    gate_inputs = tf.nn.bias_add(gate_inputs, self._bias)
    output = self._activation(gate_inputs)
    # The output is also the next state.
    return output, output
  def get_config(self):
    """Returns the config used to reconstruct this cell."""
    config = {
        "num_units": self._num_units,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    base_config = super(BasicRNNCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.GRUCell"])
@tf_export(v1=["nn.rnn_cell.GRUCell"])
class GRUCell(LayerRNNCell):
  """Gated Recurrent Unit cell.
  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or
  `tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU.
  Args:
    num_units: int, The number of units in the GRU cell.
    activation: Nonlinearity to use. Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables in an
      existing scope. If not `True`, and the existing scope already has the
      given variables, an error is raised.
    kernel_initializer: (optional) The initializer to use for the weight and
      projection matrices.
    bias_initializer: (optional) The initializer to use for the bias.
    name: String, the name of the layer. Layers with the same name will share
      weights, but to avoid mistakes we require reuse=True in such cases.
    dtype: Default dtype of the layer (default of `None` means use the type of
      the first input). Required when `build` is called before `call`.
    **kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable` etc when constructing the cell from configs of get_config().
  References:
    Learning Phrase Representations using RNN Encoder Decoder for Statistical
    Machine Translation:
      [Cho et al., 2014]
      (https://aclanthology.coli.uni-saarland.de/papers/D14-1179/d14-1179)
      ([pdf](http://emnlp2014.org/papers/pdf/EMNLP2014179.pdf))
  """
  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               kernel_initializer=None,
               bias_initializer=None,
               name=None,
               dtype=None,
               **kwargs):
    warnings.warn("`tf.nn.rnn_cell.GRUCell` is deprecated and will be removed "
                  "in a future version. This class "
                  "is equivalent as `tf.keras.layers.GRUCell`, "
                  "and will be replaced by that in Tensorflow 2.0.")
    super(GRUCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
      logging.warning(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnGRU for better "
          "performance on GPU.", self)
    # Inputs must be 2-dimensional.
    self.input_spec = input_spec.InputSpec(ndim=2)
    self._num_units = num_units
    # Accepts either a callable or a Keras activation name; default is tanh.
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = tf.tanh
    self._kernel_initializer = initializers.get(kernel_initializer)
    self._bias_initializer = initializers.get(bias_initializer)
  @property
  def state_size(self):
    """State is a single tensor with `num_units` columns."""
    return self._num_units
  @property
  def output_size(self):
    """Output width equals `num_units` (output doubles as the new state)."""
    return self._num_units
  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    """Creates the gate (r, u) and candidate kernels/biases used by `call`."""
    if inputs_shape[-1] is None:
      raise ValueError(
          "Expected inputs.shape[-1] to be known, "
          f"received shape: {inputs_shape}")
    _check_supported_dtypes(self.dtype)
    input_depth = inputs_shape[-1]
    # Gate kernel produces reset and update gates together (2 * num_units).
    self._gate_kernel = self.add_variable(
        "gates/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, 2 * self._num_units],
        initializer=self._kernel_initializer)
    # Gate bias defaults to 1.0 (rather than 0) when no initializer is given.
    self._gate_bias = self.add_variable(
        "gates/%s" % _BIAS_VARIABLE_NAME,
        shape=[2 * self._num_units],
        initializer=(self._bias_initializer
                     if self._bias_initializer is not None else
                     tf.compat.v1.constant_initializer(1.0, dtype=self.dtype)))
    self._candidate_kernel = self.add_variable(
        "candidate/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, self._num_units],
        initializer=self._kernel_initializer)
    self._candidate_bias = self.add_variable(
        "candidate/%s" % _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=(self._bias_initializer
                     if self._bias_initializer is not None else
                     tf.compat.v1.zeros_initializer(dtype=self.dtype)))
    self.built = True
  def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    _check_rnn_cell_input_dtypes([inputs, state])
    gate_inputs = tf.matmul(
        tf.concat([inputs, state], 1), self._gate_kernel)
    gate_inputs = tf.nn.bias_add(gate_inputs, self._gate_bias)
    value = tf.sigmoid(gate_inputs)
    # r: reset gate, u: update gate.
    r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
    r_state = r * state
    # Candidate activation computed from inputs and the reset-scaled state.
    candidate = tf.matmul(
        tf.concat([inputs, r_state], 1), self._candidate_kernel)
    candidate = tf.nn.bias_add(candidate, self._candidate_bias)
    c = self._activation(candidate)
    # New state interpolates between old state and candidate via the update
    # gate; the output is the new state.
    new_h = u * state + (1 - u) * c
    return new_h, new_h
  def get_config(self):
    """Returns the config used to reconstruct this cell."""
    config = {
        "num_units": self._num_units,
        "kernel_initializer": initializers.serialize(self._kernel_initializer),
        "bias_initializer": initializers.serialize(self._bias_initializer),
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    base_config = super(GRUCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


@keras_export(v1=["keras.__internal__.legacy.rnn_cell.LSTMStateTuple"])
@tf_export(v1=["nn.rnn_cell.LSTMStateTuple"])
class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

  Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state
  and `h` is the output.

  Only used when `state_is_tuple=True`.
  """
  __slots__ = ()

  @property
  def dtype(self):
    """The dtype shared by `c` and `h`; raises TypeError if they disagree."""
    if self.c.dtype != self.h.dtype:
      raise TypeError("Inconsistent dtypes for internal state: "
                      f"{self.c.dtype} vs {self.h.dtype}")
    return self.c.dtype
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.BasicLSTMCell"])
@tf_export(v1=["nn.rnn_cell.BasicLSTMCell"])
class BasicLSTMCell(LayerRNNCell):
  """DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead.
  Basic LSTM recurrent network cell.
  The implementation is based on: http://arxiv.org/abs/1409.2329.
  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.
  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.
  For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell`
  that follows.
  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
  `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
  better performance on CPU.
  """
  def __init__(self,
               num_units,
               forget_bias=1.0,
               state_is_tuple=True,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    """Initialize the basic LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above). Must set
        to `0.0` manually when restoring from CudnnLSTM-trained checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of the
        `c_state` and `m_state`. If False, they are concatenated along the
        column axis. The latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`. It
        could also be string that is within Keras activation function names.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().
        When restoring from CudnnLSTM-trained checkpoints, must use
        `CudnnCompatibleLSTMCell` instead.
    """
    warnings.warn("`tf.nn.rnn_cell.BasicLSTMCell` is deprecated and will be "
                  "removed in a future version. This class "
                  "is equivalent as `tf.keras.layers.LSTMCell`, "
                  "and will be replaced by that in Tensorflow 2.0.")
    super(BasicLSTMCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if not state_is_tuple:
      logging.warning(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True.", self)
    if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
      logging.warning(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
          "performance on GPU.", self)
    # Inputs must be 2-dimensional.
    self.input_spec = input_spec.InputSpec(ndim=2)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    # `activation` may be a callable or a Keras activation name; default tanh.
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = tf.tanh
  @property
  def state_size(self):
    # Tuple state is (c, h); concatenated state packs both into one tensor.
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)
  @property
  def output_size(self):
    return self._num_units
  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    """Creates the fused gate kernel and bias variables."""
    if inputs_shape[-1] is None:
      raise ValueError(
          "Expected inputs.shape[-1] to be known, "
          f"received shape: {inputs_shape}")
    _check_supported_dtypes(self.dtype)
    input_depth = inputs_shape[-1]
    h_depth = self._num_units
    # One fused kernel/bias covers all four gates (i, j, f, o).  The variable
    # names must stay stable so existing checkpoints keep loading.
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + h_depth, 4 * self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[4 * self._num_units],
        initializer=tf.compat.v1.zeros_initializer(dtype=self.dtype))
    self.built = True
  def call(self, inputs, state):
    """Long short-term memory cell (LSTM).
    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size,
        num_units]`, if `state_is_tuple` has been set to `True`.  Otherwise, a
        `Tensor` shaped `[batch_size, 2 * num_units]`.
    Returns:
      A pair containing the new hidden state, and the new state (either a
        `LSTMStateTuple` or a concatenated state, depending on
        `state_is_tuple`).
    """
    _check_rnn_cell_input_dtypes([inputs, state])
    sigmoid = tf.sigmoid
    one = tf.constant(1, dtype=tf.int32)
    # Parameters of gates are concatenated into one multiply for efficiency.
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = tf.split(value=state, num_or_size_splits=2, axis=one)
    gate_inputs = tf.matmul(
        tf.concat([inputs, h], 1), self._kernel)
    gate_inputs = tf.nn.bias_add(gate_inputs, self._bias)
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = tf.split(
        value=gate_inputs, num_or_size_splits=4, axis=one)
    forget_bias_tensor = tf.constant(self._forget_bias, dtype=f.dtype)
    # Note that using `add` and `multiply` instead of `+` and `*` gives a
    # performance improvement. So using those at the cost of readability.
    add = tf.add
    multiply = tf.multiply
    # new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * activation(j)
    new_c = add(
        multiply(c, sigmoid(add(f, forget_bias_tensor))),
        multiply(sigmoid(i), self._activation(j)))
    # new_h = activation(new_c) * sigmoid(o)
    new_h = multiply(self._activation(new_c), sigmoid(o))
    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = tf.concat([new_c, new_h], 1)
    return new_h, new_state
  def get_config(self):
    config = {
        "num_units": self._num_units,
        "forget_bias": self._forget_bias,
        "state_is_tuple": self._state_is_tuple,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    base_config = super(BasicLSTMCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.LSTMCell"])
@tf_export(v1=["nn.rnn_cell.LSTMCell"])
class LSTMCell(LayerRNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.
  The default non-peephole implementation is based on (Gers et al., 1999).
  The peephole implementation is based on (Sak et al., 2014).
  The class uses optional peep-hole connections, optional cell clipping, and
  an optional projection layer.
  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
  `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
  better performance on CPU.
  References:
    Long short-term memory recurrent neural network architectures for large
    scale acoustic modeling:
      [Sak et al., 2014]
      (https://www.isca-speech.org/archive/interspeech_2014/i14_0338.html)
      ([pdf]
      (https://www.isca-speech.org/archive/archive_papers/interspeech_2014/i14_0338.pdf))
    Learning to forget:
      [Gers et al., 1999]
      (http://digital-library.theiet.org/content/conferences/10.1049/cp_19991218)
      ([pdf](https://arxiv.org/pdf/1409.2329.pdf))
    Long Short-Term Memory:
      [Hochreiter et al., 1997]
      (https://www.mitpressjournals.org/doi/abs/10.1162/neco.1997.9.8.1735)
      ([pdf](http://ml.jku.at/publications/older/3504.pdf))
  """
  def __init__(self,
               num_units,
               use_peepholes=False,
               cell_clip=None,
               initializer=None,
               num_proj=None,
               proj_clip=None,
               num_unit_shards=None,
               num_proj_shards=None,
               forget_bias=1.0,
               state_is_tuple=True,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
        variable_scope partitioner instead.
      num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
        variable_scope partitioner instead.
      forget_bias: Biases of the forget gate are initialized by default to 1 in
        order to reduce the scale of forgetting at the beginning of the
        training. Must set it manually to `0.0` when restoring from CudnnLSTM
        trained checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of the
        `c_state` and `m_state`.  If False, they are concatenated along the
        column axis.  This latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`. It
        could also be string that is within Keras activation function names.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().
        When restoring from CudnnLSTM-trained checkpoints, use
        `CudnnCompatibleLSTMCell` instead.
    """
    warnings.warn("`tf.nn.rnn_cell.LSTMCell` is deprecated and will be "
                  "removed in a future version. This class "
                  "is equivalent as `tf.keras.layers.LSTMCell`, "
                  "and will be replaced by that in Tensorflow 2.0.")
    super(LSTMCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if not state_is_tuple:
      logging.warning(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True.", self)
    if num_unit_shards is not None or num_proj_shards is not None:
      logging.warning(
          "%s: The num_unit_shards and proj_unit_shards parameters are "
          "deprecated and will be removed in Jan 2017.  "
          "Use a variable scope with a partitioner instead.", self)
    if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
      logging.warning(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
          "performance on GPU.", self)
    # Inputs must be 2-dimensional.
    self.input_spec = input_spec.InputSpec(ndim=2)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializers.get(initializer)
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    # `activation` may be a callable or a Keras activation name; default tanh.
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = tf.tanh
    # With a projection layer, the emitted state/output is num_proj wide.
    if num_proj:
      self._state_size = (
          LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units +
          num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 *
          num_units)
      self._output_size = num_units
  @property
  def state_size(self):
    return self._state_size
  @property
  def output_size(self):
    return self._output_size
  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    """Creates kernel, bias, and optional peephole/projection variables."""
    if inputs_shape[-1] is None:
      # BUG FIX: this message was a plain string with a literal
      # "{inputs_shape}" placeholder; it must be an f-string so the actual
      # shape is interpolated (matching BasicLSTMCell.build above).
      raise ValueError(
          "Expected inputs.shape[-1] to be known, "
          f"received shape: {inputs_shape}")
    _check_supported_dtypes(self.dtype)
    input_depth = inputs_shape[-1]
    h_depth = self._num_units if self._num_proj is None else self._num_proj
    maybe_partitioner = (
        tf.compat.v1.fixed_size_partitioner(self._num_unit_shards)
        if self._num_unit_shards is not None else None)
    # One fused kernel/bias covers all four gates (i, j, f, o).  Variable
    # names must stay stable so existing checkpoints keep loading.
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + h_depth, 4 * self._num_units],
        initializer=self._initializer,
        partitioner=maybe_partitioner)
    if self.dtype is None:
      initializer = tf.compat.v1.zeros_initializer
    else:
      initializer = tf.compat.v1.zeros_initializer(dtype=self.dtype)
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[4 * self._num_units],
        initializer=initializer)
    # Peephole connections are diagonal (per-unit) weights on the cell state.
    if self._use_peepholes:
      self._w_f_diag = self.add_variable(
          "w_f_diag", shape=[self._num_units], initializer=self._initializer)
      self._w_i_diag = self.add_variable(
          "w_i_diag", shape=[self._num_units], initializer=self._initializer)
      self._w_o_diag = self.add_variable(
          "w_o_diag", shape=[self._num_units], initializer=self._initializer)
    if self._num_proj is not None:
      maybe_proj_partitioner = (
          tf.compat.v1.fixed_size_partitioner(self._num_proj_shards)
          if self._num_proj_shards is not None else None)
      self._proj_kernel = self.add_variable(
          "projection/%s" % _WEIGHTS_VARIABLE_NAME,
          shape=[self._num_units, self._num_proj],
          initializer=self._initializer,
          partitioner=maybe_proj_partitioner)
    self.built = True
  def call(self, inputs, state):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, must be 2-D, `[batch, input_size]`.
      state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
        [batch, state_size]`.  If `state_is_tuple` is True, this must be a tuple
        of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
    Returns:
      A tuple containing:
      - A `2-D, [batch, output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`.  Same type and shape(s) as `state`.
    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    _check_rnn_cell_input_dtypes([inputs, state])
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    sigmoid = tf.sigmoid
    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
      m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])
    input_size = inputs.get_shape().with_rank(2).dims[1].value
    if input_size is None:
      # Minor message fix: added the missing space between sentences.
      raise ValueError(
          "Could not infer input size from inputs.get_shape()[-1]. "
          f"Received input shape: {inputs.get_shape()}")
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    lstm_matrix = tf.matmul(
        tf.concat([inputs, m_prev], 1), self._kernel)
    lstm_matrix = tf.nn.bias_add(lstm_matrix, self._bias)
    i, j, f, o = tf.split(
        value=lstm_matrix, num_or_size_splits=4, axis=1)
    # Diagonal connections
    if self._use_peepholes:
      c = (
          sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
          sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
    else:
      c = (
          sigmoid(f + self._forget_bias) * c_prev +
          sigmoid(i) * self._activation(j))
    if self._cell_clip is not None:
      # pylint: disable=invalid-unary-operand-type
      c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)
      # pylint: enable=invalid-unary-operand-type
    if self._use_peepholes:
      m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
    else:
      m = sigmoid(o) * self._activation(c)
    if self._num_proj is not None:
      m = tf.matmul(m, self._proj_kernel)
      if self._proj_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        m = tf.clip_by_value(m, -self._proj_clip, self._proj_clip)
        # pylint: enable=invalid-unary-operand-type
    new_state = (
        LSTMStateTuple(c, m)
        if self._state_is_tuple else tf.concat([c, m], 1))
    return m, new_state
  def get_config(self):
    config = {
        "num_units": self._num_units,
        "use_peepholes": self._use_peepholes,
        "cell_clip": self._cell_clip,
        "initializer": initializers.serialize(self._initializer),
        "num_proj": self._num_proj,
        "proj_clip": self._proj_clip,
        "num_unit_shards": self._num_unit_shards,
        "num_proj_shards": self._num_proj_shards,
        "forget_bias": self._forget_bias,
        "state_is_tuple": self._state_is_tuple,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    base_config = super(LSTMCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class _RNNCellWrapperV1(RNNCell):
  """Base class for cells wrappers V1 compatibility.
  This class along with `_RNNCellWrapperV2` allows to define cells wrappers that
  are compatible with V1 and V2, and defines helper methods for this purpose.
  """
  def __init__(self, cell, *args, **kwargs):
    super(_RNNCellWrapperV1, self).__init__(*args, **kwargs)
    # Rejects objects that do not expose the RNNCell interface.
    assert_like_rnncell("cell", cell)
    self.cell = cell
    # Track the inner cell so its variables are saved with this wrapper when
    # using object-based checkpointing.
    if isinstance(cell, tf.__internal__.tracking.Trackable):
      self._track_trackable(self.cell, name="cell")
  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Calls the wrapped cell and performs the wrapping logic.
    This method is called from the wrapper's `call` or `__call__` methods.
    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments.
    Returns:
      A pair containing:
      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    # Subclasses (e.g. DropoutWrapperBase) implement the actual wrapping.
    raise NotImplementedError
  def __call__(self, inputs, state, scope=None):
    """Runs the RNN cell step computation.
    We assume that the wrapped RNNCell is being built within its `__call__`
    method. We directly use the wrapped cell's `__call__` in the overridden
    wrapper `__call__` method.
    This allows to use the wrapped cell and the non-wrapped cell equivalently
    when using `__call__`.
    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      scope: VariableScope for the subgraph created in the wrapped cells'
        `__call__`.
    Returns:
      A pair containing:
      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    return self._call_wrapped_cell(
        inputs, state, cell_call_fn=self.cell.__call__, scope=scope)
  def get_config(self):
    """Serializes the wrapper; the inner cell is stored as a nested config."""
    config = {
        "cell": {
            "class_name": self.cell.__class__.__name__,
            "config": self.cell.get_config()
        },
    }
    base_config = super(_RNNCellWrapperV1, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config, custom_objects=None):
    config = config.copy()
    cell = config.pop("cell")
    # NOTE(review): the "cell" entry from get_config is a plain dict and is not
    # deserialized here, so `assert_like_rnncell` fails unless the caller has
    # already replaced it with a real RNNCell instance — presumably intentional;
    # the ValueError below tells callers to do exactly that.
    try:
      assert_like_rnncell("cell", cell)
      return cls(cell, **config)
    except TypeError:
      raise ValueError("RNNCellWrapper cannot reconstruct the wrapped cell. "
                       "Please overwrite the cell in the config with a RNNCell "
                       "instance.")
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.DropoutWrapper"])
@tf_export(v1=["nn.rnn_cell.DropoutWrapper"])
class DropoutWrapper(rnn_cell_wrapper_impl.DropoutWrapperBase,
                     _RNNCellWrapperV1):
  """Operator adding dropout to inputs and outputs of the given cell."""
  def __init__(self, *args, **kwargs):  # pylint: disable=useless-super-delegation
    super(DropoutWrapper, self).__init__(*args, **kwargs)
  # Reuse the base implementation's docstring so the public API stays
  # documented without duplicating the text here.
  __init__.__doc__ = rnn_cell_wrapper_impl.DropoutWrapperBase.__init__.__doc__
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.ResidualWrapper"])
@tf_export(v1=["nn.rnn_cell.ResidualWrapper"])
class ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase,
                      _RNNCellWrapperV1):
  """RNNCell wrapper that ensures cell inputs are added to the outputs."""
  def __init__(self, *args, **kwargs):  # pylint: disable=useless-super-delegation
    super(ResidualWrapper, self).__init__(*args, **kwargs)
  # Reuse the base implementation's docstring so the public API stays
  # documented without duplicating the text here.
  __init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.DeviceWrapper"])
@tf_export(v1=["nn.rnn_cell.DeviceWrapper"])
class DeviceWrapper(rnn_cell_wrapper_impl.DeviceWrapperBase,
                    _RNNCellWrapperV1):
  """Operator that ensures an RNNCell runs on a particular device."""
  def __init__(self, *args, **kwargs):  # pylint: disable=useless-super-delegation
    super(DeviceWrapper, self).__init__(*args, **kwargs)
  # Reuse the base implementation's docstring so the public API stays
  # documented without duplicating the text here.
  __init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.MultiRNNCell"])
@tf_export(v1=["nn.rnn_cell.MultiRNNCell"])
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells.
  Example:
  ```python
  num_units = [128, 64]
  cells = [BasicLSTMCell(num_units=n) for n in num_units]
  stacked_rnn_cell = MultiRNNCell(cells)
  ```
  """
  def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.
    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`.  If False, the states are all concatenated along the
        column axis.  This latter behavior will soon be deprecated.
    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the cells
        returns a state tuple but the flag `state_is_tuple` is `False`.
    """
    logging.warning("`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class "
                    "is equivalent as `tf.keras.layers.StackedRNNCells`, "
                    "and will be replaced by that in Tensorflow 2.0.")
    super(MultiRNNCell, self).__init__()
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    if not tf.nest.is_nested(cells):
      raise TypeError(f"cells must be a list or tuple, but received: {cells}.")
    # Duplicate cell objects share variables; warn once since this is usually
    # unintended.
    if len(set(id(cell) for cell in cells)) < len(cells):
      logging.log_first_n(
          logging.WARN, "At least two cells provided to MultiRNNCell "
          "are the same object and will share weights.", 1)
    self._cells = cells
    for cell_number, cell in enumerate(self._cells):
      # Add Trackable dependencies on these cells so their variables get
      # saved with this object when using object-based saving.
      if isinstance(cell, tf.__internal__.tracking.Trackable):
        # TODO(allenl): Track down non-Trackable callers.
        self._track_trackable(cell, name="cell-%d" % (cell_number,))
    self._state_is_tuple = state_is_tuple
    # A flat (concatenated) state cannot represent cells with nested states.
    if not state_is_tuple:
      if any(tf.nest.is_nested(c.state_size) for c in self._cells):
        raise ValueError(
            "Some cells return tuples of states, but the flag "
            "state_is_tuple is not set. "
            f"State sizes are: {[c.state_size for c in self._cells]}")
  @property
  def state_size(self):
    # Tuple state: one entry per cell; flat state: sum of all cell widths.
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    else:
      return sum(cell.state_size for cell in self._cells)
  @property
  def output_size(self):
    # The stack's output is the last cell's output.
    return self._cells[-1].output_size
  def zero_state(self, batch_size, dtype):
    with backend.name_scope(type(self).__name__ + "ZeroState"):
      if self._state_is_tuple:
        return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
      else:
        # We know here that state_size of each cell is not a tuple and
        # presumably does not contain TensorArrays or anything else fancy
        return super(MultiRNNCell, self).zero_state(batch_size, dtype)
  @property
  def trainable_weights(self):
    if not self.trainable:
      return []
    weights = []
    for cell in self._cells:
      if isinstance(cell, base_layer.Layer):
        weights += cell.trainable_weights
    return weights
  @property
  def non_trainable_weights(self):
    weights = []
    for cell in self._cells:
      if isinstance(cell, base_layer.Layer):
        weights += cell.non_trainable_weights
    # When the whole stack is frozen, the cells' nominally-trainable weights
    # are reported as non-trainable too (listed first, matching Layer
    # conventions).
    if not self.trainable:
      trainable_weights = []
      for cell in self._cells:
        if isinstance(cell, base_layer.Layer):
          trainable_weights += cell.trainable_weights
      return trainable_weights + weights
    return weights
  def call(self, inputs, state):
    """Run this multi-layer cell on inputs, starting from state."""
    cur_state_pos = 0  # column offset into a concatenated (non-tuple) state
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      # Each sub-cell gets its own variable scope so weights do not collide.
      with tf.compat.v1.variable_scope("cell_%d" % i):
        if self._state_is_tuple:
          if not tf.nest.is_nested(state):
            raise ValueError(
                f"Expected state to be a tuple of length {len(self.state_size)}"
                f", but received: {state}")
          cur_state = state[i]
        else:
          # Slice this cell's columns out of the flat state tensor.
          cur_state = tf.slice(state, [0, cur_state_pos],
                               [-1, cell.state_size])
          cur_state_pos += cell.state_size
        # Output of each cell feeds the next cell's input.
        cur_inp, new_state = cell(cur_inp, cur_state)
        new_states.append(new_state)
    new_states = (
        tuple(new_states) if self._state_is_tuple else tf.concat(
            new_states, 1))
    return cur_inp, new_states
def _check_rnn_cell_input_dtypes(inputs):
  """Validates that all tensors fed to an RNN cell have supported dtypes.
  Default RNN cells only support float and complex dtypes, because their
  activation functions (tanh and sigmoid) are only defined for those types.
  Raises a descriptive error otherwise.
  Args:
    inputs: tensor or nested structure of tensors passed to an RNN cell as
      input or state.
  Raises:
    ValueError: if any tensor in the structure has a dtype that is neither
      floating point nor complex.
  """
  flat_tensors = tf.nest.flatten(inputs)
  for tensor in flat_tensors:
    _check_supported_dtypes(tensor.dtype)
def _check_supported_dtypes(dtype):
if dtype is None:
return
dtype = tf.as_dtype(dtype)
if not (dtype.is_floating or dtype.is_complex):
raise ValueError("RNN cell only supports floating point inputs, "
f"but received dtype: {dtype}")
| 53,998 | 38.21496 | 89 | py |
keras | keras-master/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module contains the implementation of RNN cell wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import hashlib
import numbers
import sys
import types as python_types
import warnings
from keras.utils import generic_utils
class DropoutWrapperBase:
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self,
cell,
input_keep_prob=1.0,
output_keep_prob=1.0,
state_keep_prob=1.0,
variational_recurrent=False,
input_size=None,
dtype=None,
seed=None,
dropout_state_filter_visitor=None,
**kwargs):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
then the same dropout mask is applied at every step, as described in:
[A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287).
Otherwise a different dropout mask is applied at every time step.
Note, by default (unless a custom `dropout_state_filter` is provided),
the memory state (`c` component of any `LSTMStateTuple`) passing through
a `DropoutWrapper` is never modified. This behavior is described in the
above article.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
state_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
State dropout is performed on the outgoing states of the cell. **Note**
the state components to which dropout is applied when `state_keep_prob`
is in `(0, 1)` are also determined by the argument
`dropout_state_filter_visitor` (e.g. by default dropout is never applied
to the `c` component of an `LSTMStateTuple`).
variational_recurrent: Python bool. If `True`, then the same dropout
pattern is applied across all time steps per run call. If this parameter
is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff** `variational_recurrent
= True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
dropout_state_filter_visitor: (optional), default: (see below). Function
that takes any hierarchical level of the state and returns a scalar or
depth=1 structure of Python booleans describing which terms in the state
should be dropped out. In addition, if the function returns `True`,
dropout is applied across this sublevel. If the function returns
`False`, dropout is not applied across this entire sublevel.
Default behavior: perform dropout on all terms except the memory (`c`)
state of `LSTMCellState` objects, and don't try to apply dropout to
`TensorArray` objects: ```
def dropout_state_filter_visitor(s):
if isinstance(s, LSTMCellState): # Never perform dropout on the c
state. return LSTMCellState(c=False, h=True)
elif isinstance(s, TensorArray): return False return True ```
**kwargs: dict of keyword arguments for base layer.
Raises:
TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
but not `callable`.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
super(DropoutWrapperBase, self).__init__(cell, dtype=dtype, **kwargs)
if (dropout_state_filter_visitor is not None and
not callable(dropout_state_filter_visitor)):
raise TypeError("dropout_state_filter_visitor must be callable. "
f"Received: {dropout_state_filter_visitor}")
self._dropout_state_filter = (
dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
with tf.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = tf.convert_to_tensor(v)
const_value = tf.get_static_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError(
f"Parameter {attr} must be between 0 and 1. "
"Received {const_prob}")
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set variational_recurrent, seed before running the code below
self._variational_recurrent = variational_recurrent
self._input_size = input_size
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return tf.concat(([1], tf.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return tf.random.uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure_up_to(
input_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure_up_to(
cell.state_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure_up_to(
cell.output_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
  @property
  def wrapped_cell(self):
    """The inner `RNNCell` this wrapper delegates to."""
    return self.cell
  @property
  def state_size(self):
    """State size of the wrapped cell; dropout does not change state shape."""
    return self.cell.state_size
  @property
  def output_size(self):
    """Output size of the wrapped cell; dropout does not change output shape."""
    return self.cell.output_size
  def build(self, inputs_shape):
    """Builds the wrapped cell's variables; the wrapper adds none of its own."""
    self.cell.build(inputs_shape)
    self.built = True
  def zero_state(self, batch_size, dtype):
    """Delegates zero-state creation to the wrapped cell."""
    with tf.name_scope(type(self).__name__ + "ZeroState"):
      return self.cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, unused_index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
ret = tf.divide(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
  def _dropout(self,
               values,
               salt_prefix,
               recurrent_noise,
               keep_prob,
               shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout.

    Args:
      values: Structure of tensors to apply dropout to.
      salt_prefix: String used to salt per-op dropout seeds (e.g. "input",
        "state", or "output").
      recurrent_noise: Structure of pre-computed noise tensors matching
        `values`; only used when variational (recurrent) dropout is enabled.
      keep_prob: Probability of keeping each element.
      shallow_filtered_substructure: Optional shallow structure whose boolean
        leaves select which substructures of `values` receive dropout. If
        None, the entire structure is traversed and dropout applied to all
        leaves.

    Returns:
      A structure matching `values` with dropout applied.
    """
    if shallow_filtered_substructure is None:
      # Put something so we traverse the entire structure; inside the
      # dropout function we check to see if leafs of this are bool or not.
      shallow_filtered_substructure = values
    if not self._variational_recurrent:

      def dropout(i, do_dropout, v):
        # Non-bool `do_dropout` leaves mean "no filter was given" - apply
        # dropout unconditionally in that case.
        if not isinstance(do_dropout, bool) or do_dropout:
          return tf.nn.dropout(
              v, rate=1. - keep_prob, seed=self._gen_seed(salt_prefix, i))
        else:
          return v

      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values])
    else:

      def dropout(i, do_dropout, v, n):
        if not isinstance(do_dropout, bool) or do_dropout:
          return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
        else:
          return v

      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values, recurrent_noise])
  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Runs the wrapped cell and applies dropout.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments.

    Returns:
      A pair containing:

      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """

    def _should_dropout(p):
      # Dropout is applied when keep_prob is a tensor (not a plain float) or
      # when it is a float strictly below 1.
      return (not isinstance(p, float)) or p < 1

    # Input dropout is applied before stepping the cell.
    if _should_dropout(self._input_keep_prob):
      inputs = self._dropout(inputs, "input", self._recurrent_input_noise,
                             self._input_keep_prob)
    output, new_state = cell_call_fn(inputs, state, **kwargs)
    if _should_dropout(self._state_keep_prob):
      # Identify which subsets of the state to perform dropout on and
      # which ones to keep.
      shallow_filtered_substructure = tf.__internal__.nest.get_traverse_shallow_structure(
          self._dropout_state_filter, new_state)
      new_state = self._dropout(new_state, "state", self._recurrent_state_noise,
                                self._state_keep_prob,
                                shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
      output = self._dropout(output, "output", self._recurrent_output_noise,
                             self._output_keep_prob)
    return output, new_state
def get_config(self):
"""Returns the config of the dropout wrapper."""
config = {
"input_keep_prob": self._input_keep_prob,
"output_keep_prob": self._output_keep_prob,
"state_keep_prob": self._state_keep_prob,
"variational_recurrent": self._variational_recurrent,
"input_size": self._input_size,
"seed": self._seed,
}
if self._dropout_state_filter != _default_dropout_state_filter_visitor:
function, function_type, function_module = _serialize_function_to_config(
self._dropout_state_filter)
config.update({"dropout_fn": function,
"dropout_fn_type": function_type,
"dropout_fn_module": function_module})
base_config = super(DropoutWrapperBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
if "dropout_fn" in config:
config = config.copy()
dropout_state_filter = _parse_config_to_function(
config, custom_objects, "dropout_fn", "dropout_fn_type",
"dropout_fn_module")
config.pop("dropout_fn")
config["dropout_state_filter_visitor"] = dropout_state_filter
return super(DropoutWrapperBase, cls).from_config(
config, custom_objects=custom_objects)
class ResidualWrapperBase:
  """RNNCell wrapper that adds the raw cell inputs to the raw cell outputs.

  The residual connection is applied by `residual_fn` when one is supplied,
  and otherwise by elementwise addition over the matching nested structures
  of inputs and outputs.
  """

  def __init__(self, cell, residual_fn=None, **kwargs):
    """Constructs a `ResidualWrapper` for `cell`.

    Args:
      cell: An instance of `RNNCell`.
      residual_fn: (Optional) The function to map raw cell inputs and raw cell
        outputs to the actual cell outputs of the residual network.
        Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
        and outputs.
      **kwargs: dict of keyword arguments for base layer.
    """
    super(ResidualWrapperBase, self).__init__(cell, **kwargs)
    self._residual_fn = residual_fn

  @property
  def state_size(self):
    # The residual connection adds no state; delegate to the wrapped cell.
    return self.cell.state_size

  @property
  def output_size(self):
    # Residual addition preserves the wrapped cell's output size.
    return self.cell.output_size

  def zero_state(self, batch_size, dtype):
    """Returns the wrapped cell's zero state under a dedicated name scope."""
    with tf.name_scope(type(self).__name__ + "ZeroState"):
      return self.cell.zero_state(batch_size, dtype)

  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Run the cell and then apply the residual_fn on its inputs to its outputs.

    Args:
      inputs: cell inputs.
      state: cell state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments passed to the wrapped cell's `call`.

    Returns:
      Tuple of cell outputs and new state.

    Raises:
      TypeError: If cell inputs and outputs have different structure (type).
      ValueError: If cell inputs and outputs have different structure (value).
    """
    raw_outputs, new_state = cell_call_fn(inputs, state, **kwargs)

    def _check_compatible(inp, out):
      # Elementwise addition only makes sense for compatible shapes.
      inp.get_shape().assert_is_compatible_with(out.get_shape())

    def _add_inputs(inputs, outputs):
      tf.nest.assert_same_structure(inputs, outputs)
      tf.nest.map_structure(_check_compatible, inputs, outputs)
      return tf.nest.map_structure(lambda inp, out: inp + out, inputs, outputs)

    combine = self._residual_fn or _add_inputs
    return (combine(inputs, raw_outputs), new_state)

  def get_config(self):
    """Returns the config of the residual wrapper."""
    config = {}
    if self._residual_fn is not None:
      fn, fn_type, fn_module = _serialize_function_to_config(self._residual_fn)
      config["residual_fn"] = fn
      config["residual_fn_type"] = fn_type
      config["residual_fn_module"] = fn_module
    base_config = super(ResidualWrapperBase, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Recreates the wrapper, deserializing any custom residual function."""
    if "residual_fn" in config:
      config = config.copy()
      config["residual_fn"] = _parse_config_to_function(
          config, custom_objects, "residual_fn", "residual_fn_type",
          "residual_fn_module")
    return super(ResidualWrapperBase, cls).from_config(
        config, custom_objects=custom_objects)
class DeviceWrapperBase:
  """Operator that ensures an RNNCell runs on a particular device."""

  def __init__(self, cell, device, **kwargs):
    """Construct a `DeviceWrapper` for `cell` with device `device`.

    Ensures the wrapped `cell` is called with `tf.device(device)`.

    Args:
      cell: An instance of `RNNCell`.
      device: A device string or function, for passing to `tf.device`.
      **kwargs: dict of keyword arguments for base layer.
    """
    super(DeviceWrapperBase, self).__init__(cell, **kwargs)
    self._device = device

  @property
  def state_size(self):
    # State handling is delegated entirely to the wrapped cell.
    return self.cell.state_size

  @property
  def output_size(self):
    return self.cell.output_size

  def zero_state(self, batch_size, dtype):
    """Creates the wrapped cell's zero state, pinned to the chosen device."""
    with tf.name_scope(type(self).__name__ + "ZeroState"), \
        tf.compat.v1.device(self._device):
      return self.cell.zero_state(batch_size, dtype)

  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Run the cell on specified device."""
    with tf.compat.v1.device(self._device):
      return cell_call_fn(inputs, state, **kwargs)

  def get_config(self):
    base_config = super(DeviceWrapperBase, self).get_config()
    return dict(list(base_config.items()) + [("device", self._device)])
def _serialize_function_to_config(function):
"""Serialize the function for get_config()."""
if isinstance(function, python_types.LambdaType):
output = generic_utils.func_dump(function)
output_type = "lambda"
module = function.__module__
elif callable(function):
output = function.__name__
output_type = "function"
module = function.__module__
else:
raise ValueError(
f"Unrecognized function type for input: {type(function)}")
return output, output_type, module
def _parse_config_to_function(config, custom_objects, func_attr_name,
func_type_attr_name, module_attr_name):
"""Reconstruct the function from the config."""
globs = globals()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn("{} is not loaded, but a layer uses it. "
"It may cause errors.".format(module), UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == "function":
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name="function in wrapper")
elif function_type == "lambda":
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
else:
raise TypeError(
f"Unknown function type received: {function_type}. "
"Expected types are ['function', 'lambda']")
return function
def _default_dropout_state_filter_visitor(substate):
  """Default state filter: drop TensorArrays and LSTM memory from dropout."""
  from keras.layers.legacy_rnn.rnn_cell_impl import LSTMStateTuple  # pylint: disable=g-import-not-at-top
  if isinstance(substate, tf.TensorArray):
    # TensorArrays are bookkeeping state, never dropped out.
    return False
  if isinstance(substate, LSTMStateTuple):
    # Do not perform dropout on the memory state.
    return LSTMStateTuple(c=False, h=True)
  return True
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
  """Like `map_structure_up_to`, but passes a running leaf index to `map_fn`."""
  index = 0

  def indexed_fn(*inner_args, **inner_kwargs):
    nonlocal index
    result = map_fn(index, *inner_args, **inner_kwargs)
    index += 1
    return result

  return tf.__internal__.nest.map_structure_up_to(shallow_structure,
                                                  indexed_fn, *args, **kwargs)
| 20,366 | 38.701754 | 105 | py |
keras | keras-master/keras/layers/preprocessing/reduction.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras reduction layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
from keras.engine.base_layer import Layer
from tensorflow.python.platform import tf_logging as logging
def get_reduce_op(reduction_str):
  """Translate a reduction string name to a reduction op."""
  reduce_ops = {
      "max": tf.reduce_max,
      "mean": tf.reduce_mean,
      "min": tf.reduce_min,
      "prod": tf.reduce_prod,
      "sum": tf.reduce_sum,
  }
  if reduction_str not in reduce_ops:
    raise ValueError("Reduction %s is not supported for unweighted inputs." %
                     reduction_str)
  return reduce_ops[reduction_str]
class Reduction(Layer):
  """Performs an optionally-weighted reduction.

  This layer reduces its input along one axis. If a `weights` tensor of a
  compatible shape is passed at call time, the input is multiplied by the
  weights before the reduction is applied.

  Args:
    reduction: The type of reduction to perform. Can be one of the following:
      "max", "mean", "min", "prod", or "sum". This layer uses the Tensorflow
      reduce op which corresponds to that reduction (so, for "mean", we use
      "reduce_mean").
    axis: The axis to reduce along. Defaults to '-2', which is usually the axis
      that contains embeddings (but is not within the embedding itself).

  Input shape:
    A tensor of 2 or more dimensions of any numeric dtype.

  Output:
    A tensor of 1 less dimension than the input tensor, of the same dtype.

  Call arguments:
    inputs: The data to reduce.
    weights: An optional tensor or constant of the same shape as inputs that
      will weight the input data before it is reduced.
  """
  # TODO(momernick): Add example here.

  def __init__(self, reduction, axis=-2, **kwargs):
    self.reduction = reduction
    self.axis = axis
    # We temporarily turn off autocasting, as it does not apply to named call
    # kwargs.
    super(Reduction, self).__init__(**kwargs)

  def call(self, inputs, weights=None):
    # With no weights, the reduction can be applied to the raw input directly.
    if weights is None:
      return get_reduce_op(self.reduction)(inputs, axis=self.axis)

    # TODO(momernick): Add checks for this and a decent error message if the
    # weight shape isn't compatible.
    if weights.shape.rank + 1 == inputs.shape.rank:
      weights = tf.expand_dims(weights, -1)
    weighted_data = tf.multiply(inputs, weights)

    # For these reductions, weighting the values first is all that is needed.
    if self.reduction in ("sum", "prod", "min", "max"):
      return get_reduce_op(self.reduction)(weighted_data, axis=self.axis)

    # Weighted mean: sum of the weighted values divided by the sum of the
    # weights.
    if self.reduction == "mean":
      numerator = tf.reduce_sum(weighted_data, axis=self.axis)
      denominator = tf.reduce_sum(weights, axis=self.axis)
      return tf.divide(numerator, denominator)

    # sqrtn: like mean, but with the L2 norm of the weights as the divisor.
    if self.reduction == "sqrtn":
      logging.warning("Reduction `sqrtn` is deprecated and will be removed "
                      "2021-01-01. Please use the `sum` reduction and divide "
                      "the output by the normalized weights instead.")
      numerator = tf.reduce_sum(weighted_data, axis=self.axis)
      norm = tf.sqrt(tf.reduce_sum(tf.pow(weights, 2), axis=self.axis))
      return tf.divide(numerator, norm)

    raise ValueError("%s is not a supported weighted reduction." %
                     self.reduction)
| 4,492 | 38.412281 | 80 | py |
keras | keras-master/keras/layers/preprocessing/index_lookup.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras index lookup preprocessing layer."""
# pylint: disable=g-classes-have-attributes
import collections
from keras import backend
from keras.engine import base_layer_utils
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import category_encoding
from keras.saving.saved_model import layer_serialization
from keras.utils import layer_utils
from keras.utils import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
INT = "int"
MULTI_HOT = "multi_hot"
ONE_HOT = "one_hot"
COUNT = "count"
TF_IDF = "tf_idf"
_VOCAB_NAME = "vocab"
_IDF_WEIGHTS_NAME = "idf_weights"
class NullInitializer(tf.lookup.KeyValueTensorInitializer):
  """A placeholder initializer for restoring this layer from a SavedModel."""

  def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    # NOTE(review): the base-class constructor is deliberately not invoked;
    # this initializer carries no key/value data, only the dtypes.
    self._key_dtype = key_dtype
    self._value_dtype = value_dtype

  @property
  def key_dtype(self):
    """The expected table key dtype."""
    return self._key_dtype

  @property
  def value_dtype(self):
    """The expected table value dtype."""
    return self._value_dtype

  def initialize(self, table):
    """Returns the table initialization op."""
    # Intentionally a no-op: the table contents are restored elsewhere.
    return None
class VocabWeightHandler(base_layer_utils.TrackableWeightHandler):
  """Adds the vocabulary as a layer weight during serialization."""

  def __init__(self, lookup_layer):
    # NOTE(review): the base __init__ is not called; the attributes the
    # weight-handling protocol reads are set up directly.
    self._layer = lookup_layer
    self._dtype = lookup_layer.dtype
    self._distribute_strategy = tf.distribute.get_strategy()

  @property
  def num_tensors(self):
    # The vocabulary is exposed as a single tensor of tokens.
    return 1

  def set_weights(self, weights):
    """Rebuilds the layer's lookup table from the saved token tensor."""
    tokens = tf.convert_to_tensor(weights[0], self._dtype)
    self._layer.lookup_table = self._layer._lookup_table_from_tokens(tokens)  # pylint: disable=protected-access

  def get_tensors(self):
    # Just save the non-config part of the vocab (no special tokens).
    vocab = self._layer.get_vocabulary(include_special_tokens=False)
    return [tf.convert_to_tensor(vocab, self._dtype)]
class IndexLookup(base_preprocessing_layer.PreprocessingLayer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output via
a table-based lookup, with optional out-of-vocabulary handling. This is the
basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this size
includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0. In
other output modes, the token will not appear in the vocabulary and
instances of the mask token in the input will be dropped. If set to None,
no mask term will be added.
oov_token: Only used when `invert` is True. The token to return for OOV
indices.
vocabulary: Optional. Either an array or a string path to a text file. If
passing an array, can pass a tuple, list, 1D numpy array, or 1D tensor
containing the vocbulary terms. If passing a file path, the file should
contain one line per term in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
invert: Only valid when `output_mode` is `"int"`. If True, this layer will
map indices to vocabulary items instead of mapping vocabulary items to
indices. Default to False.
output_mode: Specification for the output of the layer. Defaults to `"int"`.
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
`"tf_idf"` configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1 at the element
index. If the last dimension is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new dimension for
the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
the same size as the vocabulary, containing a 1 for each vocabulary
term present in the sample. Treats the last dimension as the sample
dimension, if input shape is (..., sample_length), output shape will
be (..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, the output will have its feature axis
padded to `max_tokens` even if the number of unique tokens in the
vocabulary is less than max_tokens, resulting in a tensor of shape
[batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
sparse: Boolean. Only applicable to `"multi_hot"` and `"count"` output
modes. If True, returns a `SparseTensor` instead of a dense `Tensor`.
Defaults to False.
"""
  def __init__(self,
               max_tokens,
               num_oov_indices,
               mask_token,
               oov_token,
               vocabulary=None,
               invert=False,
               output_mode="int",
               sparse=False,
               pad_to_max_tokens=False,
               **kwargs):
    """Initializes the lookup layer; see the class docstring for arguments."""
    # If max_tokens is set, the value must be greater than 1 - otherwise we
    # are creating a 0-element vocab, which doesn't make sense.
    if max_tokens is not None and max_tokens <= 1:
      raise ValueError("If set, `max_tokens` must be greater than 1. "
                       "You passed `max_tokens={}`".format(max_tokens))
    if pad_to_max_tokens and max_tokens is None:
      raise ValueError("If pad_to_max_tokens is True, must set `max_tokens`. "
                       "You passed `max_tokens={}`".format(max_tokens))
    if num_oov_indices < 0:
      raise ValueError("`num_oov_indices` must be greater than or equal to 0. "
                       "You passed {}".format(num_oov_indices))
    # Support deprecated names for output_modes.
    if output_mode == "binary":
      output_mode = MULTI_HOT
    if output_mode == "tf-idf":
      output_mode = TF_IDF
    # 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF)
    layer_utils.validate_string_arg(
        output_mode,
        allowable_strings=(INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF),
        layer_name=self.__class__.__name__,
        arg_name="output_mode")
    # Inverting (index -> token) only makes sense for integer output.
    if invert and output_mode != INT:
      raise ValueError("`output_mode` must be {} when `invert` is true. You "
                       "passed {}".format(INT, output_mode))
    self.invert = invert
    self.max_tokens = max_tokens
    self.num_oov_indices = num_oov_indices
    self.mask_token = mask_token
    self.oov_token = oov_token
    self.output_mode = output_mode
    self.sparse = sparse
    self.pad_to_max_tokens = pad_to_max_tokens
    self.input_vocabulary = None
    # IndexLookupLayerSavedModelSaver will clear the config vocabulary to
    # restore the lookup table ops directly. We persist this hidden option to
    # persist the fact that we have a non-adaptable layer with a manually
    # set vocabulary.
    self._has_input_vocabulary = kwargs.pop("has_input_vocabulary", False)
    self._frozen_vocab_size = None
    # Drop deprecated config options.
    kwargs.pop("vocabulary_size", None)
    kwargs.pop("has_static_table", None)
    super().__init__(**kwargs)
    # An inverted layer maps int64 indices back to tokens; a forward layer
    # maps tokens (of the layer dtype) to int64 indices.
    if invert:
      self._key_dtype = tf.int64
      self._value_dtype = tf.as_dtype(self.dtype)
      mask_key = 0
      mask_value = mask_token
      self._default_value = self.oov_token
    else:
      self._key_dtype = tf.as_dtype(self.dtype)
      self._value_dtype = tf.int64
      mask_key = mask_token
      # Masks should map to 0 for int output and be dropped otherwise. Max ints
      # will be dropped from the bincount op.
      mask_value = 0 if self.output_mode == INT else tf.int64.max
    if self.num_oov_indices == 0:
      # If there are no OOV indices, we map OOV tokens to -1 and error out
      # during call if we find a negative index.
      self._default_value = -1
    elif self.num_oov_indices == 1:
      # If there is only one OOV index, we can set that index as the default
      # value of the index_lookup table.
      self._default_value = self._oov_start_index()
    else:
      # If we have multiple OOV values, we need to do a further hashing step;
      # to make this easier, we set the OOV value to -1. (This lets us do a
      # vectorized add and cast to boolean to determine locations where we
      # need to do extra hashing.)
      self._default_value = -1
    if self.mask_token is not None:
      self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
      self._mask_value = tf.convert_to_tensor(mask_value, self._value_dtype)
    if self.output_mode == TF_IDF:
      # Variable-length idf weights; the leading special-token slots start at
      # zero until a vocabulary (and weights) is set or adapted.
      self.idf_weights = tf.Variable(
          [0] * self._token_start_index(),
          shape=(None,),
          dtype=backend.floatx(),
          trainable=False)
      self.idf_weights_const = self.idf_weights.value()
    if vocabulary is not None:
      self.set_vocabulary(vocabulary)
    else:
      # When restoring from a keras SavedModel, the loading code will expect to
      # find and restore a lookup_table attribute on the layer. This table needs
      # to be uninitialized as a StaticHashTable cannot be initialized twice.
      self.lookup_table = self._uninitialized_lookup_table()
    if not self._has_input_vocabulary:
      # Add a custom weight handler to return the layer's vocab as its weight.
      self._add_trackable(VocabWeightHandler(self), False)
      # Set adapt state.
      self.token_counts = tf.lookup.experimental.MutableHashTable(
          key_dtype=self.dtype, value_dtype=tf.int64, default_value=0)
      if self.output_mode == TF_IDF:
        self.token_document_counts = tf.lookup.experimental.MutableHashTable(
            key_dtype=self.dtype, value_dtype=tf.int64, default_value=0)
        self.num_documents = tf.Variable(0, dtype=tf.int64, trainable=False)
def compute_output_shape(self, input_shape):
if self.output_mode == INT:
return input_shape
if self.pad_to_max_tokens:
out_depth = self.max_tokens
else:
out_depth = self.vocabulary_size()
return tf.TensorShape([input_shape[0], out_depth])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = (
self._value_dtype if self.output_mode == INT else backend.floatx())
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
  def get_vocabulary(self, include_special_tokens=True):
    """Returns the current vocabulary of the layer.

    Args:
      include_special_tokens: If True, the returned vocabulary will include
        mask and OOV tokens, and a term's index in the vocabulary will equal
        the term's index when calling the layer. If False, the returned
        vocabulary will not include any mask or OOV tokens.

    Returns:
      A list of vocabulary terms, ordered by their lookup index.
    """
    # The lookup table data will not be sorted, so we will create an inverted
    # lookup here, and use that to lookup a range of indices [0, vocab_size).
    if self.lookup_table.size() == 0:
      vocab, indices = [], []
    else:
      keys, values = self.lookup_table.export()
      vocab, indices = (values, keys) if self.invert else (keys, values)
      vocab, indices = (self._tensor_vocab_to_numpy(vocab), indices.numpy())
    # Any index not present in the table (e.g. OOV slots) falls back to the
    # OOV token via the defaultdict factory.
    lookup = collections.defaultdict(lambda: self.oov_token,
                                     zip(indices, vocab))
    vocab = [lookup[x] for x in range(self.vocabulary_size())]
    if self.mask_token is not None and self.output_mode == INT:
      # Index 0 is reserved for the mask token in int output mode.
      vocab[0] = self.mask_token
    if not include_special_tokens:
      vocab = vocab[self._token_start_index():]
    return vocab
  def vocabulary_size(self):
    """Gets the current size of the layer's vocabulary.

    Returns:
      The integer size of the vocabulary, including optional mask and oov
      indices.
    """
    # The table holds only real tokens; add the mask/OOV slots back in.
    return int(self.lookup_table.size().numpy()) + self._token_start_index()
  def vocab_size(self):
    # Deprecated alias kept for backward compatibility; prefer
    # `vocabulary_size()`.
    logging.warning("vocab_size is deprecated, please use vocabulary_size.")
    return self.vocabulary_size()
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary": self._make_serializable(self.input_vocabulary),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
  def set_vocabulary(self, vocabulary, idf_weights=None):
    """Sets vocabulary (and optionally document frequency) data for this layer.

    This method sets the vocabulary and idf weights for this layer directly,
    instead of analyzing a dataset through `adapt`. It should be used whenever
    the vocab (and optionally document frequency) information is already known.
    If vocabulary data is already present in the layer, this method will
    replace it.

    Args:
      vocabulary: Either an array or a string path to a text file. If passing
        an array, can pass a tuple, list, 1D numpy array, or 1D tensor
        containing the vocabulary terms. If passing a file path, the file
        should contain one line per term in the vocabulary.
      idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
        document frequency weights with equal length to vocabulary. Must be set
        if `output_mode` is `"tf_idf"`. Should not be set otherwise.

    Raises:
      ValueError: If there are too many inputs, the inputs do not match, or
        input data is missing.
      RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens when `"multi_hot"`, `"count"`, and `"tf_idf"`
        modes, if `pad_to_max_tokens` is False and the layer itself has
        already been called.
      RuntimeError: If a tensor vocabulary is passed outside of eager
        execution.
    """
    self.input_vocabulary = vocabulary
    # A manually set vocabulary makes the layer non-adaptable.
    self._has_input_vocabulary = True
    if self.output_mode != TF_IDF and idf_weights is not None:
      raise ValueError("`idf_weights` should only be set if output_mode is "
                       "TF_IDF. output_mode is {}.".format(self.output_mode))
    # File-based vocabularies take a fast path: the table is built directly
    # from the file and no further validation is possible here.
    if isinstance(vocabulary, str):
      if not tf.io.gfile.exists(vocabulary):
        raise ValueError(
            "Vocabulary file {} does not exist.".format(vocabulary))
      if self.output_mode == TF_IDF:
        raise ValueError("output_mode `'tf_idf'` does not support loading a "
                         "vocabulary from file.")
      self.lookup_table = self._lookup_table_from_file(vocabulary)
      return
    if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or
                                       tf.is_tensor(idf_weights)):
      raise RuntimeError(
          "Cannot set a tensor vocabulary on {} layer {} when not executing "
          "eagerly. Create this layer or call `set_vocabulary` outside of "
          "any `tf.function`s and with eager execution enabled.".format(
              self.__class__.__name__, self.name))
    # TODO(mattdangerw): for better performance we should rewrite this entire
    # function to operate on tensors and convert vocabulary to a tensor here.
    if tf.is_tensor(vocabulary):
      vocabulary = self._tensor_vocab_to_numpy(vocabulary)
    elif isinstance(vocabulary, (list, tuple)):
      vocabulary = np.array(vocabulary)
    if tf.is_tensor(idf_weights):
      idf_weights = idf_weights.numpy()
    elif isinstance(idf_weights, (list, tuple)):
      idf_weights = np.array(idf_weights)
    if vocabulary.size == 0:
      raise ValueError(
          "Cannot set an empty vocabulary, you passed {}.".format(vocabulary))
    # Validate that any special tokens present in the vocabulary appear at
    # the exact indices the layer reserves for them.
    oov_start = self._oov_start_index()
    token_start = self._token_start_index()
    should_have_mask = (oov_start > 0)
    has_mask = should_have_mask and vocabulary[0] == self.mask_token
    should_have_oov = (self.num_oov_indices > 0)
    expected_oov = [self.oov_token] * self.num_oov_indices
    found_oov = vocabulary[oov_start:token_start]
    has_oov = should_have_oov and np.array_equal(found_oov, expected_oov)
    if all([should_have_mask, has_mask, should_have_oov]) and not has_oov:
      raise ValueError(
          "Invalid vocabulary format. The layer was created with "
          "`mask_token={mask}` and `oov_token={oov}`. These tokens should be "
          "included in the provided vocabulary. The passed vocabulary has the "
          "correct mask token `{mask}` at index 0, but does not have the OOV "
          "token `{oov}` in indices [{start}:{end}]. Instead, we found "
          "`{found}`. Was this vocabulary generated by a layer with "
          "incompatible settings?".format(
              mask=self.mask_token,
              oov=self.oov_token,
              start=oov_start,
              end=token_start,
              found=found_oov))
    if all([should_have_oov, has_oov, should_have_mask]) and not has_mask:
      raise ValueError(
          "Invalid vocabulary format. The layer was created with "
          "`mask_token={mask}` and `oov_token={oov}`. These tokens should be "
          "included in the provided vocabulary. The passed vocabulary has the "
          "correct OOV token `{oov}` at indices [{start}:{end}], but does not "
          "have the mask token `{mask}` in index 0. Instead, we found "
          "`{found}`. Was this vocabulary generated by a layer with "
          "incompatible settings?".format(
              mask=self.mask_token,
              oov=self.oov_token,
              start=oov_start,
              end=token_start,
              found=vocabulary[0]))
    found_special_tokens = has_oov or has_mask
    # Strip the special-token prefix, if present, before building the table.
    if found_special_tokens:
      tokens = vocabulary[token_start:]
    else:
      tokens = vocabulary
    repeated_tokens = self._find_repeated_tokens(tokens)
    if repeated_tokens:
      raise ValueError("The passed vocabulary has at least one repeated "
                       "term. Please uniquify your dataset. The repeated terms "
                       "are {}".format(repeated_tokens))
    # Special tokens may not appear among the regular tokens.
    if self.mask_token in tokens:
      mask_index = np.argwhere(tokens == self.mask_token)[0]
      raise ValueError("Reserved mask token {} was found in the passed "
                       "vocabulary at index {}. Please either remove the "
                       "reserved token from the vocabulary or change the "
                       "mask token for this layer.".format(
                           self.mask_token, mask_index))
    if self.oov_token in tokens:
      oov_index = np.argwhere(tokens == self.oov_token)[0]
      raise ValueError("Reserved OOV token {} was found in the passed "
                       "vocabulary at index {}. Please either remove the "
                       "reserved token from the vocabulary or change the "
                       "OOV token for this layer.".format(
                           self.oov_token, oov_index))
    new_vocab_size = token_start + len(tokens)
    if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
      raise ValueError(
          "Attempted to set a vocabulary larger than the maximum vocab size. "
          "Passed vocab size is {}, max vocab size is {}.".format(
              new_vocab_size, self.max_tokens))
    self.lookup_table = self._lookup_table_from_tokens(tokens)
    if self.output_mode == TF_IDF:
      if idf_weights is None:
        raise ValueError("`idf_weights` must be set if output_mode is TF_IDF")
      if len(vocabulary) != len(idf_weights):
        raise ValueError("`idf_weights` must be the same length as vocabulary. "
                         "len(idf_weights) is {}, len(vocabulary) is {}".format(
                             len(vocabulary), len(idf_weights)))
      idf_weights = self._convert_to_ndarray(idf_weights)
      if idf_weights.ndim != 1:
        raise ValueError(
            "TF-IDF data must be a 1-index array, but received {}".format(
                type(idf_weights)))
      # If the passed vocabulary has no special tokens, we need to pad the front
      # of idf_weights. We don't have real document frequencies for these tokens
      # so we will use an average of all idf_weights passed in as a reasonable
      # default.
      if found_special_tokens:
        front_padding = 0
        front_padding_value = 0
      else:
        front_padding = token_start
        front_padding_value = np.average(idf_weights)
      # If pad_to_max_tokens is true, and max_tokens is greater than our total
      # vocab size, we need to pad the back of idf_weights with zeros as well.
      back_padding_value = 0
      if self.pad_to_max_tokens and self.max_tokens is not None:
        back_padding = self.max_tokens - front_padding - len(idf_weights)
      else:
        back_padding = 0
      weights = np.pad(
          idf_weights, (front_padding, back_padding),
          "constant",
          constant_values=(front_padding_value, back_padding_value))
      weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
      self.idf_weights.assign(weights)
      self.idf_weights_const = self.idf_weights.value()
  def update_state(self, data):
    """Accumulates token counts (and TF-IDF document counts) from `data`.

    Called during `adapt`. Raises if the vocabulary was supplied directly
    (via init argument or `set_vocabulary`), since adapting would conflict
    with the static vocabulary.
    """
    if self._has_input_vocabulary:
      raise ValueError(
          "Cannot adapt {} layer after setting a static vocabulary via init "
          "argument or `set_vocabulary`.".format(self.__class__.__name__))
    data = self._standardize_inputs(data, self.dtype)
    # Uprank scalar and 1-D input to rank 2, so each row counts as a document.
    if data.shape.rank == 0:
      data = tf.expand_dims(data, -1)
    if data.shape.rank == 1:
      data = tf.expand_dims(data, -1)
    tokens, counts = self._num_tokens(data)
    self.token_counts.insert(tokens, counts + self.token_counts.lookup(tokens))
    if self.output_mode == TF_IDF:
      # Dedupe each row of our dataset.
      deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
      # Flatten and count tokens.
      tokens, doc_counts = self._num_tokens(deduped_doc_data)
      self.token_document_counts.insert(
          tokens, doc_counts + self.token_document_counts.lookup(tokens))
      # Document count is the number of rows (ragged rows need nrows()).
      if tf_utils.is_ragged(data):
        self.num_documents.assign_add(data.nrows())
      else:
        self.num_documents.assign_add(tf.shape(data, out_type=tf.int64)[0])
  def finalize_state(self):
    """Computes the final vocabulary (and IDF weights) after `adapt` ends."""
    if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
      # Finalize idf_weights to a const for call even if we don't need to
      # compute a new vocabulary.
      if self.output_mode == TF_IDF:
        self.idf_weights_const = self.idf_weights.value()
      return
    # Remove special tokens from our counts.
    if self.mask_token is not None:
      self.token_counts.remove(
          tf.convert_to_tensor([self.mask_token], self.dtype))
    if self.oov_token is not None:
      self.token_counts.remove(
          tf.convert_to_tensor([self.oov_token], self.dtype))
    tokens, counts = self.token_counts.export()
    # To keep vocabs deterministic, we sort our tokens by count and break ties
    # by sorting the tokens themselves. Tensorflow has no ops for sorting
    # strings, so we need to use numpy for the sort.
    sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
    token_start = self._token_start_index()
    if self.max_tokens:
      # Reserve room for the mask/OOV slots that precede learned tokens.
      max_learned_tokens = self.max_tokens - token_start
      sorted_indices = sorted_indices[:max_learned_tokens]
    tokens = tf.gather(tokens, sorted_indices)
    self.lookup_table = self._lookup_table_from_tokens(tokens)
    if self.output_mode == TF_IDF:
      token_document_counts = self.token_document_counts.lookup(tokens)
      idf_weights = self._inverse_document_frequency(token_document_counts,
                                                     self.num_documents)
      idf_weights = tf.cast(idf_weights, backend.floatx())
      # Pad the front of idf_weights with the average idf weight for OOV tokens.
      # We cannot compute the real idf weight of OOV in a single pass.
      idf_weights = tf.pad(
          idf_weights, [[self._token_start_index(), 0]],
          constant_values=tf.reduce_mean(idf_weights))
      self.idf_weights.assign(idf_weights)
      self.idf_weights_const = self.idf_weights.value()
    # We call this here to save memory, now that we've built our vocabulary, we
    # don't want to keep every token we've seen in separate lookup tables.
    self.reset_state()
def reset_state(self): # pylint: disable=method-hidden
if self._has_input_vocabulary:
return
self.token_counts.remove(self.token_counts.export()[0])
if self.output_mode == TF_IDF:
self.token_document_counts.remove(self.token_document_counts.export()[0])
self.num_documents.assign(0)
  def call(self, inputs):
    """Looks up `inputs` in the table, then encodes per `output_mode`."""
    self._maybe_freeze_vocab_size()
    inputs = self._standardize_inputs(inputs, self._key_dtype)
    original_shape = inputs.shape
    # Some ops will not handle scalar input, so uprank to rank 1.
    if inputs.shape.rank == 0:
      inputs = self._expand_dims(inputs, -1)
    # Composite tensors are handled by looking up their flat values.
    if tf_utils.is_sparse(inputs):
      lookups = tf.SparseTensor(inputs.indices,
                                self._lookup_dense(inputs.values),
                                inputs.dense_shape)
    elif tf_utils.is_ragged(inputs):
      lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
    else:
      lookups = self._lookup_dense(inputs)
    if self.output_mode == INT:
      # If we received a scalar input, downrank back to a scalar.
      if original_shape.rank == 0:
        lookups = tf.squeeze(lookups, -1)
      return lookups
    # One hot will uprank only if the final output dimension is not already 1.
    if self.output_mode == ONE_HOT:
      if lookups.shape[-1] != 1:
        lookups = self._expand_dims(lookups, -1)
    # TODO(b/190445202): remove output rank restriction.
    if lookups.shape.rank > 2:
      raise ValueError(
          "Received input shape {}, which would result in output rank {}. "
          "Currently only outputs up to rank 2 are supported for "
          "`output_mode={}`.".format(original_shape, lookups.shape.rank,
                                     self.output_mode))
    binary_output = self.output_mode in (MULTI_HOT, ONE_HOT)
    if self.pad_to_max_tokens:
      out_depth = self.max_tokens
    else:
      out_depth = self._frozen_vocab_size
    if self.sparse:
      bincounts = category_encoding.sparse_bincount(lookups, out_depth,
                                                    binary_output)
    else:
      bincounts = category_encoding.dense_bincount(lookups, out_depth,
                                                   binary_output)
    if self.output_mode == TF_IDF:
      return tf.multiply(bincounts, self.idf_weights_const)
    return bincounts
  def _lookup_dense(self, inputs):
    """Lookup table values for a dense Tensor, handling masking and OOV."""
    # When executing eagerly and tracing keras.Inputs, do not call lookup.
    # This is critical for restoring SavedModel, which will first trace
    # layer.call and then attempt to restore the table. We need the table to
    # be uninitialized for the restore to work, but calling the table
    # uninitialized would error.
    if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
      lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
    else:
      lookups = self.lookup_table.lookup(inputs)
    if self.mask_token is not None:
      mask_locations = tf.equal(inputs, self._mask_key)
      lookups = tf.where(mask_locations, self._mask_value, lookups)
    if self.invert:
      # Inverted lookups map indices back to tokens; OOV handling below only
      # applies to the forward (token -> index) direction.
      return lookups
    lookup_checks = []
    if self.num_oov_indices == 0:
      # If we have zero oov indices, we need to check for oov inputs.
      oov_indices = tf.where(tf.equal(lookups, -1))
      oov_inputs = tf.gather_nd(inputs, oov_indices)
      msg = tf.strings.format(
          "When `num_oov_indices=0` all inputs should be in vocabulary, "
          "found OOV values {}, consider setting `num_oov_indices=1`.",
          (oov_inputs,))
      assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg])
      lookup_checks.append(assertion)
    elif self.num_oov_indices > 1:
      # If we have multiple oov indices, we need a further hashing step.
      if self._key_dtype.is_integer:
        oov_indices = tf.math.floormod(inputs, self.num_oov_indices)
      else:
        oov_indices = tf.strings.to_hash_bucket_fast(
            inputs, num_buckets=self.num_oov_indices)
      oov_indices = oov_indices + self._oov_start_index()
      oov_locations = tf.equal(lookups, self._default_value)
      lookups = tf.where(oov_locations, oov_indices, lookups)
    with tf.control_dependencies(lookup_checks):
      return tf.identity(lookups)
  def _encode_output(self, lookups):
    """Encode the lookup result to the final output depending on output_mode.

    NOTE(review): this method has no implementation and implicitly returns
    None; the encoding logic currently lives inline in `call`. It looks like
    a dead stub -- confirm before relying on or removing it.
    """
def _uninitialized_lookup_table(self):
with tf.init_scope():
initializer = NullInitializer(self._key_dtype, self._value_dtype)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_tokens(self, tokens):
with tf.init_scope():
token_start = self._token_start_index()
token_end = token_start + tf.size(tokens)
indices = tf.range(token_start, token_end, dtype=tf.int64)
keys, values = (indices, tokens) if self.invert else (tokens, indices)
initializer = tf.lookup.KeyValueTensorInitializer(keys, values,
self._key_dtype,
self._value_dtype)
table = tf.lookup.StaticHashTable(initializer, self._default_value)
if not tf.compat.v1.executing_eagerly_outside_functions():
backend.get_session().run(initializer.initialize(table))
return table
def _lookup_table_from_file(self, filename):
if self.invert:
key_index = tf.lookup.TextFileIndex.LINE_NUMBER
value_index = tf.lookup.TextFileIndex.WHOLE_LINE
else:
key_index = tf.lookup.TextFileIndex.WHOLE_LINE
value_index = tf.lookup.TextFileIndex.LINE_NUMBER
with tf.init_scope():
initializer = tf.lookup.TextFileInitializer(
filename=filename,
key_dtype=self._key_dtype,
key_index=key_index,
value_dtype=self._value_dtype,
value_index=value_index,
value_index_offset=self._token_start_index())
table = tf.lookup.StaticHashTable(initializer, self._default_value)
if not tf.compat.v1.executing_eagerly_outside_functions():
backend.get_session().run(initializer.initialize(table))
return table
def _standardize_inputs(self, inputs, dtype):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = tf.convert_to_tensor(inputs)
if inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def _expand_dims(self, inputs, axis):
if tf_utils.is_sparse(inputs):
return tf.sparse.expand_dims(inputs, axis)
else:
return tf.expand_dims(inputs, axis)
def _make_serializable(self, x):
if tf.is_tensor(x):
x = x.numpy()
if isinstance(x, (np.ndarray)):
x = x.tolist()
x = list(x)
return x
def _oov_start_index(self):
return 1 if self.mask_token is not None and self.output_mode == INT else 0
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
  def _maybe_freeze_vocab_size(self):
    """Locks in the vocabulary size for non-INT output modes on first call."""
    # INT output and pad_to_max_tokens don't depend on a fixed vocab size.
    if self.output_mode == INT or self.pad_to_max_tokens:
      return
    with tf.init_scope():
      if not tf.executing_eagerly():
        raise RuntimeError(
            "When using `output_mode={}` eager mode execution must be enabled."
            .format(self.output_mode))
      new_vocab_size = self.vocabulary_size()
      # A vocab consisting only of the special-token slots means no vocabulary
      # was ever set or adapted.
      if new_vocab_size == self._token_start_index():
        raise RuntimeError(
            "When using `output_mode={}` and `pad_to_max_tokens=False`, you "
            "must set the layer's vocabulary before calling it. Either pass "
            "a `vocabulary` argument to the layer, or call `adapt` with some "
            "sample data.".format(self.output_mode))
      elif (self._frozen_vocab_size is not None and
            new_vocab_size != self._frozen_vocab_size):
        raise RuntimeError(
            "When using `output_mode={}` and `pad_to_max_tokens=False`, the "
            "vocabulary size cannot be changed after the layer is called. "
            "Vocab size is {}, new vocab size is {}".format(
                self.output_mode, self._frozen_vocab_size, new_vocab_size))
      self._frozen_vocab_size = new_vocab_size
def _find_repeated_tokens(self, vocabulary):
"""Return all repeated tokens in a vocabulary."""
vocabulary_set = set(vocabulary)
if len(vocabulary) != len(vocabulary_set):
return [
item for item, count in collections.Counter(vocabulary).items()
if count > 1
]
else:
return []
def _num_tokens(self, data):
"""Count the number of tokens in a ragged, sparse or dense tensor."""
if tf_utils.is_sparse(data):
flat_values = data.values
elif tf_utils.is_ragged(data):
flat_values = data.flat_values
else:
flat_values = tf.reshape(data, [-1])
tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64)
return tokens, counts
def _inverse_document_frequency(self, token_document_counts, num_documents):
"""Computes the inverse-document-frequency (IDF) component of "tf_idf".
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
token_document_counts: An array of the # of documents each token appears
in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return tf.math.log(1 + num_documents / (1 + token_document_counts))
  @property
  def _trackable_saved_model_saver(self):
    # Saver object used by Keras SavedModel serialization for this layer.
    return layer_serialization.IndexLookupLayerSavedModelSaver(self)
  # Override points for IntegerLookup and StringLookup.
  def _tensor_vocab_to_numpy(self, vocabulary):
    """Converts a tensor vocabulary to a numpy vocabulary."""
    # Subclasses may override to adjust the conversion (e.g. element dtype).
    return vocabulary.numpy()
| 36,207 | 42.104762 | 112 | py |
keras | keras-master/keras/layers/preprocessing/preprocessing_stage_functional_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional preprocessing stage tests."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import time
import numpy as np
from keras import keras_parameterized
from keras.engine import base_preprocessing_layer
from keras.engine.input_layer import Input
from keras.layers import convolutional
from keras.layers import core
from keras.layers import merge
from keras.layers.preprocessing import image_preprocessing
from keras.layers.preprocessing import normalization
from keras.layers.preprocessing import preprocessing_stage
from keras.layers.preprocessing import preprocessing_test_utils
class PL(base_preprocessing_layer.PreprocessingLayer):
  """Test preprocessing layer that records when and how often it adapts."""

  def __init__(self, **kwargs):
    # Track adapt calls so tests can assert on call counts and ordering.
    self.adapt_time = None
    self.adapt_count = 0
    super(PL, self).__init__(**kwargs)

  def adapt(self, data, reset_state=True):
    self.adapt_time = time.time()
    self.adapt_count = self.adapt_count + 1

  def call(self, inputs):
    return inputs + 1
class PLMerge(PL):
  """Test layer that merges two inputs by elementwise addition."""

  def call(self, inputs):
    first, second = inputs[0], inputs[1]
    return first + second
class PLSplit(PL):
  """Test layer that splits one input into (input + 1, input - 1)."""

  def call(self, inputs):
    plus_one = inputs + 1
    minus_one = inputs - 1
    return plus_one, minus_one
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class PreprocessingStageTest(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """Tests adapt/call behavior of FunctionalPreprocessingStage.

  Each test asserts on the PL layers' adapt counts/timestamps to verify that
  every layer in the stage was adapted, exactly once per `adapt` call, in
  topological order.
  """

  def test_adapt_preprocessing_stage_with_single_input_output(self):
    x = Input(shape=(3,))
    l0 = PL()
    y = l0(x)
    l1 = PL()
    z = l1(y)
    stage = preprocessing_stage.FunctionalPreprocessingStage(x, z)
    stage.compile()
    # Test with NumPy array
    one_array = np.ones((4, 3), dtype='float32')
    stage.adapt(one_array)
    self.assertEqual(l0.adapt_count, 1)
    self.assertEqual(l1.adapt_count, 1)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    # Check call
    z = stage(tf.ones((4, 3), dtype='float32'))
    self.assertAllClose(z, np.ones((4, 3), dtype='float32') + 2.)
    # Test with dataset
    adapt_data = tf.data.Dataset.from_tensor_slices(one_array)
    adapt_data = adapt_data.batch(2)  # 5 batches of 2 samples
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 2)
    self.assertEqual(l1.adapt_count, 2)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    # Test error with bad data
    with self.assertRaisesRegex(ValueError, 'requires a '):
      stage.adapt(None)
    # Disallow calling fit
    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
      stage.fit(None)

  def test_adapt_preprocessing_stage_with_list_input(self):
    x0 = Input(shape=(3,))
    x1 = Input(shape=(3,))
    x2 = Input(shape=(3,))
    l0 = PLMerge()
    y = l0([x0, x1])
    l1 = PLMerge()
    y = l1([y, x2])
    l2 = PLSplit()
    z, y = l2(y)
    stage = preprocessing_stage.FunctionalPreprocessingStage([x0, x1, x2],
                                                             [y, z])
    stage.compile()
    # Test with NumPy array
    one_array = np.ones((4, 3), dtype='float32')
    stage.adapt([one_array, one_array, one_array])
    self.assertEqual(l0.adapt_count, 1)
    self.assertEqual(l1.adapt_count, 1)
    self.assertEqual(l2.adapt_count, 1)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Check call
    y, z = stage([
        tf.ones((4, 3), dtype='float32'),
        tf.ones((4, 3), dtype='float32'),
        tf.ones((4, 3), dtype='float32')
    ])
    self.assertAllClose(y, np.ones((4, 3), dtype='float32') + 1.)
    self.assertAllClose(z, np.ones((4, 3), dtype='float32') + 3.)
    # Test with dataset
    adapt_data = tf.data.Dataset.from_tensor_slices(
        (one_array, one_array, one_array))
    adapt_data = adapt_data.batch(2)  # 5 batches of 2 samples
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 2)
    self.assertEqual(l1.adapt_count, 2)
    self.assertEqual(l2.adapt_count, 2)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Test error with bad data
    with self.assertRaisesRegex(ValueError, 'requires a '):
      stage.adapt(None)

  def test_adapt_preprocessing_stage_with_dict_input(self):
    x0 = Input(shape=(3,), name='x0')
    x1 = Input(shape=(4,), name='x1')
    x2 = Input(shape=(3, 5), name='x2')
    # dimension will mismatch if x1 incorrectly placed.
    x1_sum = core.Lambda(
        lambda x: tf.reduce_sum(x, axis=-1, keepdims=True))(
            x1)
    x2_sum = core.Lambda(lambda x: tf.reduce_sum(x, axis=-1))(x2)
    l0 = PLMerge()
    y = l0([x0, x1_sum])
    l1 = PLMerge()
    y = l1([y, x2_sum])
    l2 = PLSplit()
    z, y = l2(y)
    stage = preprocessing_stage.FunctionalPreprocessingStage(
        {
            'x2': x2,
            'x0': x0,
            'x1': x1
        }, [y, z])
    stage.compile()
    # Test with dict of NumPy array
    one_array0 = np.ones((4, 3), dtype='float32')
    one_array1 = np.ones((4, 4), dtype='float32')
    one_array2 = np.ones((4, 3, 5), dtype='float32')
    adapt_data = {'x1': one_array1, 'x0': one_array0, 'x2': one_array2}
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 1)
    self.assertEqual(l1.adapt_count, 1)
    self.assertEqual(l2.adapt_count, 1)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Check call
    y, z = stage({
        'x1': tf.constant(one_array1),
        'x2': tf.constant(one_array2),
        'x0': tf.constant(one_array0)
    })
    self.assertAllClose(y, np.zeros((4, 3), dtype='float32') + 9.)
    self.assertAllClose(z, np.zeros((4, 3), dtype='float32') + 11.)
    # Test with list of NumPy array
    adapt_data = [one_array0, one_array1, one_array2]
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 2)
    self.assertEqual(l1.adapt_count, 2)
    self.assertEqual(l2.adapt_count, 2)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Test with flattened dataset
    adapt_data = tf.data.Dataset.from_tensor_slices(
        (one_array0, one_array1, one_array2))
    adapt_data = adapt_data.batch(2)  # 5 batches of 2 samples
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 3)
    self.assertEqual(l1.adapt_count, 3)
    self.assertEqual(l2.adapt_count, 3)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Test with dataset in dict shape
    adapt_data = tf.data.Dataset.from_tensor_slices({
        'x0': one_array0,
        'x2': one_array2,
        'x1': one_array1
    })
    adapt_data = adapt_data.batch(2)  # 5 batches of 2 samples
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 4)
    self.assertEqual(l1.adapt_count, 4)
    self.assertEqual(l2.adapt_count, 4)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Test error with bad data
    with self.assertRaisesRegex(ValueError, 'requires a '):
      stage.adapt(None)

  def test_adapt_preprocessing_stage_with_dict_output(self):
    x = Input(shape=(3,), name='x')
    l0 = PLSplit()
    y0, y1 = l0(x)
    l1 = PLSplit()
    z0, z1 = l1(y0)
    stage = preprocessing_stage.FunctionalPreprocessingStage({'x': x}, {
        'y1': y1,
        'z1': z1,
        'y0': y0,
        'z0': z0
    })
    stage.compile()
    # Test with NumPy array
    one_array = np.ones((4, 3), dtype='float32')
    adapt_data = {'x': one_array}
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 1)
    self.assertEqual(l1.adapt_count, 1)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    # Check call
    outputs = stage({'x': tf.constant(one_array)})
    self.assertEqual(set(outputs.keys()), {'y0', 'y1', 'z0', 'z1'})
    self.assertAllClose(outputs['y0'], np.ones((4, 3), dtype='float32') + 1.)
    self.assertAllClose(outputs['y1'], np.ones((4, 3), dtype='float32') - 1.)
    self.assertAllClose(outputs['z0'], np.ones((4, 3), dtype='float32') + 2.)
    self.assertAllClose(outputs['z1'], np.ones((4, 3), dtype='float32'))

  def test_preprocessing_stage_with_nested_input(self):
    # Test with NumPy array
    x0 = Input(shape=(3,))
    x1 = Input(shape=(3,))
    x2 = Input(shape=(3,))
    l0 = PLMerge()
    y = l0([x0, x1])
    l1 = PLMerge()
    y = l1([y, x2])
    l2 = PLSplit()
    z, y = l2(y)
    stage = preprocessing_stage.FunctionalPreprocessingStage([x0, [x1, x2]],
                                                             [y, z])
    stage.compile()
    one_array = np.ones((4, 3), dtype='float32')
    stage.adapt([one_array, [one_array, one_array]])
    self.assertEqual(l0.adapt_count, 1)
    self.assertEqual(l1.adapt_count, 1)
    self.assertEqual(l2.adapt_count, 1)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Check call
    y, z = stage([
        tf.ones((4, 3), dtype='float32'),
        [
            tf.ones((4, 3), dtype='float32'),
            tf.ones((4, 3), dtype='float32')
        ]
    ])
    self.assertAllClose(y, np.ones((4, 3), dtype='float32') + 1.)
    self.assertAllClose(z, np.ones((4, 3), dtype='float32') + 3.)
    # Test with dataset
    adapt_data = tf.data.Dataset.from_tensor_slices(
        (one_array, (one_array, one_array)))
    adapt_data = adapt_data.batch(2)  # 5 batches of 2 samples
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 2)
    self.assertEqual(l1.adapt_count, 2)
    self.assertEqual(l2.adapt_count, 2)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    self.assertLessEqual(l1.adapt_time, l2.adapt_time)
    # Test error with bad data
    with self.assertRaisesRegex(ValueError, 'requires a '):
      stage.adapt(None)

  def test_include_layers_with_dict_input(self):

    class PLMergeDict(PLMerge):

      def call(self, inputs):
        return inputs['a'] + inputs['b']

    x0 = Input(shape=(3,))
    x1 = Input(shape=(3,))
    l0 = PLMergeDict()
    y = l0({'a': x0, 'b': x1})
    l1 = PLSplit()
    z, y = l1(y)
    stage = preprocessing_stage.FunctionalPreprocessingStage([x0, x1], [y, z])
    stage.compile()
    one_array = np.ones((4, 3), dtype='float32')
    adapt_data = tf.data.Dataset.from_tensor_slices((one_array, one_array))
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 1)
    self.assertEqual(l1.adapt_count, 1)
    self.assertLessEqual(l0.adapt_time, l1.adapt_time)
    # Check call
    y, z = stage([
        tf.ones((4, 3), dtype='float32'),
        tf.ones((4, 3), dtype='float32')
    ])
    self.assertAllClose(y, np.ones((4, 3), dtype='float32'))
    self.assertAllClose(z, np.ones((4, 3), dtype='float32') + 2.)

  def test_include_layers_with_nested_input(self):

    class PLMergeNest(PLMerge):

      def call(self, inputs):
        a = inputs[0]
        b = inputs[1][0]
        c = inputs[1][1]
        return a + b + c

    x0 = Input(shape=(3,))
    x1 = Input(shape=(3,))
    x2 = Input(shape=(3,))
    l0 = PLMergeNest()
    y = l0([x0, [x1, x2]])
    stage = preprocessing_stage.FunctionalPreprocessingStage([x0, x1, x2], y)
    stage.compile()
    one_array = np.ones((4, 3), dtype='float32')
    adapt_data = tf.data.Dataset.from_tensor_slices((one_array,) * 3)
    stage.adapt(adapt_data)
    self.assertEqual(l0.adapt_count, 1)
    # Check call
    y = stage([
        tf.ones((4, 3), dtype='float32'),
        tf.ones((4, 3), dtype='float32'),
        tf.ones((4, 3), dtype='float32')
    ])
    self.assertAllClose(y, np.ones((4, 3), dtype='float32') + 2.)

  def test_mixing_preprocessing_and_regular_layers(self):
    x0 = Input(shape=(10, 10, 3))
    x1 = Input(shape=(10, 10, 3))
    x2 = Input(shape=(10, 10, 3))
    y0 = merge.Add()([x0, x1])
    y1 = image_preprocessing.CenterCrop(8, 8)(x2)
    y1 = convolutional.ZeroPadding2D(padding=1)(y1)
    z = merge.Add()([y0, y1])
    z = normalization.Normalization()(z)
    z = convolutional.Conv2D(4, 3)(z)
    stage = preprocessing_stage.FunctionalPreprocessingStage([x0, x1, x2], z)
    data = [
        np.ones((12, 10, 10, 3), dtype='float32'),
        np.ones((12, 10, 10, 3), dtype='float32'),
        np.ones((12, 10, 10, 3), dtype='float32')
    ]
    stage.adapt(data)
    _ = stage(data)
    stage.compile('rmsprop', 'mse')
    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
      stage.fit(data, np.ones((12, 8, 8, 4)))
    ds_x0 = tf.data.Dataset.from_tensor_slices(np.ones((12, 10, 10, 3)))
    ds_x1 = tf.data.Dataset.from_tensor_slices(np.ones((12, 10, 10, 3)))
    ds_x2 = tf.data.Dataset.from_tensor_slices(np.ones((12, 10, 10, 3)))
    ds_x = tf.data.Dataset.zip((ds_x0, ds_x1, ds_x2))
    ds_y = tf.data.Dataset.from_tensor_slices(np.ones((12, 8, 8, 4)))
    dataset = tf.data.Dataset.zip((ds_x, ds_y)).batch(4)
    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
      stage.fit(dataset)
    _ = stage.evaluate(data, np.ones((12, 8, 8, 4)))
    _ = stage.predict(data)
# Standard test-module entry point.
if __name__ == '__main__':
  tf.test.main()
| 13,778 | 30.315909 | 80 | py |
keras | keras-master/keras/layers/preprocessing/discretization_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.discretization."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import discretization
from keras.layers.preprocessing import preprocessing_test_utils
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.all_strategies +
        strategy_combinations.multi_worker_mirrored_strategies,
        mode=["eager", "graph"]))
class DiscretizationDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Verifies Discretization bucketizes correctly under each tf.distribute
  strategy."""

  def test_distribution(self, strategy):
    input_array = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
    expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
    expected_output_shape = [None, 4]
    # Some strategies place ops on devices where they are unsupported; allow
    # TF to fall back rather than erroring.
    tf.config.set_soft_device_placement(True)
    with strategy.scope():
      input_data = keras.Input(shape=(4,))
      layer = discretization.Discretization(bin_boundaries=[0., 1., 2.])
      bucket_data = layer(input_data)
      self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
      model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
# Multi-process entry point required by the multi-worker strategy combinations.
if __name__ == "__main__":
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 2,228 | 36.779661 | 80 | py |
keras | keras-master/keras/layers/preprocessing/integer_lookup_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
import tensorflow.compat.v2 as tf
import gc
import itertools
import os
import random
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.preprocessing import integer_lookup
from keras.layers.preprocessing import preprocessing_test_utils
def _get_end_to_end_test_cases():
  """Returns end-to-end cases, each crossed with use_dataset in (True, False)."""
  base_cases = (
      {
          "testcase_name":
              "test_ints_soft_vocab_cap",
          # Create an array where 1138 is the most frequent term, followed by
          # 1729, then 725, then 42. This ensures that the vocab accumulator
          # is sorting by frequency.
          "vocab_data":
              np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],
                        [1729], [725], [725]],
                       dtype=np.int64),
          "input_data":
              np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],
                       dtype=np.int64),
          "kwargs": {
              "max_tokens": None,
              "dtype": tf.int64,
          },
          "expected_output": [[1], [2], [3], [4], [4], [3], [1], [0]],
          "input_dtype":
              tf.int64
      },)
  # Cross every base case with use_dataset in (True, False), renaming the
  # dataset variants so parameterized test names stay unique.
  crossed = []
  for use_dataset in (True, False):
    for base in base_cases:
      case = dict(base)
      if use_dataset:
        case["testcase_name"] += "_with_dataset"
      case["use_dataset"] = use_dataset
      crossed.append(case)
  return crossed
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IntegerLookupLayerTest(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end tests for IntegerLookup with adapted and static vocabularies."""

  @parameterized.named_parameters(*_get_end_to_end_test_cases())
  def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                       use_dataset, expected_output,
                                       input_dtype):
    cls = integer_lookup.IntegerLookup
    expected_output_dtype = tf.int64
    input_shape = input_data.shape
    if use_dataset:
      # Keras APIs expect batched datasets.
      # TODO(rachelim): `model.predict` predicts the result on each
      # dataset batch separately, then tries to concatenate the results
      # together. When the results have different shapes on the non-concat
      # axis (which can happen in the output_mode = INT case for
      # IntegerLookup), the concatenation fails. In real use cases, this may
      # not be an issue because users are likely to pipe the preprocessing layer
      # into other keras layers instead of predicting it directly. A workaround
      # for these unit tests is to have the dataset only contain one batch, so
      # no concatenation needs to happen with the result. For consistency with
      # numpy input, we should make `predict` join differently shaped results
      # together sensibly, with 0 padding.
      input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
          input_shape[0])
      vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
          input_shape[0])
    output_data = testing_utils.layer_test(
        cls,
        kwargs=kwargs,
        input_shape=input_shape,
        input_data=input_data,
        input_dtype=input_dtype,
        expected_output_dtype=expected_output_dtype,
        validate_training=False,
        adapt_data=vocab_data)
    self.assertAllClose(expected_output, output_data)

  def test_layer_with_list_input(self):
    vocab = [12, 36, 1138, 42]
    data = [[12, 1138, 42], [42, 1000, 36]]  # Note OOV tokens
    layer = integer_lookup.IntegerLookup(vocabulary=vocab)
    output = layer(data)
    expected_output = np.array([[1, 3, 4], [4, 0, 2]])
    self.assertEqual(output.numpy().tolist(), expected_output.tolist())
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingInputTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests IntegerLookup over sparse and ragged inputs."""

  def test_sparse_int_input(self):
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 32], dtype=np.int64),
        dense_shape=[3, 4])
    # 32 is out of vocabulary and should map to the OOV index (0).
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [4, 0]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
    layer = integer_lookup.IntegerLookup(max_tokens=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)

  def test_ragged_int_input(self):
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],
                                     dtype=np.int64)
    # 42 is out of vocabulary and should map to the OOV index (0).
    expected_output = [[1, 2, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
    layer = integer_lookup.IntegerLookup(max_tokens=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingMultiOOVTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests IntegerLookup with multiple OOV buckets (hashed OOV indices)."""

  def test_sparse_int_input_multi_bucket(self):
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 133], dtype=np.int64),
        dense_shape=[3, 4])
    # 133 is OOV and hashes into one of the two OOV buckets.
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [6, 2]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
    layer = integer_lookup.IntegerLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=2,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)

  def test_ragged_int_input_multi_bucket(self):
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 133]],
                                     dtype=np.int64)
    # 133 is OOV and hashes into one of the two OOV buckets.
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
    layer = integer_lookup.IntegerLookup(max_tokens=None, num_oov_indices=2)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingAdaptTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for building IntegerLookup vocabularies via `adapt()`."""

  def test_sparse_adapt(self):
    sparse_vocab = tf.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 2]],
        values=[203, 1729, 203],
        dense_shape=[3, 4])
    dataset = tf.data.Dataset.from_tensors(sparse_vocab)

    lookup = integer_lookup.IntegerLookup()
    lookup.adapt(dataset)
    # OOV token (-1) first, then tokens by descending frequency.
    self.assertAllEqual([-1, 203, 1729], lookup.get_vocabulary())

  def test_ragged_adapt(self):
    ragged_vocab = tf.ragged.constant([[203], [1729, 203]])
    dataset = tf.data.Dataset.from_tensors(ragged_vocab)
    lookup = integer_lookup.IntegerLookup()
    lookup.adapt(dataset)
    self.assertAllEqual([-1, 203, 1729], lookup.get_vocabulary())

  def test_single_int_generator_dataset(self):

    def word_gen():
      # Endless stream of random ints; the dataset below truncates it.
      for _ in itertools.count(1):
        yield random.randint(0, 100)

    source = tf.data.Dataset.from_generator(word_gen, tf.int64,
                                            tf.TensorShape([]))
    inputs = keras.Input(shape=(), dtype=tf.int64)
    lookup = integer_lookup.IntegerLookup(
        max_tokens=10, num_oov_indices=0, mask_token=None, oov_token=None)
    _ = lookup(inputs)
    # Should adapt cleanly from a scalar generator-backed dataset.
    lookup.adapt(source.take(2))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IntegerLookupOutputTest(keras_parameterized.TestCase,
                              preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end output tests for IntegerLookup inside a functional model."""

  def test_int_output(self):
    # Default layer: index 0 is the single OOV slot, so vocab ids start at 1
    # and the OOV input 203 maps to 0.
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup()
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_output_shape(self):
    # Lookup is elementwise: the symbolic output shape must equal the input's.
    input_data = keras.Input(shape=(4,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(max_tokens=2, num_oov_indices=1)
    int_data = layer(input_data)
    self.assertAllEqual(int_data.shape[1:], input_data.shape[1:])

  def test_int_output_with_mask(self):
    # With mask_token=0: index 0 is the mask, 1 is OOV, vocab starts at 2.
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(max_tokens=None, mask_token=0)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_explicit_vocab(self):
    # Vocabulary passed at construction instead of via set_vocabulary().
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data,
        max_tokens=None,
    )
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_explicit_vocab_with_special_tokens(self):
    # The supplied vocabulary may spell out the mask (0) and OOV (-1) tokens;
    # output indices match the mask_token=0 case above.
    vocab_data = [0, -1, 42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data,
        max_tokens=None,
        mask_token=0,
    )
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_no_oov(self):
    # With num_oov_indices=0, OOV inputs raise instead of mapping to a bucket.
    vocab_data = [42, 1138, 725, 1729]
    valid_input = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 0]])
    invalid_input = np.array([[42, 1138, 725, 203], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data, mask_token=0, num_oov_indices=0)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(valid_input)
    self.assertAllEqual(expected_output, output_data)
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                "found OOV values.*203"):
      _ = model.predict(invalid_input)

  def test_inverse_output(self):
    # invert=True maps indices back to tokens; index 0 yields the OOV token.
    vocab_data = [-1, 42, 1138, 725, 1729]
    input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 0]])
    expected_output = np.array([[42, 1138, 725, 1729], [1729, 725, 42, -1]])
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(invert=True)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_forward_backward_explicit_vocab(self):
    # Forward lookup chained into an inverse lookup recovers the tokens,
    # with the OOV input (203) coming back as the OOV token (-1).
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = np.array([[42, 1138, 725, 1729], [1729, 725, 42, -1]])
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(vocabulary=vocab_data)
    inverse_layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data, invert=True)
    int_data = layer(input_data)
    inverse_data = inverse_layer(int_data)
    model = keras.Model(inputs=input_data, outputs=inverse_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_forward_backward_adapted_vocab(self):
    # Same round trip as above, but the forward vocabulary comes from adapt().
    adapt_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = np.array([[42, 1138, 725, 1729], [1729, 725, 42, -1]])
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup()
    layer.adapt(adapt_data)
    inverse_layer = integer_lookup.IntegerLookup(
        vocabulary=layer.get_vocabulary(), invert=True)
    int_data = layer(input_data)
    inverse_data = inverse_layer(int_data)
    model = keras.Model(inputs=input_data, outputs=inverse_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IntegerLookupVocabularyTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for supplying IntegerLookup vocabularies (lists, files, tensors)."""

  def _write_to_temp_file(self, file_name, vocab_list):
    """Writes one vocab term per line to `<tmp>/<file_name>.txt`; returns path."""
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    # The context manager flushes and closes the file on exit; the explicit
    # flush()/close() calls previously inside this block were redundant.
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(str(vocab) + "\n")
    return vocab_path

  def test_int_output_explicit_vocab(self):
    # Index 0 is the OOV slot; in-vocab tokens map to 1..4.
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(vocabulary=vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_no_vocab(self):
    # Calling the layer before any vocabulary is set must fail loudly.
    with self.assertRaisesRegex(RuntimeError,
                                "you must set the layer's vocabulary"):
      layer = integer_lookup.IntegerLookup(output_mode="binary")
      layer([[1]])

  def test_one_hot_output(self):
    vocab_data = [2, 3, 4, 5]
    input_array = np.array([2, 3, 4, 5, 6])
    # Column 0 is the OOV slot (6 is out of vocabulary).
    expected_output = [
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0],
    ]
    input_data = keras.Input(shape=(1,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data, output_mode="one_hot")
    res = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=res)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_multi_hot_output(self):
    vocab_data = [2, 3, 4, 5]
    input_array = np.array([[2, 2, 3, 4], [0, 1, 5, 2]])
    expected_output = [[0, 1, 1, 1, 0], [1, 1, 0, 0, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data, output_mode="multi_hot")
    res = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=res)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_count_output(self):
    vocab_data = [2, 3, 4, 5]
    input_array = np.array([[2, 2, 3, 4], [0, 1, 5, 6]])
    # Slot 0 counts all OOV occurrences (0, 1 and 6 are OOV).
    expected_output = [[0, 2, 1, 1, 0], [3, 0, 0, 0, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data, output_mode="count")
    res = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=res)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_sparse_output(self):
    vocab_data = [2, 3, 4, 5]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_data, output_mode="multi_hot", sparse=True)
    res = layer(input_data)
    # Bug fix: this was `assertTrue(res.__class__.__name__, "Sparse...")`,
    # where the second argument is only the failure *message*, so the check
    # passed unconditionally. assertEqual actually verifies the class name.
    self.assertEqual(res.__class__.__name__, "SparseKerasTensor")

  def test_get_vocab_returns_int(self):
    vocab_data = [42, 1138, 725, 1729]
    expected_vocab = [-1, 42, 1138, 725, 1729]
    layer = integer_lookup.IntegerLookup(vocabulary=vocab_data)
    layer_vocab = layer.get_vocabulary()
    self.assertAllEqual(expected_vocab, layer_vocab)
    # The vocabulary must round-trip as integers, not strings.
    self.assertIsInstance(layer_vocab[0], np.int64)

  def test_int_output_explicit_vocab_from_file(self):
    vocab_list = [42, 1138, 725, 1729]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(vocabulary=vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_inverted_vocab_from_file(self):
    vocab_list = [42, 1138, 725, 1729]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
    input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 0]])
    expected_output = [[42, 1138, 725, 1729], [1729, 725, 42, -1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(vocabulary=vocab_path, invert=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_inverted_vocab_from_file_with_mask(self):
    vocab_list = [42, 1138, 725, 1729]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
    input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
    # Index 0 inverts to the configured mask_value (-10).
    expected_output = [[42, 1138, 725, 1729], [1729, 725, 42, -10]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup(
        vocabulary=vocab_path, invert=True, mask_value=-10)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_explicit_vocab_from_file_via_setter(self):
    vocab_list = [42, 1138, 725, 1729]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = integer_lookup.IntegerLookup()
    layer.set_vocabulary(vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_non_unique_vocab_fails(self):
    vocab_data = [42, 1138, 725, 1729, 1729]
    with self.assertRaisesRegex(ValueError, ".*repeated term.*1729.*"):
      _ = integer_lookup.IntegerLookup(vocabulary=vocab_data)

  def test_non_unique_vocab_from_file_fails(self):
    # Duplicates in a vocab file are only detected at table init time.
    vocab_list = [42, 1138, 725, 1729, 42]
    vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
    with self.assertRaisesRegex(
        tf.errors.FailedPreconditionError,
        ".*HashTable has different value for same key.*42.*"):
      _ = integer_lookup.IntegerLookup(vocabulary=vocab_path)

  def test_tensor_vocab(self):
    vocab_data = [-1, 42, 1138, 725, 1729]
    vocab_tensor = tf.constant(vocab_data, tf.int64)
    layer = integer_lookup.IntegerLookup(vocabulary=vocab_tensor)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)
    self.assertAllEqual(layer.vocabulary_size(), 5)
    # Setting a tensor vocabulary inside a tf.function is disallowed.
    fn = tf.function(lambda: layer.set_vocabulary(vocab_tensor))
    with self.assertRaisesRegex(RuntimeError, "Cannot set a tensor vocabulary"):
      fn()
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IntegerLookupErrorTest(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """Error-path tests for IntegerLookup construction and vocab setting."""

  def test_too_long_vocab_fails_in_single_setting(self):
    # Four terms plus the special tokens exceed max_tokens=4.
    lookup = integer_lookup.IntegerLookup(max_tokens=4, num_oov_indices=1)
    with self.assertRaisesRegex(ValueError,
                                "vocabulary larger than the maximum vocab.*"):
      lookup.set_vocabulary([42, 1138, 725, 1729])

  def test_zero_max_tokens_fails(self):
    with self.assertRaisesRegex(ValueError, ".*max_tokens.*"):
      _ = integer_lookup.IntegerLookup(max_tokens=0, num_oov_indices=1)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IntegerLookupSavingTest(keras_parameterized.TestCase,
                              preprocessing_test_utils.PreprocessingLayerTest):
  """Verifies IntegerLookup vocabularies survive a SavedModel round trip."""

  def tearDown(self):
    # Clear the session and force a GC pass between tests so table
    # resources held by lookup layers are released.
    keras.backend.clear_session()
    gc.collect()
    super().tearDown()

  def test_vocabulary_persistence_across_saving(self):
    vocab = [42, 1138, 725, 1729]
    inputs_np = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected = [[1, 2, 3, 4], [4, 3, 1, 0]]

    # Build a golden model and check it before saving.
    feature = keras.Input(shape=(None,), dtype=tf.int64)
    lookup = integer_lookup.IntegerLookup(max_tokens=None, num_oov_indices=1)
    lookup.set_vocabulary(vocab)
    model = keras.Model(inputs=feature, outputs=lookup(feature))
    self.assertAllEqual(model.predict(inputs_np), expected)

    # Round-trip through the SavedModel format.
    save_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(save_path, save_format="tf")

    # Clear the session so the loaded model is rebuilt from scratch.
    # TODO(b/149526183): Can't clear session when TF2 is disabled.
    if tf.__internal__.tf2.enabled():
      keras.backend.clear_session()

    restored = keras.models.load_model(
        save_path,
        custom_objects={"IntegerLookup": integer_lookup.IntegerLookup})

    # The load must have produced a distinct object (a real round trip)...
    self.assertIsNot(model, restored)
    # ...that still maps tokens through the same vocabulary.
    self.assertAllEqual(restored.predict(inputs_np), expected)
# Standard TF test entry point: discovers and runs the test cases above.
if __name__ == "__main__":
  tf.test.main()
| 24,983 | 39.296774 | 80 | py |
keras | keras-master/keras/layers/preprocessing/hashing_distribution_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.hashing."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute.strategy_combinations import all_strategies
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing import preprocessing_test_utils
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        distribution=all_strategies,
        mode=["eager", "graph"]))
class HashingDistributionTest(keras_parameterized.TestCase,
                              preprocessing_test_utils.PreprocessingLayerTest):
  """Checks Hashing layer outputs when built under a tf.distribute strategy."""

  def test_distribution(self, distribution):
    # Fix: the numpy inputs were previously also named `input_data` and then
    # shadowed by the symbolic `keras.Input` below; distinct names keep the
    # two roles apart.
    input_array = np.asarray([["omar"], ["stringer"], ["marlo"], ["wire"]])
    input_dataset = tf.data.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    expected_output = [[0], [0], [1], [0]]

    tf.config.set_soft_device_placement(True)

    with distribution.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = hashing.Hashing(num_bins=2)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)
# Standard TF test entry point: discovers and runs the test cases above.
if __name__ == "__main__":
  tf.test.main()
| 2,041 | 36.814815 | 80 | py |
keras | keras-master/keras/layers/preprocessing/string_lookup.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras string lookup preprocessing layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import index_lookup
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.StringLookup",
"keras.layers.experimental.preprocessing.StringLookup",
v1=[])
class StringLookup(index_lookup.IndexLookup):
"""Maps strings from a vocabulary to integer indices.
This layer translates a set of arbitrary strings into an integer output via a
table-based vocabulary lookup.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
determine the frequency of individual strings tokens, and create a vocabulary
from them. If the vocabulary is capped in size, the most frequent tokens will
be used to create the vocabulary and all others will be treated as
out-of-vocabulary (OOV).
There are two possible output modes for the layer.
When `output_mode` is `"int"`,
input strings are converted to their index in the vocabulary (an integer).
When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input strings
are encoded into an array where each dimension corresponds to an element in
the vocabulary.
The vocabulary can optionally contain a mask token as well as an OOV token
(which can optionally occupy multiple indices in the vocabulary, as set
by `num_oov_indices`).
The position of these tokens in the vocabulary is fixed. When `output_mode` is
`"int"`, the vocabulary will begin with the mask token (if set), followed by
OOV indices, followed by the rest of the vocabulary. When `output_mode` is
`"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with OOV
indices and instances of the mask token will be dropped.
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should only
be specified when adapting the vocabulary or when setting
`pad_to_max_tokens=True`. If None, there is no cap on the size of the
vocabulary. Note that this size includes the OOV and mask tokens. Defaults
to None.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling the layer.
Defaults to 1.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0. In
other output modes, the token will not appear in the vocabulary and
instances of the mask token in the input will be dropped. If set to None,
no mask term will be added. Defaults to `None`.
oov_token: Only used when `invert` is True. The token to return for OOV
indices. Defaults to `"[UNK]"`.
vocabulary: Optional. Either an array of strings or a string path to a text
file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D
      tensor containing the string vocabulary terms. If passing a file path, the
file should contain one line per term in the vocabulary. If this argument
is set, there is no need to `adapt` the layer.
invert: Only valid when `output_mode` is `"int"`. If True, this layer will
map indices to vocabulary items instead of mapping vocabulary items to
indices. Default to False.
output_mode: Specification for the output of the layer. Defaults to `"int"`.
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
`"tf_idf"` configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1 at the element
index. If the last dimension is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new dimension for
the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
the same size as the vocabulary, containing a 1 for each vocabulary
term present in the sample. Treats the last dimension as the sample
dimension, if input shape is (..., sample_length), output shape will
be (..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
For `"int"` output, any shape of input and output is supported. For all
other output modes, currently only output up to rank 2 is supported.
pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, the output will have its feature axis
padded to `max_tokens` even if the number of unique tokens in the
vocabulary is less than max_tokens, resulting in a tensor of shape
[batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
sparse: Boolean. Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, returns a `SparseTensor` instead of a
dense `Tensor`. Defaults to False.
Examples:
**Creating a lookup layer with a known vocabulary**
This example creates a lookup layer with a pre-existing vocabulary.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[1, 3, 4],
[4, 0, 2]])>
**Creating a lookup layer with an adapted vocabulary**
This example creates a lookup layer and generates the vocabulary by analyzing
the dataset.
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup()
>>> layer.adapt(data)
>>> layer.get_vocabulary()
['[UNK]', 'd', 'z', 'c', 'b', 'a']
Note that the OOV token `"[UNK]"` has been added to the vocabulary.
The remaining tokens are sorted by frequency
(`"d"`, which has 2 occurrences, is first) then by inverse sort order.
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup()
>>> layer.adapt(data)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[5, 3, 1],
[1, 2, 4]])>
**Lookups with multiple OOV indices**
This example demonstrates how to use a lookup layer with multiple OOV indices.
When a layer is created with more than one OOV index, any OOV values are
hashed into the number of OOV buckets, distributing OOV values in a
deterministic fashion across the set.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["m", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab, num_oov_indices=2)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[2, 4, 5],
[0, 1, 3]])>
Note that the output for OOV value 'm' is 0, while the output for OOV value
'z' is 1. The in-vocab terms have their output index increased by 1 from
earlier examples (a maps to 2, etc) in order to make space for the extra OOV
value.
**One-hot output**
Configure the layer with `output_mode='one_hot'`. Note that the first
`num_oov_indices` dimensions in the ont_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant(["a", "b", "c", "d", "z"])
>>> layer = tf.keras.layers.StringLookup(
... vocabulary=vocab, output_mode='one_hot')
>>> layer(data)
<tf.Tensor: shape=(5, 5), dtype=float32, numpy=
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.]], dtype=float32)>
**Multi-hot output**
Configure the layer with `output_mode='multi_hot'`. Note that the first
`num_oov_indices` dimensions in the multi_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(
... vocabulary=vocab, output_mode='multi_hot')
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]], dtype=float32)>
**Token count output**
Configure the layer with `output_mode='count'`. As with multi_hot output, the
first `num_oov_indices` dimensions in the output represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(
... vocabulary=vocab, output_mode='count')
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0., 1., 0., 1., 2.],
[2., 0., 1., 0., 1.]], dtype=float32)>
**TF-IDF output**
Configure the layer with `output_mode="tf_idf"`. As with multi_hot output, the
first `num_oov_indices` dimensions in the output represent OOV values.
Each token bin will output `token_count * idf_weight`, where the idf weights
are the inverse document frequency weights per token. These should be provided
along with the vocabulary. Note that the `idf_weight` for OOV values will
default to the average of all idf weights passed in.
>>> vocab = ["a", "b", "c", "d"]
>>> idf_weights = [0.25, 0.75, 0.6, 0.4]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(output_mode="tf_idf")
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.0 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
To specify the idf weights for oov values, you will need to pass the entire
  vocabulary including the leading oov token.
>>> vocab = ["[UNK]", "a", "b", "c", "d"]
>>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(output_mode="tf_idf")
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
When adapting the layer in `"tf_idf"` mode, each input sample will be
considered a document, and IDF weight per token will be calculated as
`log(1 + num_documents / (1 + token_document_count))`.
**Inverse lookup**
This example demonstrates how to map indices to strings using this layer. (You
can also use `adapt()` with `inverse=True`, but for simplicity we'll pass the
vocab in this example.)
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([[1, 3, 4], [4, 0, 2]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=string, numpy=
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)>
Note that the first index correspond to the oov token by default.
**Forward and inverse lookup pairs**
This example demonstrates how to use the vocabulary of a standard lookup
layer to create an inverse lookup layer.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab)
>>> i_layer = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)
>>> int_data = layer(data)
>>> i_layer(int_data)
<tf.Tensor: shape=(2, 3), dtype=string, numpy=
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)>
In this example, the input value `"z"` resulted in an output of `"[UNK]"`,
  since `"z"` was not in the vocabulary - it got represented as an OOV, and all
OOV values are returned as `"[UNK]"` in the inverse layer. Also, note that
for the inverse to work, you must have already set the forward layer
vocabulary either directly or via `adapt()` before calling `get_vocabulary()`.
"""
def __init__(self,
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token="[UNK]",
vocabulary=None,
encoding=None,
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
**kwargs):
allowed_dtypes = [tf.string]
if "dtype" in kwargs and kwargs["dtype"] not in allowed_dtypes:
raise ValueError(
f"The value of the `dtype` for `StringLookup` may "
f"only be one of {allowed_dtypes}, but received {kwargs['dtype']}.")
if "dtype" not in kwargs:
kwargs["dtype"] = tf.string
if encoding is None:
encoding = "utf-8"
self.encoding = encoding
super(StringLookup, self).__init__(
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
mask_token=mask_token,
oov_token=oov_token,
vocabulary=vocabulary,
invert=invert,
output_mode=output_mode,
sparse=sparse,
pad_to_max_tokens=pad_to_max_tokens,
**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell("StringLookup").set(True)
def get_config(self):
config = {"encoding": self.encoding}
base_config = super(StringLookup, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Overridden methods from IndexLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
vocabulary = vocabulary.numpy()
return np.array([tf.compat.as_text(x, self.encoding) for x in vocabulary])
| 14,767 | 42.435294 | 80 | py |
keras | keras-master/keras/layers/preprocessing/index_lookup_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.index_lookup."""
import tensorflow.compat.v2 as tf
import os
import numpy as np
import keras
from keras import backend
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import index_lookup
from keras.layers.preprocessing import preprocessing_test_utils
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.all_strategies +
        strategy_combinations.multi_worker_mirrored_strategies,
        mode=["eager"]))  # Eager-only, no graph: b/158793009
class IndexLookupDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Runs IndexLookup under every tf.distribute strategy combination.

  Each test builds a small string-lookup model inside `strategy.scope()` and
  checks that token-to-index mapping matches the single-device result.
  """

  def _write_to_temp_file(self, file_name, vocab_list):
    """Writes `vocab_list` (one token per line) to a temp file.

    Returns the path of the written vocabulary file.
    """
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(vocab + "\n")
      writer.flush()
      writer.close()
    return vocab_path

  def test_strategy(self, strategy):
    """Adapts a vocabulary from a dataset and looks up tokens under `strategy`."""
    # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
    if backend.is_tpu_strategy(strategy):
      self.skipTest("This test needs MLIR bridge on TPU.")

    vocab_data = [[
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ]]
    vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = tf.data.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    # Index 0 is the mask token "" and index 1 the single OOV bucket, so
    # in-vocab tokens start at 2 and "michigan" maps to 1.
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    tf.config.set_soft_device_placement(True)

    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = index_lookup.IndexLookup(
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string)
      layer.adapt(vocab_dataset)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    model.compile(loss="mse")
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)

  def test_strategy_with_file(self, strategy):
    """Same as `test_strategy`, but the vocabulary comes from a file."""
    # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
    if backend.is_tpu_strategy(strategy):
      self.skipTest("This test needs MLIR bridge on TPU.")

    vocab_data = ["earth", "wind", "and", "fire"]
    vocab_file = self._write_to_temp_file("temp", vocab_data)
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = tf.data.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    tf.config.set_soft_device_placement(True)

    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = index_lookup.IndexLookup(
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string,
          vocabulary=vocab_file)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    model.compile(loss="mse")
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)

  def test_tpu_with_multiple_oov(self, strategy):
    """Lookup with two OOV buckets; known tokens therefore start at index 3."""
    # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
    if backend.is_tpu_strategy(strategy):
      self.skipTest("This test needs MLIR bridge on TPU.")

    vocab_data = [[
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ]]
    vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = tf.data.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    # Mask token occupies 0, OOV buckets occupy 1-2, vocab starts at 3.
    expected_output = [[3, 4, 5, 6], [6, 5, 3, 1]]

    tf.config.set_soft_device_placement(True)

    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = index_lookup.IndexLookup(
          max_tokens=None,
          num_oov_indices=2,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string)
      layer.adapt(vocab_dataset)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    model.compile(loss="mse")
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)
if __name__ == "__main__":
  # Enable TF2 behavior, then run under the multi-process test runner, which
  # is required for the multi-worker mirrored strategy combinations above.
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 5,687 | 37.174497 | 80 | py |
keras | keras-master/keras/layers/preprocessing/image_preprocessing_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.image_preprocessing."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import image_preprocessing
from keras.layers.preprocessing import preprocessing_test_utils
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.all_strategies +
        strategy_combinations.multi_worker_mirrored_strategies,
        mode=["eager", "graph"]))
class ImagePreprocessingDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Smoke-tests the image preprocessing layers under tf.distribute.

  Builds a Sequential stack of random-augmentation layers inside each
  strategy's scope and verifies that `predict` runs without error; output
  values are not checked (the layers are random).
  """

  def test_distribution(self, strategy):
    if "CentralStorage" in type(strategy).__name__:
      self.skipTest("Does not work with CentralStorageStrategy yet.")
    # TODO(b/159738418): large image input causes OOM in ubuntu multi gpu.
    np_images = np.random.random((32, 32, 32, 3)).astype(np.float32)
    image_dataset = tf.data.Dataset.from_tensor_slices(np_images).batch(
        16, drop_remainder=True)

    with strategy.scope():
      input_data = keras.Input(shape=(32, 32, 3), dtype=tf.float32)
      image_preprocessor = keras.Sequential([
          image_preprocessing.Resizing(height=256, width=256),
          image_preprocessing.RandomCrop(height=224, width=224),
          image_preprocessing.RandomTranslation(.1, .1),
          image_preprocessing.RandomRotation(.2),
          image_preprocessing.RandomFlip(),
          image_preprocessing.RandomZoom(.2, .2)])
      preprocessed_image = image_preprocessor(input_data)
      flatten_layer = keras.layers.Flatten(data_format="channels_last")
      output = flatten_layer(preprocessed_image)
      cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
      output = cls_layer(output)
      model = keras.Model(inputs=input_data, outputs=output)
    model.compile(loss="binary_crossentropy")
    _ = model.predict(image_dataset)
if __name__ == "__main__":
  # Enable TF2 behavior, then run under the multi-process test runner, which
  # is required for the multi-worker mirrored strategy combinations above.
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 2,868 | 41.820896 | 80 | py |
keras | keras-master/keras/layers/preprocessing/string_lookup_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.preprocessing import preprocessing_test_utils
from keras.layers.preprocessing import string_lookup
def _get_end_to_end_test_cases():
  """Builds parameterized cases for the end-to-end adapt test.

  Every base case is emitted twice: once with numpy input and once with the
  same data wrapped in a `tf.data.Dataset` (`use_dataset=True`).
  """
  base_cases = (
      {
          "testcase_name": "test_strings_soft_vocab_cap",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # accumulator is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
          },
          "expected_output": [[1], [2], [3], [4], [4], [3], [1], [0]],
          "input_dtype":
              tf.string
      },
  )

  crossed_cases = []
  for use_dataset in (True, False):
    for base in base_cases:
      # Shallow copy so each variant can carry its own name and flag.
      case = dict(base)
      if use_dataset:
        case["testcase_name"] += "_with_dataset"
      case["use_dataset"] = use_dataset
      crossed_cases.append(case)
  return crossed_cases
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class StringLookupLayerTest(keras_parameterized.TestCase,
                            preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end tests of StringLookup driven through `testing_utils.layer_test`."""

  @parameterized.named_parameters(*_get_end_to_end_test_cases())
  def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                       use_dataset, expected_output,
                                       input_dtype):
    """Adapts a StringLookup on `vocab_data` and checks lookups on `input_data`."""
    cls = string_lookup.StringLookup
    expected_output_dtype = tf.int64
    input_shape = input_data.shape

    if use_dataset:
      # Keras APIs expect batched datasets.
      # TODO(rachelim): `model.predict` predicts the result on each
      # dataset batch separately, then tries to concatenate the results
      # together. When the results have different shapes on the non-concat
      # axis (which can happen in the output_mode = INT case for
      # StringLookup), the concatenation fails. In real use cases, this may
      # not be an issue because users are likely to pipe the preprocessing layer
      # into other keras layers instead of predicting it directly. A workaround
      # for these unit tests is to have the dataset only contain one batch, so
      # no concatenation needs to happen with the result. For consistency with
      # numpy input, we should make `predict` join differently shaped results
      # together sensibly, with 0 padding.
      input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
          input_shape[0])
      vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
          input_shape[0])

    output_data = testing_utils.layer_test(
        cls,
        kwargs=kwargs,
        input_shape=input_shape,
        input_data=input_data,
        input_dtype=input_dtype,
        expected_output_dtype=expected_output_dtype,
        validate_training=False,
        adapt_data=vocab_data)
    self.assertAllClose(expected_output, output_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class StringLookupVocabularyTest(keras_parameterized.TestCase,
                                 preprocessing_test_utils.PreprocessingLayerTest
                                ):
  """Tests StringLookup with explicitly supplied vocabularies.

  Covers vocabularies passed as lists, tensors, and files; forward and
  inverted lookup; and the one_hot / multi_hot / count / sparse output modes.
  """

  def _write_to_temp_file(self, file_name, vocab_list):
    """Writes `vocab_list` (one token per line) to a temp file; returns its path."""
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(vocab + "\n")
      writer.flush()
      writer.close()
    return vocab_path

  def test_int_output_explicit_vocab(self):
    """With a list vocabulary, index 0 is the OOV bucket and terms start at 1."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(vocabulary=vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_int_output_explicit_vocab_with_special_tokens(self):
    """A vocabulary that already contains the mask and OOV tokens is honored."""
    vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(vocabulary=vocab_data, mask_token="")
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_int_output_no_oov(self):
    """With zero OOV indices, out-of-vocabulary input raises at lookup time."""
    vocab_data = ["earth", "wind", "and", "fire"]
    valid_input = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", ""]])
    invalid_input = np.array([["earth", "wind", "and", "michigan"],
                              ["fire", "and", "earth", "michigan"]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_data, mask_token="", num_oov_indices=0)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(valid_input)
    self.assertAllEqual(expected_output, output_data)
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                "found OOV values.*michigan"):
      _ = model.predict(invalid_input)

  def test_no_vocab(self):
    """Calling a binary-output layer without a vocabulary is an error."""
    with self.assertRaisesRegex(RuntimeError,
                                "you must set the layer's vocabulary"):
      layer = string_lookup.StringLookup(output_mode="binary")
      layer([["a"]])

  def test_one_hot_output(self):
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array(["earth", "wind", "and", "fire", "michigan"])
    expected_output = [
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0],
    ]

    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_data, output_mode="one_hot")
    res = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=res)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_multi_hot_output(self):
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[0, 1, 1, 1, 1], [1, 1, 0, 1, 1]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_data, output_mode="multi_hot")
    res = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=res)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_count_output(self):
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "earth", "fire", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[0, 2, 0, 0, 2], [1, 1, 0, 1, 1]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_data, output_mode="count")
    res = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=res)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_sparse_output(self):
    vocab_data = ["earth", "wind", "and", "fire"]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_data, output_mode="multi_hot", sparse=True)
    res = layer(input_data)
    # BUG FIX: this previously used `assertTrue(name, "SparseKerasTensor")`,
    # which treats the second argument as a failure *message*, so the check
    # passed for any non-empty class name. Assert the class name for real.
    self.assertEqual(res.__class__.__name__, "SparseKerasTensor")

  def test_get_vocab_returns_str(self):
    """get_vocabulary() returns python strings, round-trippable into invert."""
    vocab_data = ["earth", "wind", "and", "fire"]
    expected_vocab = ["[UNK]", "earth", "wind", "and", "fire"]
    layer = string_lookup.StringLookup(vocabulary=vocab_data)
    layer_vocab = layer.get_vocabulary()
    self.assertAllEqual(expected_vocab, layer_vocab)
    self.assertIsInstance(layer_vocab[0], str)

    inverse_layer = string_lookup.StringLookup(
        vocabulary=layer.get_vocabulary(), invert=True)
    layer_vocab = inverse_layer.get_vocabulary()
    self.assertAllEqual(expected_vocab, layer_vocab)
    self.assertIsInstance(layer_vocab[0], str)

  def test_int_output_explicit_vocab_from_file(self):
    vocab_list = ["earth", "wind", "and", "fire"]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)

    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(vocabulary=vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_int_output_explicit_vocab_from_file_via_setter(self):
    vocab_list = ["earth", "wind", "and", "fire"]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)

    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup()
    layer.set_vocabulary(vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_non_unique_vocab_fails(self):
    vocab_data = ["earth", "wind", "and", "fire", "fire"]
    with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
      _ = string_lookup.StringLookup(vocabulary=vocab_data)

  def test_non_unique_vocab_from_file_fails(self):
    vocab_list = ["earth", "wind", "and", "fire", "earth"]
    vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
    with self.assertRaisesRegex(
        tf.errors.FailedPreconditionError,
        "HashTable has different value for same key.*earth"):
      _ = string_lookup.StringLookup(vocabulary=vocab_path)

  def test_inverse_layer(self):
    """invert=True maps indices back to strings; index 0 is the mask token."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
    expected_output = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", ""]])

    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_data, invert=True, mask_token="")
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_inverse_layer_from_file(self):
    """With no mask token, index 0 inverts to the OOV token "[UNK]"."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 0]])
    expected_output = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "[UNK]"]])
    vocab_path = self._write_to_temp_file("vocab_file", vocab_data)

    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = string_lookup.StringLookup(vocabulary=vocab_path, invert=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_inverse_layer_from_file_with_mask(self):
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
    expected_output = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "[M]"]])
    vocab_path = self._write_to_temp_file("vocab_file", vocab_data)

    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = string_lookup.StringLookup(
        vocabulary=vocab_path, invert=True, mask_token="[M]")
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_forward_backward_explicit_vocab(self):
    """Forward lookup piped into inverse lookup recovers in-vocab tokens."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "[UNK]"]])

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup(vocabulary=vocab_data)
    invert_layer = string_lookup.StringLookup(
        vocabulary=vocab_data, invert=True)
    int_data = layer(input_data)
    out_data = invert_layer(int_data)
    model = keras.Model(inputs=input_data, outputs=out_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_forward_backward_adapted_vocab(self):
    adapt_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "[UNK]"]])

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = string_lookup.StringLookup()
    layer.adapt(adapt_data)
    invert_layer = string_lookup.StringLookup(
        vocabulary=layer.get_vocabulary(), invert=True)
    int_data = layer(input_data)
    out_data = invert_layer(int_data)
    model = keras.Model(inputs=input_data, outputs=out_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_ragged_string_input_multi_bucket(self):
    """Ragged input with two OOV buckets; "ohio" hashes into OOV index 1."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = tf.ragged.constant([["earth", "wind", "fire"],
                                      ["fire", "and", "earth",
                                       "ohio"]])
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
    layer = string_lookup.StringLookup(num_oov_indices=2)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array)
    self.assertAllEqual(expected_output, output_data)

  def test_tensor_vocab(self):
    """Tensor vocabularies work eagerly but cannot be set inside tf.function."""
    vocab_data = ["[UNK]", "wind", "and", "fire"]
    vocab_tensor = tf.constant(vocab_data)
    layer = string_lookup.StringLookup(vocabulary=vocab_tensor)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)
    self.assertAllEqual(layer.vocabulary_size(), 4)
    fn = tf.function(lambda: layer.set_vocabulary(vocab_tensor))
    with self.assertRaisesRegex(RuntimeError, "Cannot set a tensor vocabulary"):
      fn()
if __name__ == "__main__":
  # Standard TF test entry point.
  tf.test.main()
| 16,929 | 42.188776 | 80 | py |
keras | keras-master/keras/layers/preprocessing/reduction_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.reduction."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras.layers.preprocessing import reduction
@keras_parameterized.run_all_keras_modes
class ReductionTest(keras_parameterized.TestCase):
  """Tests the Reduction layer over ragged and dense inputs.

  Each reduction mode (max/mean/min/prod/sum, plus sqrtn when weighted) is
  checked against hand-computed outputs; reduction happens over axis 1.
  """

  @parameterized.named_parameters(
      {
          "testcase_name": "max",
          "reduction_str": "max",
          "expected_output": [[3.0, 3.0], [3.0, 2.0]]
      }, {
          "testcase_name": "mean",
          "reduction_str": "mean",
          "expected_output": [[2.0, 2.0], [2.0, 1.5]]
      }, {
          "testcase_name": "min",
          "reduction_str": "min",
          "expected_output": [[1.0, 1.0], [1.0, 1.0]]
      }, {
          "testcase_name": "prod",
          "reduction_str": "prod",
          "expected_output": [[6.0, 6.0], [3.0, 2.0]]
      }, {
          "testcase_name": "sum",
          "reduction_str": "sum",
          "expected_output": [[6.0, 6.0], [4.0, 3.0]]
      })
  def test_unweighted_ragged_reduction(self, reduction_str, expected_output):
    """Unweighted reductions over a ragged (2, None, 2) input."""
    data = tf.ragged.constant([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                               [[3.0, 1.0], [1.0, 2.0]]])
    input_tensor = keras.Input(shape=(None, None), ragged=True)

    output_tensor = reduction.Reduction(reduction=reduction_str)(input_tensor)
    model = keras.Model(input_tensor, output_tensor)

    output = model.predict(data)

    self.assertAllClose(expected_output, output)

  @parameterized.named_parameters(
      {
          "testcase_name": "max",
          "reduction_str": "max",
          "expected_output": [[4.0, 4.0], [1.5, 6.0]]
      }, {
          "testcase_name": "mean",
          "reduction_str": "mean",
          "expected_output": [[2.0, 2.0], [1.666667, 1.75]]
      }, {
          "testcase_name": "min",
          "reduction_str": "min",
          "expected_output": [[1.0, 1.0], [1.0, 1.0]]
      }, {
          "testcase_name": "prod",
          "reduction_str": "prod",
          "expected_output": [[12.0, 12.0], [1.5, 6.0]]
      }, {
          "testcase_name": "sum",
          "reduction_str": "sum",
          "expected_output": [[8.0, 8.0], [2.5, 7.0]]
      }, {
          "testcase_name": "sqrtn",
          "reduction_str": "sqrtn",
          "expected_output": [[3.265986, 3.265986], [2.236067, 2.213594]]
      })
  def test_weighted_ragged_reduction(self, reduction_str, expected_output):
    """Weighted reductions with per-element weights on ragged input."""
    data = tf.ragged.constant([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                               [[3.0, 1.0], [1.0, 2.0]]])
    input_tensor = keras.Input(shape=(None, None), ragged=True)

    weights = tf.ragged.constant([[[1.0, 1.0], [2.0, 2.0], [1.0, 1.0]],
                                  [[0.5, 1.0], [1.0, 3.0]]])
    weight_input_tensor = keras.Input(shape=(None, None), ragged=True)

    output_tensor = reduction.Reduction(reduction=reduction_str)(
        input_tensor, weights=weight_input_tensor)
    model = keras.Model([input_tensor, weight_input_tensor], output_tensor)

    output = model.predict([data, weights])
    self.assertAllClose(expected_output, output)

  def test_weighted_ragged_reduction_with_different_dimensionality(self):
    """Weights with one fewer dimension broadcast across the inner axis."""
    data = tf.ragged.constant([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                               [[3.0, 1.0], [1.0, 2.0]]])
    input_tensor = keras.Input(shape=(None, None), ragged=True)

    weights = tf.ragged.constant([[1.0, 2.0, 1.0], [1.0, 1.0]])
    weight_input_tensor = keras.Input(shape=(None,), ragged=True)

    output_tensor = reduction.Reduction(reduction="mean")(
        input_tensor, weights=weight_input_tensor)
    model = keras.Model([input_tensor, weight_input_tensor], output_tensor)

    output = model.predict([data, weights])
    expected_output = [[2.0, 2.0], [2.0, 1.5]]
    self.assertAllClose(expected_output, output)

  @parameterized.named_parameters(
      {
          "testcase_name": "max",
          "reduction_str": "max",
          "expected_output": [[3.0, 3.0], [3.0, 2.0]]
      }, {
          "testcase_name": "mean",
          "reduction_str": "mean",
          "expected_output": [[2.0, 2.0], [1.333333, 1.0]]
      }, {
          "testcase_name": "min",
          "reduction_str": "min",
          "expected_output": [[1.0, 1.0], [0.0, 0.0]]
      }, {
          "testcase_name": "prod",
          "reduction_str": "prod",
          "expected_output": [[6.0, 6.0], [0.0, 0.0]]
      }, {
          "testcase_name": "sum",
          "reduction_str": "sum",
          "expected_output": [[6.0, 6.0], [4.0, 3.0]]
      })
  def test_unweighted_dense_reduction(self, reduction_str, expected_output):
    """Unweighted reductions over a dense (2, 3, 2) input (padded with zeros)."""
    data = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                     [[3.0, 1.0], [1.0, 2.0], [0.0, 0.0]]])
    input_tensor = keras.Input(shape=(None, None))

    output_tensor = reduction.Reduction(reduction=reduction_str)(input_tensor)
    model = keras.Model(input_tensor, output_tensor)

    output = model.predict(data)

    self.assertAllClose(expected_output, output)

  @parameterized.named_parameters(
      {
          "testcase_name": "max",
          "reduction_str": "max",
          "expected_output": [[4.0, 4.0], [1.5, 6.0]]
      }, {
          "testcase_name": "mean",
          "reduction_str": "mean",
          "expected_output": [[2.0, 2.0], [1.666667, 1.75]]
      }, {
          "testcase_name": "min",
          "reduction_str": "min",
          "expected_output": [[1.0, 1.0], [0.0, 0.0]]
      }, {
          "testcase_name": "prod",
          "reduction_str": "prod",
          "expected_output": [[12.0, 12.0], [0.0, 0.0]]
      }, {
          "testcase_name": "sum",
          "reduction_str": "sum",
          "expected_output": [[8.0, 8.0], [2.5, 7.0]]
      }, {
          "testcase_name": "sqrtn",
          "reduction_str": "sqrtn",
          "expected_output": [[3.265986, 3.265986], [2.236067, 2.213594]]
      })
  def test_weighted_dense_reduction(self, reduction_str, expected_output):
    """Weighted reductions with per-element weights on dense input."""
    data = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                     [[3.0, 1.0], [1.0, 2.0], [0.0, 0.0]]])
    input_tensor = keras.Input(shape=(None, None))

    weights = np.array([[[1.0, 1.0], [2.0, 2.0], [1.0, 1.0]],
                        [[0.5, 1.0], [1.0, 3.0], [0.0, 0.0]]])
    weight_input_tensor = keras.Input(shape=(None, None))

    output_tensor = reduction.Reduction(reduction=reduction_str)(
        input_tensor, weights=weight_input_tensor)
    model = keras.Model([input_tensor, weight_input_tensor], output_tensor)

    output = model.predict([data, weights])
    self.assertAllClose(expected_output, output)

  def test_weighted_dense_reduction_with_different_dimensionality(self):
    """Weights with one fewer dimension broadcast across the inner axis."""
    data = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                     [[3.0, 1.0], [1.0, 2.0], [0.0, 0.0]]])
    input_tensor = keras.Input(shape=(None, None))

    weights = np.array([[1.0, 2.0, 1.0], [1.0, 1.0, 0.0]])
    weight_input_tensor = keras.Input(shape=(None,))

    output_tensor = reduction.Reduction(reduction="mean")(
        input_tensor, weights=weight_input_tensor)
    model = keras.Model([input_tensor, weight_input_tensor], output_tensor)

    output = model.predict([data, weights])
    expected_output = [[2.0, 2.0], [2.0, 1.5]]
    self.assertAllClose(expected_output, output)

  def test_sqrtn_fails_on_unweighted_ragged(self):
    """"sqrtn" is only defined for weighted reductions."""
    input_tensor = keras.Input(shape=(None, None), ragged=True)
    with self.assertRaisesRegex(ValueError, ".*sqrtn.*"):
      _ = reduction.Reduction(reduction="sqrtn")(input_tensor)

  def test_sqrtn_fails_on_unweighted_dense(self):
    """"sqrtn" is only defined for weighted reductions."""
    input_tensor = keras.Input(shape=(None, None))
    with self.assertRaisesRegex(ValueError, ".*sqrtn.*"):
      _ = reduction.Reduction(reduction="sqrtn")(input_tensor)
if __name__ == "__main__":
  # Standard TF test entry point.
  tf.test.main()
| 8,591 | 36.684211 | 80 | py |
keras | keras-master/keras/layers/preprocessing/discretization_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras discretization preprocessing layer."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.preprocessing import discretization
from keras.layers.preprocessing import preprocessing_test_utils
@keras_parameterized.run_all_keras_modes
class DiscretizationTest(keras_parameterized.TestCase,
                         preprocessing_test_utils.PreprocessingLayerTest):
  """Tests Discretization with explicit bin boundaries.

  Covers dense, sparse, and ragged inputs in both float and integer dtypes;
  a value in bin i satisfies boundaries[i-1] <= value < boundaries[i].
  """

  def test_bucketize_with_explicit_buckets_integer(self):
    input_array = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])

    expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
    expected_output_shape = [None, 4]

    input_data = keras.Input(shape=(4,))
    layer = discretization.Discretization(bin_boundaries=[0., 1., 2.])
    bucket_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_bucketize_with_explicit_buckets_int_input(self):
    input_array = np.array([[-1, 1, 3, 0], [0, 3, 1, 0]], dtype=np.int64)

    expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
    expected_output_shape = [None, 4]

    input_data = keras.Input(shape=(4,), dtype=tf.int64)
    layer = discretization.Discretization(bin_boundaries=[-.5, 0.5, 1.5])
    bucket_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_bucketize_with_explicit_buckets_sparse_float_input(self):
    # Sparse input: bucketization applies to the values only; the indices
    # of the output should be unchanged.
    indices = [[0, 1], [0, 2], [1, 1]]
    input_array = tf.SparseTensor(
        indices=indices, values=[-1.5, 1.0, 3.4], dense_shape=[2, 3])
    expected_output = [0, 2, 3]
    input_data = keras.Input(shape=(3,), dtype=tf.float32, sparse=True)
    layer = discretization.Discretization(bin_boundaries=[-.5, 0.5, 1.5])
    bucket_data = layer(input_data)

    model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(indices, output_dataset.indices)
    self.assertAllEqual(expected_output, output_dataset.values)

  def test_bucketize_with_explicit_buckets_ragged_float_input(self):
    input_array = tf.ragged.constant([[-1.5, 1.0, 3.4, .5],
                                      [0.0, 3.0, 1.3]])

    expected_output = [[0, 2, 3, 1], [1, 3, 2]]
    expected_output_shape = [None, None]

    input_data = keras.Input(shape=(None,), ragged=True)
    layer = discretization.Discretization(bin_boundaries=[0., 1., 2.])
    bucket_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_bucketize_with_explicit_buckets_ragged_int_input(self):
    input_array = tf.ragged.constant([[-1, 1, 3, 0], [0, 3, 1]],
                                     dtype=tf.int64)

    expected_output = [[0, 2, 3, 1], [1, 3, 2]]
    expected_output_shape = [None, None]

    input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.int64)
    layer = discretization.Discretization(bin_boundaries=[-.5, 0.5, 1.5])
    bucket_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_bucketize_with_explicit_buckets_sparse_int_input(self):
    indices = [[0, 1], [0, 2], [1, 1]]
    input_array = tf.SparseTensor(
        indices=indices, values=[-1, 1, 3], dense_shape=[2, 3])
    expected_output = [0, 2, 3]
    input_data = keras.Input(shape=(3,), dtype=tf.int32, sparse=True)
    layer = discretization.Discretization(bin_boundaries=[-.5, 0.5, 1.5])
    bucket_data = layer(input_data)

    model = keras.Model(inputs=input_data, outputs=bucket_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(indices, output_dataset.indices)
    self.assertAllEqual(expected_output, output_dataset.values)
def test_output_shape(self):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.int64)
layer = discretization.Discretization(bin_boundaries=[-.5, 0.5, 1.5])
output = layer(input_data)
self.assertAllEqual(output.shape.as_list(), [16, 4])
def test_output_dtype(self):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.int64)
layer = discretization.Discretization(bin_boundaries=[-.5, 0.5, 1.5])
output = layer(input_data)
self.assertAllEqual(output.dtype, tf.int64)
def test_num_bins_negative_fails(self):
with self.assertRaisesRegex(ValueError, "`num_bins` must be.*num_bins=-7"):
_ = discretization.Discretization(num_bins=-7)
def test_num_bins_and_bins_set_fails(self):
with self.assertRaisesRegex(
ValueError,
r"`num_bins` and `bin_boundaries` should not be set.*5.*\[1, 2\]"):
_ = discretization.Discretization(num_bins=5, bins=[1, 2])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class DiscretizationAdaptTest(keras_parameterized.TestCase,
                              preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for `Discretization` when boundaries are learned via `adapt`."""

  @parameterized.named_parameters([
      {
          "testcase_name": "2d_single_element",
          "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]]),
          "test_data": np.array([[1.], [2.], [3.]]),
          "use_dataset": True,
          "expected": np.array([[1], [2], [3]]),
          "num_bins": 5,
          "epsilon": 0.01
      }, {
          "testcase_name": "2d_multi_element",
          "adapt_data": np.array([[1., 6.], [2., 7.], [3., 8.], [4., 9.],
                                  [5., 10.]]),
          "test_data": np.array([[1., 10.], [2., 6.], [3., 8.]]),
          "use_dataset": True,
          "expected": np.array([[0, 4], [1, 3], [1, 4]]),
          "num_bins": 5,
          "epsilon": 0.01
      }, {
          "testcase_name": "1d_single_element",
          "adapt_data": np.array([3., 2., 1., 5., 4.]),
          "test_data": np.array([1., 2., 3.]),
          "use_dataset": True,
          "expected": np.array([1, 2, 3]),
          "num_bins": 5,
          "epsilon": 0.01
      }, {
          "testcase_name": "300_batch_1d_single_element_1",
          "adapt_data": np.arange(300),
          "test_data": np.arange(300),
          "use_dataset": True,
          "expected":
              np.concatenate([np.zeros(101), np.ones(99), 2 * np.ones(100)]),
          "num_bins": 3,
          "epsilon": 0.01
      }, {
          "testcase_name": "300_batch_1d_single_element_2",
          "adapt_data": np.arange(300) ** 2,
          "test_data": np.arange(300) ** 2,
          "use_dataset": True,
          "expected":
              np.concatenate([np.zeros(101), np.ones(99), 2 * np.ones(100)]),
          "num_bins": 3,
          "epsilon": 0.01
      }, {
          "testcase_name": "300_batch_1d_single_element_large_epsilon",
          "adapt_data": np.arange(300),
          "test_data": np.arange(300),
          "use_dataset": True,
          "expected": np.concatenate([np.zeros(136), np.ones(164)]),
          "num_bins": 2,
          "epsilon": 0.1
      }])
  def test_layer_computation(self, adapt_data, test_data, use_dataset,
                             expected, num_bins=5, epsilon=0.01):
    """Adapted quantile boundaries bucketize `test_data` into `expected`."""
    input_shape = tuple(list(test_data.shape)[1:])
    np.random.shuffle(adapt_data)
    if use_dataset:
      # Keras APIs expect batched datasets
      adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
          test_data.shape[0] // 2)
      test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
          test_data.shape[0] // 2)
    layer = discretization.Discretization(epsilon=epsilon, num_bins=num_bins)
    layer.adapt(adapt_data)
    input_data = keras.Input(shape=input_shape)
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()
    output_data = model.predict(test_data)
    self.assertAllClose(expected, output_data)

  def test_multiple_adapts(self):
    """Re-adapting replaces the previously learned bin boundaries."""
    first_adapt = [[1], [2], [3]]
    second_adapt = [[4], [5], [6]]
    predict_input = [[2], [2]]
    expected_first_output = [[2], [2]]
    expected_second_output = [[0], [0]]
    inputs = keras.Input(shape=(1,), dtype=tf.int32)
    layer = discretization.Discretization(num_bins=3)
    layer.adapt(first_adapt)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    actual_output = model.predict(predict_input)
    self.assertAllClose(actual_output, expected_first_output)

    # Re-adapt the layer on new inputs.
    layer.adapt(second_adapt)
    # Re-compile the model.
    model.compile()
    # `predict` should now use the new model state.
    actual_output = model.predict(predict_input)
    self.assertAllClose(actual_output, expected_second_output)

  def test_saved_model_tf(self):
    """Adapted state round-trips through a raw TF SavedModel."""
    input_data = [[1], [2], [3]]
    predict_data = [[0.5], [1.5], [2.5]]
    expected_output = [[0], [1], [2]]
    inputs = keras.Input(shape=(1,), dtype=tf.float32)
    layer = discretization.Discretization(num_bins=3)
    layer.adapt(input_data)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model.predict(predict_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_saved_model")
    tf.saved_model.save(model, output_path)
    loaded_model = tf.saved_model.load(output_path)
    f = loaded_model.signatures["serving_default"]

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_data = f(tf.constant(predict_data))["discretization"]
    self.assertAllClose(new_output_data, expected_output)

  def test_saved_model_keras(self):
    """Adapted state round-trips through `keras.models.load_model`."""
    input_data = [[1], [2], [3]]
    predict_data = [[0.5], [1.5], [2.5]]
    expected_output = [[0], [1], [2]]
    cls = discretization.Discretization
    inputs = keras.Input(shape=(1,), dtype=tf.float32)
    layer = cls(num_bins=3)
    layer.adapt(input_data)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model.predict(predict_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")
    loaded_model = keras.models.load_model(
        output_path, custom_objects={"Discretization": cls})

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_data = loaded_model.predict(predict_data)
    self.assertAllClose(new_output_data, expected_output)

  def test_saved_weights_keras(self):
    """Adapted state round-trips through `save_weights`/`load_weights`."""
    input_data = [[1], [2], [3]]
    predict_data = [[0.5], [1.5], [2.5]]
    expected_output = [[0], [1], [2]]
    cls = discretization.Discretization
    inputs = keras.Input(shape=(1,), dtype=tf.float32)
    layer = cls(num_bins=3)
    layer.adapt(input_data)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model.predict(predict_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_weights")
    model.save_weights(output_path, save_format="tf")
    new_model = keras.Model.from_config(
        model.get_config(), custom_objects={"Discretization": cls})
    new_model.load_weights(output_path)

    # Validate correctness of the new model.
    new_output_data = new_model.predict(predict_data)
    self.assertAllClose(new_output_data, expected_output)
if __name__ == "__main__":
  # Run all tests in this module via the TensorFlow test runner.
  tf.test.main()
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
import functools
from absl.testing import parameterized
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import sequential
from keras.layers.preprocessing import image_preprocessing
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.distribute.mirrored_strategy import MirroredStrategy
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import stateless_random_ops
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class ResizingTest(keras_parameterized.TestCase):
  """Tests for the `Resizing` image preprocessing layer."""

  def _run_test(self, kwargs, expected_height, expected_width):
    """Runs `layer_test` on random images and checks the output shape."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs.update({'height': expected_height, 'width': expected_width})
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.Resizing,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))

  @parameterized.named_parameters(('down_sample_bilinear_2_by_2', {
      'interpolation': 'bilinear'
  }, 2, 2), ('down_sample_bilinear_3_by_2', {
      'interpolation': 'bilinear'
  }, 3, 2), ('down_sample_nearest_2_by_2', {
      'interpolation': 'nearest'
  }, 2, 2), ('down_sample_nearest_3_by_2', {
      'interpolation': 'nearest'
  }, 3, 2), ('down_sample_area_2_by_2', {
      'interpolation': 'area'
  }, 2, 2), ('down_sample_area_3_by_2', {
      'interpolation': 'area'
  }, 3, 2), ('down_sample_crop_to_aspect_ratio_3_by_2', {
      'interpolation': 'bilinear',
      'crop_to_aspect_ratio': True,
  }, 3, 2))
  def test_down_sampling(self, kwargs, expected_height, expected_width):
    """Downsampling to a smaller target yields the requested shape."""
    self._run_test(kwargs, expected_height, expected_width)

  @parameterized.named_parameters(('up_sample_bilinear_10_by_12', {
      'interpolation': 'bilinear'
  }, 10, 12), ('up_sample_bilinear_12_by_12', {
      'interpolation': 'bilinear'
  }, 12, 12), ('up_sample_nearest_10_by_12', {
      'interpolation': 'nearest'
  }, 10, 12), ('up_sample_nearest_12_by_12', {
      'interpolation': 'nearest'
  }, 12, 12), ('up_sample_area_10_by_12', {
      'interpolation': 'area'
  }, 10, 12), ('up_sample_area_12_by_12', {
      'interpolation': 'area'
  }, 12, 12), ('up_sample_crop_to_aspect_ratio_12_by_14', {
      'interpolation': 'bilinear',
      'crop_to_aspect_ratio': True,
  }, 12, 14))
  def test_up_sampling(self, kwargs, expected_height, expected_width):
    """Upsampling to a larger target yields the requested shape."""
    self._run_test(kwargs, expected_height, expected_width)

  def test_down_sampling_numeric(self):
    """Nearest-neighbor downsampling picks the expected pixel values."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(dtype)
        layer = image_preprocessing.Resizing(
            height=2, width=2, interpolation='nearest')
        output_image = layer(input_image)
        # pyformat: disable
        expected_output = np.asarray([
            [5, 7],
            [13, 15]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 2, 2, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_up_sampling_numeric(self):
    """Nearest-neighbor upsampling duplicates the expected pixel values."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(dtype)
        layer = image_preprocessing.Resizing(
            height=4, width=4, interpolation='nearest')
        output_image = layer(input_image)
        # pyformat: disable
        expected_output = np.asarray([
            [0, 0, 1, 1],
            [0, 0, 1, 1],
            [2, 2, 3, 3],
            [2, 2, 3, 3]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 4, 4, 1))
        self.assertAllEqual(expected_output, output_image)

  @parameterized.named_parameters(('reshape_bilinear_10_by_4', {
      'interpolation': 'bilinear'
  }, 10, 4))
  def test_reshaping(self, kwargs, expected_height, expected_width):
    """Resizing to a different aspect ratio yields the requested shape."""
    self._run_test(kwargs, expected_height, expected_width)

  def test_invalid_interpolation(self):
    """An unknown interpolation name is rejected at construction time."""
    with self.assertRaises(NotImplementedError):
      image_preprocessing.Resizing(5, 5, 'invalid_interpolation')

  def test_config_with_custom_name(self):
    """The layer name survives a get_config/from_config round trip."""
    layer = image_preprocessing.Resizing(5, 5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.Resizing.from_config(config)
    self.assertEqual(layer_1.name, layer.name)

  def test_crop_to_aspect_ratio(self):
    """With crop_to_aspect_ratio=True the excess width is cropped away."""
    with testing_utils.use_gpu():
      input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype('float32')
      layer = image_preprocessing.Resizing(4, 2, crop_to_aspect_ratio=True)
      output_image = layer(input_image)
      expected_output = np.asarray([
          [1, 2],
          [5, 6],
          [9, 10],
          [13, 14],
      ]).astype('float32')
      expected_output = np.reshape(expected_output, (1, 4, 2, 1))
      self.assertAllEqual(expected_output, output_image)

  def test_unbatched_image(self):
    """A rank-3 (unbatched) image is resized without adding a batch dim."""
    with testing_utils.use_gpu():
      input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype('float32')
      layer = image_preprocessing.Resizing(2, 2, interpolation='nearest')
      output_image = layer(input_image)
      expected_output = np.asarray([
          [5, 7],
          [13, 15],
      ]).astype('float32')
      expected_output = np.reshape(expected_output, (2, 2, 1))
      self.assertAllEqual(expected_output, output_image)

  @parameterized.named_parameters(
      ('batch_crop_to_aspect_ratio', True, True),
      ('batch_dont_crop_to_aspect_ratio', False, True),
      ('single_sample_crop_to_aspect_ratio', True, False),
      ('single_sample_dont_crop_to_aspect_ratio', False, False),
  )
  def test_static_shape_inference(self, crop_to_aspect_ratio, batch):
    """Static output shape is inferable inside a tf.function."""
    channels = 3
    input_height = 8
    input_width = 8
    target_height = 4
    target_width = 6
    layer = image_preprocessing.Resizing(
        target_height, target_width, crop_to_aspect_ratio=crop_to_aspect_ratio)
    unit_test = self

    @tf.function
    def tf_function(img):
      # Shape checks run at trace time, so failures surface as graph-build
      # errors rather than runtime assertions.
      unit_test.assertListEqual([input_height, input_width, channels],
                                img.shape.as_list()[-3:])
      img = layer(img)
      unit_test.assertListEqual([target_height, target_width, channels],
                                img.shape.as_list()[-3:])
      return img

    with testing_utils.use_gpu():
      if batch:
        input_shape = (2, input_height, input_width, channels)
      else:
        input_shape = (input_height, input_width, channels)
      img_data = np.random.random(size=input_shape).astype('float32')
      tf_function(img_data)
def get_numpy_center_crop(images, expected_height, expected_width):
  """Numpy reference implementation of a center crop.

  Args:
    images: batch of images shaped (batch, height, width, channels).
    expected_height: height of the crop window.
    expected_width: width of the crop window.

  Returns:
    The centered (expected_height, expected_width) region of each image.
  """
  orig_height, orig_width = images.shape[1], images.shape[2]
  # int() truncation (rather than floor division) preserves the original
  # helper's rounding for odd and for negative (invalid-crop) differences.
  top = int((orig_height - expected_height) / 2)
  left = int((orig_width - expected_width) / 2)
  return images[:, top:top + expected_height, left:left + expected_width, :]
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CenterCropTest(keras_parameterized.TestCase):
  """Tests for the `CenterCrop` layer against the numpy reference crop."""

  def _run_test(self, expected_height, expected_width):
    """Compares the layer's output to `get_numpy_center_crop`."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'height': expected_height, 'width': expected_width}
    input_images = np.random.random(
        (num_samples, orig_height, orig_width, channels)).astype(np.float32)
    expected_output = get_numpy_center_crop(input_images, expected_height,
                                            expected_width)
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.CenterCrop,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          input_data=input_images,
          expected_output=expected_output,
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))

  @parameterized.named_parameters(('center_crop_3_by_4', 3, 4),
                                  ('center_crop_3_by_2', 3, 2))
  def test_center_crop_aligned(self, expected_height, expected_width):
    """Crop where both margins split evenly."""
    self._run_test(expected_height, expected_width)

  @parameterized.named_parameters(('center_crop_4_by_5', 4, 5),
                                  ('center_crop_4_by_3', 4, 3))
  def test_center_crop_mis_aligned(self, expected_height, expected_width):
    """Crop where both margins are odd-sized."""
    self._run_test(expected_height, expected_width)

  @parameterized.named_parameters(('center_crop_4_by_6', 4, 6),
                                  ('center_crop_3_by_2', 3, 2))
  def test_center_crop_half_mis_aligned(self, expected_height, expected_width):
    """Crop where only one margin is odd-sized."""
    self._run_test(expected_height, expected_width)

  @parameterized.named_parameters(('center_crop_5_by_12', 5, 12),
                                  ('center_crop_10_by_8', 10, 8),
                                  ('center_crop_10_by_12', 10, 12))
  def test_invalid_center_crop(self, expected_height, expected_width):
    """A crop larger than the input must raise at execution time."""
    # InternalError is raised by tf.function MLIR lowering pass when TFRT
    # is enabled.
    with self.assertRaisesRegex(
        (tf.errors.InvalidArgumentError, tf.errors.InternalError),
        r'assertion failed|error: \'tf.Slice\' op'):
      self._run_test(expected_height, expected_width)

  def test_config_with_custom_name(self):
    """The layer name survives a get_config/from_config round trip."""
    layer = image_preprocessing.CenterCrop(5, 5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.CenterCrop.from_config(config)
    self.assertEqual(layer_1.name, layer.name)

  def test_unbatched_image(self):
    """A rank-3 (unbatched) image is cropped without adding a batch dim."""
    with testing_utils.use_gpu():
      input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype('float32')
      layer = image_preprocessing.CenterCrop(2, 2)
      output_image = layer(input_image)
      expected_output = np.asarray([
          [5, 6],
          [9, 10],
      ]).astype('float32')
      expected_output = np.reshape(expected_output, (2, 2, 1))
      self.assertAllEqual(expected_output, output_image)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomCropTest(keras_parameterized.TestCase):
  """Tests for the `RandomCrop` layer (training and inference behavior)."""

  def _run_test(self, expected_height, expected_width):
    """Runs `layer_test` and checks only the output shape (crop is random)."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'height': expected_height, 'width': expected_width}
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.RandomCrop,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))

  @parameterized.named_parameters(('random_crop_5_by_12', 5, 12),
                                  ('random_crop_10_by_8', 10, 8),
                                  ('random_crop_10_by_12', 10, 12))
  def test_invalid_random_crop(self, expected_height, expected_width):
    """A crop larger than the input must raise at execution time."""
    # InternalError is raised by tf.function MLIR lowering pass when TFRT
    # is enabled.
    with self.assertRaises(
        (tf.errors.InvalidArgumentError, tf.errors.InternalError)):
      self._run_test(expected_height, expected_width)

  def test_training_with_mock(self):
    """With a mocked random offset, training-mode crop is deterministic."""
    np.random.seed(1337)
    height, width = 3, 4
    height_offset = np.random.randint(low=0, high=3)
    width_offset = np.random.randint(low=0, high=5)
    mock_offset = [0, height_offset, width_offset, 0]
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_offset):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomCrop(height, width)
        inp = np.random.random((12, 5, 8, 3))
        actual_output = layer(inp, training=1)
        expected_output = inp[:, height_offset:(height_offset + height),
                              width_offset:(width_offset + width), :]
        self.assertAllClose(expected_output, actual_output)

  @parameterized.named_parameters(('random_crop_4_by_6', 4, 6),
                                  ('random_crop_3_by_2', 3, 2))
  def test_random_crop_output_shape(self, expected_height, expected_width):
    """Output has the requested static crop shape."""
    self._run_test(expected_height, expected_width)

  def test_random_crop_full_height(self):
    """Crop height equal to the input height is valid."""
    self._run_test(5, 2)

  def test_random_crop_full_width(self):
    """Crop width equal to the input width is valid."""
    self._run_test(3, 8)

  def test_random_crop_full(self):
    """Inference with crop size == input size is the identity."""
    np.random.seed(1337)
    height, width = 8, 16
    inp = np.random.random((12, 8, 16, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      self.assertAllClose(inp, actual_output)

  def test_predicting_with_mock_longer_height(self):
    """Inference resizes then center-crops when input is taller than wide."""
    np.random.seed(1337)
    height, width = 3, 3
    inp = np.random.random((12, 10, 6, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      resized_inp = tf.image.resize(inp, size=[5, 3])
      expected_output = resized_inp[:, 1:4, :, :]
      self.assertAllClose(expected_output, actual_output)

  def test_predicting_with_mock_longer_width(self):
    """Inference resizes then center-crops when input is wider than tall."""
    np.random.seed(1337)
    height, width = 4, 6
    inp = np.random.random((12, 8, 16, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      resized_inp = tf.image.resize(inp, size=[4, 8])
      expected_output = resized_inp[:, :, 1:7, :]
      self.assertAllClose(expected_output, actual_output)

  def test_config_with_custom_name(self):
    """The layer name survives a get_config/from_config round trip."""
    layer = image_preprocessing.RandomCrop(5, 5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomCrop.from_config(config)
    self.assertEqual(layer_1.name, layer.name)

  def test_unbatched_image(self):
    """A rank-3 (unbatched) image is cropped with the mocked offset."""
    np.random.seed(1337)
    inp = np.random.random((16, 16, 3))
    mock_offset = [2, 2, 0]
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_offset):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomCrop(8, 8)
        actual_output = layer(inp, training=1)
        self.assertAllClose(inp[2:10, 2:10, :], actual_output)
class RescalingTest(keras_parameterized.TestCase):
  """Tests for the `Rescaling` layer (scale and offset arithmetic)."""

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_rescaling_base(self):
    """Output shape is unchanged by rescaling."""
    kwargs = {'scale': 1. / 127.5, 'offset': -1.}
    testing_utils.layer_test(
        image_preprocessing.Rescaling,
        kwargs=kwargs,
        input_shape=(2, 5, 6, 3),
        expected_output_shape=(None, 5, 6, 3))

  @testing_utils.run_v2_only
  def test_rescaling_correctness_float(self):
    """Float inputs are mapped to inputs * scale + offset."""
    layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1.)
    inputs = tf.random.uniform((2, 4, 5, 3))
    outputs = layer(inputs)
    self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)

  @testing_utils.run_v2_only
  def test_rescaling_correctness_int(self):
    """Integer inputs are cast to float32 before rescaling."""
    layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1)
    inputs = tf.random.uniform((2, 4, 5, 3), 0, 100, dtype='int32')
    outputs = layer(inputs)
    self.assertEqual(outputs.dtype.name, 'float32')
    self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)

  def test_config_with_custom_name(self):
    """The layer name survives a get_config/from_config round trip."""
    layer = image_preprocessing.Rescaling(0.5, name='rescaling')
    config = layer.get_config()
    layer_1 = image_preprocessing.Rescaling.from_config(config)
    self.assertEqual(layer_1.name, layer.name)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_unbatched_image(self):
    """A rank-3 (unbatched) image is rescaled elementwise."""
    layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1)
    inputs = tf.random.uniform((4, 5, 3))
    outputs = layer(inputs)
    self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomFlipTest(keras_parameterized.TestCase):
  """Tests for `RandomFlip` with the random draw mocked out."""

  def _run_test(self, mode, expected_output=None, mock_random=None):
    """Checks `mode` against numpy flips with a mocked random draw.

    When `expected_output` is None, every sample is expected to flip
    (mock_random defaults to all ones).
    """
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    if mock_random is None:
      mock_random = [1 for _ in range(num_samples)]
      mock_random = np.reshape(mock_random, [2, 1, 1, 1])
    inp = np.random.random((num_samples, orig_height, orig_width, channels))
    if expected_output is None:
      expected_output = inp
      if mode == 'horizontal' or mode == 'horizontal_and_vertical':
        expected_output = np.flip(expected_output, axis=2)
      if mode == 'vertical' or mode == 'horizontal_and_vertical':
        expected_output = np.flip(expected_output, axis=1)
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_random,
    ):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomFlip(mode)
        actual_output = layer(inp, training=1)
        self.assertAllClose(expected_output, actual_output)

  @parameterized.named_parameters(
      ('random_flip_horizontal', 'horizontal'),
      ('random_flip_vertical', 'vertical'),
      ('random_flip_both', 'horizontal_and_vertical'))
  def test_random_flip(self, mode):
    """Each supported mode flips along the expected axes."""
    self._run_test(mode)

  def test_random_flip_horizontal_half(self):
    """Only the samples whose random draw is 1 are flipped horizontally."""
    np.random.seed(1337)
    mock_random = [1, 0]
    mock_random = np.reshape(mock_random, [2, 1, 1, 1])
    input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
    expected_output = input_images.copy()
    expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1)
    self._run_test('horizontal', expected_output, mock_random)

  def test_random_flip_vertical_half(self):
    """Only the samples whose random draw is 1 are flipped vertically."""
    np.random.seed(1337)
    mock_random = [1, 0]
    mock_random = np.reshape(mock_random, [2, 1, 1, 1])
    input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
    expected_output = input_images.copy()
    expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
    self._run_test('vertical', expected_output, mock_random)

  def test_random_flip_inference(self):
    """training=0 is a no-op."""
    input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
    expected_output = input_images
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomFlip()
      actual_output = layer(input_images, training=0)
      self.assertAllClose(expected_output, actual_output)

  def test_random_flip_default(self):
    """Default mode flips both horizontally and vertically."""
    input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
    expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
    mock_random = [1, 1]
    mock_random = np.reshape(mock_random, [2, 1, 1, 1])
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_random,
    ):
      with self.cached_session():
        layer = image_preprocessing.RandomFlip()
        actual_output = layer(input_images, training=1)
        self.assertAllClose(expected_output, actual_output)

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """The layer name survives a get_config/from_config round trip."""
    layer = image_preprocessing.RandomFlip(name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomFlip.from_config(config)
    self.assertEqual(layer_1.name, layer.name)

  def test_random_flip_unbatched_image(self):
    """A rank-3 (unbatched) image is flipped as a single sample."""
    input_image = np.random.random((4, 4, 1)).astype(np.float32)
    expected_output = np.flip(input_image, axis=0)
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=0.,
    ):
      with self.cached_session():
        layer = image_preprocessing.RandomFlip('vertical')
        actual_output = layer(input_image, training=1)
        self.assertAllClose(expected_output, actual_output)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(keras_parameterized.TestCase):
  """Tests for `RandomContrast` with the contrast factor mocked out."""

  def _run_test(self, lower, upper, expected_output=None, mock_random=None):
    """Checks output against (inp - mean) * factor + mean with a mocked factor.

    The mean is taken per-image over the height and width axes.
    """
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    if mock_random is None:
      mock_random = 0.2
    inp = np.random.random((num_samples, orig_height, orig_width, channels))
    if expected_output is None:
      # reduce mean on height.
      inp_mean = np.mean(inp, axis=1, keepdims=True)
      # reduce mean on width.
      inp_mean = np.mean(inp_mean, axis=2, keepdims=True)
      expected_output = (inp - inp_mean) * mock_random + inp_mean
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_random,
    ):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomContrast((lower, upper))
        actual_output = layer(inp, training=True)
        self.assertAllClose(expected_output, actual_output)

  @parameterized.named_parameters(('random_contrast_2_by_5', 0.2, 0.5),
                                  ('random_contrast_2_by_13', 0.2, 1.3),
                                  ('random_contrast_5_by_2', 0.5, 0.2),
                                  ('random_contrast_10_by_10', 1.0, 1.0))
  def test_random_contrast(self, lower, upper):
    """Various (lower, upper) ranges produce the mocked adjustment."""
    self._run_test(lower, upper)

  @parameterized.named_parameters(('random_contrast_amplitude_2', 0.2),
                                  ('random_contrast_amplitude_5', 0.5))
  def test_random_contrast_amplitude(self, amplitude):
    """A single scalar amplitude is accepted and runs without error."""
    input_images = np.random.random((2, 5, 8, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomContrast(amplitude)
      layer(input_images)

  def test_random_contrast_inference(self):
    """training=False is a no-op."""
    input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
    expected_output = input_images
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomContrast((0.1, 0.2))
      actual_output = layer(input_images, training=False)
      self.assertAllClose(expected_output, actual_output)

  def test_random_contrast_int_dtype(self):
    """Integer image input is accepted without error."""
    input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomContrast((0.1, 0.2))
      layer(input_images)

  def test_random_contrast_invalid_bounds(self):
    """Negative bounds or lower > 1 are rejected at construction time."""
    with self.assertRaises(ValueError):
      image_preprocessing.RandomContrast((-0.1, .5))

    with self.assertRaises(ValueError):
      image_preprocessing.RandomContrast((1.1, .5))

    with self.assertRaises(ValueError):
      image_preprocessing.RandomContrast((0.1, -0.2))

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """The layer name survives a get_config/from_config round trip."""
    layer = image_preprocessing.RandomContrast((.5, .6), name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomContrast.from_config(config)
    self.assertEqual(layer_1.name, layer.name)

  def test_unbatched_image(self):
    """A rank-3 (unbatched) image uses axes 0/1 for the per-image mean."""
    np.random.seed(1337)
    mock_random = 0.2
    inp = np.random.random((4, 4, 1))
    inp_mean = np.mean(inp, axis=0, keepdims=True)
    inp_mean = np.mean(inp_mean, axis=1, keepdims=True)
    expected_output = (inp - inp_mean) * mock_random + inp_mean
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_random,
    ):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomContrast((0.2, 0.5))
        actual_output = layer(inp, training=True)
        self.assertAllClose(expected_output, actual_output)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomTranslationTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with testing_utils.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomTranslation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_translate_4_by_6', .4, .6), ('random_translate_3_by_2', .3, .2),
('random_translate_tuple_factor', (-.5, .4), (.2, .3)))
def test_random_translation(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_translation_up_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0.)
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_up_numeric_constant(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0., fill_mode='constant')
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[0, 0, 0, 0, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(.2, .2), width_factor=0.)
output_image = layer(input_image)
expected_output = np.asarray([
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_asymmetric_size_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype(dtype)
# Shifting by .5 * 8 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(.5, .5), width_factor=0.)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[6, 7],
[4, 5],
[2, 3],
[0, 1],
[0, 1],
[2, 3],
[4, 5],
[6, 7],
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 8, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_constant(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(.2, .2), width_factor=0., fill_mode='constant')
output_image = layer(input_image)
expected_output = np.asarray([
[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=0., width_factor=(-.2, -.2))
output_image = layer(input_image)
expected_output = np.asarray([
[1, 2, 3, 4, 4],
[6, 7, 8, 9, 9],
[11, 12, 13, 14, 14],
[16, 17, 18, 19, 19],
[21, 22, 23, 24, 24],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_constant(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=0., width_factor=(-.2, -.2), fill_mode='constant')
output_image = layer(input_image)
expected_output = np.asarray([
[1, 2, 3, 4, 0],
[6, 7, 8, 9, 0],
[11, 12, 13, 14, 0],
[16, 17, 18, 19, 0],
[21, 22, 23, 24, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with testing_utils.use_gpu():
layer = image_preprocessing.RandomTranslation(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0.)
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomTransformTest(keras_parameterized.TestCase):
def _run_random_transform_with_mock(self,
transform_matrix,
expected_output,
mode,
fill_value=0.0,
interpolation='bilinear'):
inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
with self.cached_session():
output = image_preprocessing.transform(
inp,
transform_matrix,
fill_mode=mode,
fill_value=fill_value,
interpolation=interpolation)
self.assertAllClose(expected_output, output)
def test_random_translation_reflect(self):
# reflected output is (dcba|abcd|dcba)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 1., 2.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
# Test left shift by 1.
# reflected output is (dcba|abcd|dcba)
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 2.],
[4., 5., 5.],
[7., 8., 8.],
[10., 11., 11.],
[13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[3., 3., 4],
[6., 6., 7.],
[9., 9., 10.],
[12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
def test_random_translation_wrap(self):
# warpped output is (abcd|abcd|abcd)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[12., 13., 14.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[0., 1., 2.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 0.],
[4., 5., 3.],
[7., 8., 6.],
[10., 11., 9.],
[13., 14., 12.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[2., 0., 1.],
[5., 3., 4],
[8., 6., 7.],
[11., 9., 10.],
[14., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
def test_random_translation_nearest(self):
# nearest output is (aaaa|abcd|dddd)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 1., 2.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 2.],
[4., 5., 5.],
[7., 8., 8.],
[10., 11., 11.],
[13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[3., 3., 4],
[6., 6., 7.],
[9., 9., 10.],
[12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
def test_random_translation_constant_0(self):
# constant output is (0000|abcd|0000)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 0.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 0.],
[4., 5., 0.],
[7., 8., 0.],
[10., 11., 0.],
[13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[0., 3., 4],
[0., 6., 7.],
[0., 9., 10.],
[0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
def test_random_translation_constant_1(self):
with tf.compat.forward_compatibility_horizon(2020, 8, 6):
# constant output is (1111|abcd|1111)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 1., 1.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[1., 1., 1.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 1.],
[4., 5., 1.],
[7., 8., 1.],
[10., 11., 1.],
[13., 14., 1.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 0., 1.],
[1., 3., 4],
[1., 6., 7.],
[1., 9., 10.],
[1., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
def test_random_translation_nearest_interpolation(self):
# nearest output is (aaaa|abcd|dddd)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 0.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 0.],
[4., 5., 0.],
[7., 8., 0.],
[10., 11., 0.],
[13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[0., 3., 4],
[0., 6., 7.],
[0., 9., 10.],
[0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'factor': factor}
with testing_utils.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomRotation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(('random_rotate_4', .4),
('random_rotate_3', .3),
('random_rotate_tuple_factor', (-.5, .4)))
def test_random_rotation(self, factor):
self._run_test(factor)
def test_random_rotation_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with testing_utils.use_gpu():
layer = image_preprocessing.RandomRotation(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
def test_distribution_strategy(self):
"""Tests that RandomRotation can be created within distribution strategies."""
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
with testing_utils.use_gpu():
strat = MirroredStrategy(devices=['cpu', 'gpu'])
with strat.scope():
layer = image_preprocessing.RandomRotation(.5)
output = strat.run(lambda: layer(input_images, training=True))
values = output.values
self.assertAllEqual(2, len(values))
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomRotation(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomRotation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.float32)
# 180 rotation.
layer = image_preprocessing.RandomRotation(factor=(0.5, 0.5))
output_image = layer(input_image)
expected_output = np.asarray([
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]).astype(np.float32)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllClose(expected_output, output_image)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with testing_utils.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomZoom,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_zoom_4_by_6', -.4, -.6), ('random_zoom_2_by_3', -.2, -.3),
('random_zoom_tuple_factor', (-.4, -.5), (-.2, -.3)))
def test_random_zoom_in(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
@parameterized.named_parameters(
('random_zoom_4_by_6', .4, .6), ('random_zoom_2_by_3', .2, .3),
('random_zoom_tuple_factor', (.4, .5), (.2, .3)))
def test_random_zoom_out(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_zoom_in_numeric(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = image_preprocessing.RandomZoom((-.5, -.5), (-.5, -.5),
interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray([
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = image_preprocessing.RandomZoom((.5, .5), (.8, .8),
fill_mode='constant',
interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray([
[0, 0, 0, 0, 0],
[0, 5, 7, 9, 0],
[0, 10, 12, 14, 0],
[0, 20, 22, 24, 0],
[0, 0, 0, 0, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = image_preprocessing.RandomZoom((.5, .5),
fill_mode='constant',
interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray([
[0, 0, 0, 0, 0],
[0, 6, 7, 9, 0],
[0, 11, 12, 14, 0],
[0, 21, 22, 24, 0],
[0, 0, 0, 0, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with testing_utils.use_gpu():
layer = image_preprocessing.RandomZoom(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomZoom(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
layer = image_preprocessing.RandomZoom((-.5, -.5), (-.5, -.5),
interpolation='nearest')
output_image = layer(input_image)
expected_output = np.asarray([
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with testing_utils.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomHeight(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[2], 8)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_height_4_by_6', (.4, .6)),
('random_height_3_by_2', (-.3, .2)),
('random_height_3', .3))
def test_random_height_basic(self, factor):
self._run_test(factor)
def test_valid_random_height(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0
with tf.compat.v1.test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf.compat.v1.test.mock.patch.object(
gen_stateless_random_ops_v2,
'stateless_random_uniform_v2',
return_value=mock_factor):
with testing_utils.use_gpu():
img = np.random.random((12, 5, 8, 3))
layer = image_preprocessing.RandomHeight(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
def test_random_height_longer_numeric(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 6), (2, 3, 1)).astype(dtype)
layer = image_preprocessing.RandomHeight(factor=(1., 1.))
# Return type of RandomHeight() is float32 if `interpolation` is not
# set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired dtype.
output_image = tf.cast(
layer(np.expand_dims(input_image, axis=0)), dtype=dtype)
# pyformat: disable
expected_output = np.asarray([
[0, 1, 2],
[0.75, 1.75, 2.75],
[2.25, 3.25, 4.25],
[3, 4, 5]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 3, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_height_shorter_numeric(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 8), (4, 2, 1)).astype(dtype)
layer = image_preprocessing.RandomHeight(
factor=(-.5, -.5), interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
# pyformat: disable
expected_output = np.asarray([
[2, 3],
[6, 7]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_height_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomHeight((-1.5, .4))
def test_random_height_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with testing_utils.use_gpu():
layer = image_preprocessing.RandomHeight(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomHeight(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomHeight.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0
with tf.compat.v1.test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf.compat.v1.test.mock.patch.object(
gen_stateless_random_ops_v2,
'stateless_random_uniform_v2',
return_value=mock_factor):
with testing_utils.use_gpu():
img = np.random.random((5, 8, 3))
layer = image_preprocessing.RandomHeight(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with testing_utils.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomWidth(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[1], 5)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_width_4_by_6', (.4, .6)),
('random_width_3_by_2', (-.3, .2)),
('random_width_3', .3))
def test_random_width_basic(self, factor):
self._run_test(factor)
def test_valid_random_width(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0
with tf.compat.v1.test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf.compat.v1.test.mock.patch.object(
gen_stateless_random_ops_v2,
'stateless_random_uniform_v2',
return_value=mock_factor):
with testing_utils.use_gpu():
img = np.random.random((12, 8, 5, 3))
layer = image_preprocessing.RandomWidth(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[2], 3)
def test_random_width_longer_numeric(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 6), (3, 2, 1)).astype(dtype)
layer = image_preprocessing.RandomWidth(factor=(1., 1.))
# Return type of RandomWidth() is float32 if `interpolation` is not
# set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired dtype.
output_image = tf.cast(
layer(np.expand_dims(input_image, axis=0)), dtype=dtype)
# pyformat: disable
expected_output = np.asarray([
[0, 0.25, 0.75, 1],
[2, 2.25, 2.75, 3],
[4, 4.25, 4.75, 5]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 3, 4, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_width_shorter_numeric(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 8), (2, 4, 1)).astype(dtype)
layer = image_preprocessing.RandomWidth(
factor=(-.5, -.5), interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
# pyformat: disable
expected_output = np.asarray([
[1, 3],
[5, 7]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_width_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomWidth((-1.5, .4))
def test_random_width_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with testing_utils.use_gpu():
layer = image_preprocessing.RandomWidth(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomWidth(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomWidth.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0
with tf.compat.v1.test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf.compat.v1.test.mock.patch.object(
gen_stateless_random_ops_v2,
'stateless_random_uniform_v2',
return_value=mock_factor):
with testing_utils.use_gpu():
img = np.random.random((8, 5, 3))
layer = image_preprocessing.RandomWidth(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class LearningPhaseTest(keras_parameterized.TestCase):
def test_plain_call(self):
layer = image_preprocessing.RandomWidth(.5, seed=123)
shape = (12, 12, 3)
img = np.random.random((12,) + shape)
out = layer(img) # Default to training=True
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = layer(img, training=True)
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = layer(img, training=False)
self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
def test_call_in_container(self):
layer1 = image_preprocessing.RandomWidth(.5, seed=123)
layer2 = image_preprocessing.RandomHeight(.5, seed=123)
seq = sequential.Sequential([layer1, layer2])
shape = (12, 12, 3)
img = np.random.random((12,) + shape)
out = seq(img) # Default to training=True
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = seq(img, training=True)
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = seq(img, training=False)
self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class DeterminismTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('random_flip', image_preprocessing.RandomFlip),
('random_contrast',
functools.partial(image_preprocessing.RandomContrast, factor=1.)),
('random_crop',
functools.partial(image_preprocessing.RandomCrop, height=2, width=2)),
('random_translation',
functools.partial(image_preprocessing.RandomTranslation, 0.3, 0.2)),
('random_rotation',
functools.partial(image_preprocessing.RandomRotation, 0.5)),
('random_zoom', functools.partial(image_preprocessing.RandomZoom, 0.2)),
('random_height', functools.partial(image_preprocessing.RandomHeight,
0.4)),
('random_width', functools.partial(image_preprocessing.RandomWidth, 0.3)),
)
def test_seed_constructor_arg(self, layer_cls):
input_image = np.random.random((2, 5, 8, 3)).astype(np.float32)
layer1 = layer_cls(seed=0.)
layer2 = layer_cls(seed=0.)
layer1_output = layer1(input_image)
layer2_output = layer2(input_image)
self.assertAllClose(layer1_output.numpy().tolist(),
layer2_output.numpy().tolist())
if __name__ == '__main__':
tf.test.main()
| 64,039 | 38.288344 | 82 | py |
keras | keras-master/keras/layers/preprocessing/normalization_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.preprocessing import normalization
from keras.layers.preprocessing import preprocessing_test_utils
def _get_layer_computation_test_cases():
  """Builds the named parameter sets for `test_layer_computation`.

  Each base case supplies `adapt_data` to fit the layer on, the `axis`
  argument, `test_data` to normalize, and the `expected` output.  Every base
  case is then duplicated with `use_dataset` True/False so each computation is
  exercised both on in-memory arrays and on `tf.data` pipelines.
  """
  test_cases = ({
      "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
      "axis": -1,
      "test_data": np.array([[1.], [2.], [3.]], np.float32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_single_element"
  }, {
      "adapt_data": np.array([[1], [2], [3], [4], [5]], dtype=np.int32),
      "axis": -1,
      "test_data": np.array([[1], [2], [3]], np.int32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_int_data"
  }, {
      "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
      "axis": None,
      "test_data": np.array([[1.], [2.], [3.]], np.float32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_single_element_none_axis"
  }, {
      "adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
      "axis": None,
      "test_data": np.array([[1.], [2.], [3.]], np.float32),
      "expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
      "testcase_name": "2d_single_element_none_axis_flat_data"
  }, {
      "adapt_data":
          np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
                   np.float32),
      "axis":
          1,
      "test_data":
          np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
                   np.float32),
      "expected":
          np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
                    [[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
                   np.float32),
      "testcase_name":
          "3d_internal_axis"
  }, {
      "adapt_data":
          np.array(
              [[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
              np.float32),
      "axis": (1, 2),
      "test_data":
          np.array(
              [[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
              np.float32),
      "expected":
          np.array(
              [[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
              np.float32),
      "testcase_name":
          "3d_multiple_axis"
  }, {
      # All-zero data: verifies the layer does not divide by zero variance.
      "adapt_data":
          np.zeros((3, 4)),
      "axis": -1,
      "test_data":
          np.zeros((3, 4)),
      "expected":
          np.zeros((3, 4)),
      "testcase_name":
          "zero_variance"
  })
  crossed_test_cases = []
  # Cross above test cases with use_dataset in (True, False)
  for use_dataset in (True, False):
    for case in test_cases:
      case = case.copy()
      if use_dataset:
        case["testcase_name"] = case["testcase_name"] + "_with_dataset"
      case["use_dataset"] = use_dataset
      crossed_test_cases.append(case)
  return crossed_test_cases
@keras_parameterized.run_all_keras_modes
class NormalizationTest(keras_parameterized.TestCase,
                        preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for `Normalization` behavior that does not require `adapt`."""

  def test_broadcasting_during_direct_setting(self):
    """Directly-set 1-element mean/variance broadcast over wider inputs."""
    layer = normalization.Normalization(axis=-1, mean=[1.0], variance=[1.0])
    output = layer(np.array([[1., 2.]]))
    expected_output = [[0., 1.]]
    self.assertAllClose(output, expected_output)
    # Directly-set statistics are constants, not trainable/trackable weights.
    self.assertAllClose(layer.get_weights(), [])

  def test_broadcasting_during_direct_setting_with_tensors(self):
    """Same as above, but with mean/variance given as `tf.Tensor` (TF2 only)."""
    if not tf.executing_eagerly():
      self.skipTest("Only supported in TF2.")

    layer = normalization.Normalization(
        axis=-1,
        mean=tf.constant([1.0]),
        variance=tf.constant([1.0]))
    output = layer(np.array([[1., 2.]]))
    expected_output = [[0., 1.]]
    self.assertAllClose(output, expected_output)
    self.assertAllClose(layer.get_weights(), [])

  def test_1d_data(self):
    """1D input keeps its shape and is normalized elementwise."""
    data = np.array([0., 2., 0., 2.])
    layer = normalization.Normalization(mean=1.0, variance=1.0)
    output = layer(data)
    self.assertListEqual(output.shape.as_list(), [4])
    self.assertAllClose(output, [-1, 1, -1, 1])

  def test_0d_data(self):
    """Scalar input with `axis=None` yields a scalar output."""
    layer = normalization.Normalization(axis=None, mean=1.0, variance=1.0)
    output = layer(0.)
    self.assertListEqual(output.shape.as_list(), [])
    self.assertAllClose(output, -1)

  def test_broadcasting_during_direct_setting_with_variables_fails(self):
    """Passing `tf.Variable` statistics is rejected at construction time."""
    with self.assertRaisesRegex(ValueError, "passing a Variable"):
      _ = normalization.Normalization(
          axis=-1,
          mean=tf.Variable([1.0]),
          variance=tf.Variable([2.0]))

  def test_keeping_an_unknown_axis_fails(self):
    """A kept axis must have a statically-known dimension at build time."""
    layer = normalization.Normalization(axis=-1)
    with self.assertRaisesRegex(ValueError, "axis.*must have known shape"):
      layer.build([None])

  @parameterized.parameters(
      # Out of bounds
      {"axis": 3},
      {"axis": -4},
      # In a tuple
      {"axis": (1, 3)},
      {"axis": (1, -4)},
  )
  def test_bad_axis_fail_build(self, axis):
    """Axes outside the input rank are rejected in `build`."""
    layer = normalization.Normalization(axis=axis)
    with self.assertRaisesRegex(ValueError, "in the range"):
      layer.build([None, 2, 3])

  def test_list_input(self):
    """Plain Python lists are not accepted as a single input."""
    with self.assertRaisesRegex(
        ValueError, ("Normalization only accepts a single input. If you are "
                     "passing a python list or tuple as a single input, "
                     "please convert to a numpy array or `tf.Tensor`.")):
      normalization.Normalization()([1, 2, 3])

  def test_scalar_input(self):
    """A bare scalar fails because the default `axis=-1` cannot apply."""
    with self.assertRaisesRegex(ValueError,
                                "axis.*values must be in the range"):
      normalization.Normalization()(1)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class NormalizationAdaptTest(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for `Normalization.adapt` and model save/load round-trips."""

  def test_layer_api_compatibility(self):
    """The standard `layer_test` harness passes with adapted statistics."""
    cls = normalization.Normalization
    output_data = testing_utils.layer_test(
        cls,
        kwargs={"axis": -1},
        input_shape=(None, 3),
        input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32),
        validate_training=False,
        adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]]))
    expected = np.array([[3., -3., -0.33333333], [9., 5., 1.]])
    self.assertAllClose(expected, output_data)

  @parameterized.named_parameters(*_get_layer_computation_test_cases())
  def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
                             expected):
    """Adapted layers normalize each generated case to its expected output."""
    input_shape = tuple([test_data.shape[i] for i in range(1, test_data.ndim)])
    if use_dataset:
      # Keras APIs expect batched datasets
      adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
          test_data.shape[0] // 2)
      test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
          test_data.shape[0] // 2)

    layer = normalization.Normalization(axis=axis)
    layer.adapt(adapt_data)

    input_data = keras.Input(shape=input_shape)
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()
    output_data = model.predict(test_data)
    self.assertAllClose(expected, output_data)

  def test_1d_unbatched_adapt(self):
    """`adapt` accepts an unbatched dataset of 1D elements."""
    ds = tf.data.Dataset.from_tensor_slices([
        [2., 0., 2., 0.],
        [0., 2., 0., 2.],
    ])
    layer = normalization.Normalization(axis=-1)
    layer.adapt(ds)
    output_ds = ds.map(layer)
    self.assertAllClose(
        list(output_ds.as_numpy_iterator()), [
            [1., -1., 1., -1.],
            [-1., 1., -1., 1.],
        ])

  def test_0d_unbatched_adapt(self):
    """`adapt` accepts an unbatched dataset of scalars with `axis=None`."""
    ds = tf.data.Dataset.from_tensor_slices([2., 0., 2., 0.])
    layer = normalization.Normalization(axis=None)
    layer.adapt(ds)
    output_ds = ds.map(layer)
    self.assertAllClose(list(output_ds.as_numpy_iterator()), [1., -1., 1., -1.])

  @parameterized.parameters(
      # Results should be identical no matter how the axes are specified (3d).
      {"axis": (1, 2)},
      {"axis": (2, 1)},
      {"axis": (1, -1)},
      {"axis": (-1, 1)},
  )
  def test_axis_permutations(self, axis):
    """Equivalent axis tuples (order, sign) produce identical results."""
    layer = normalization.Normalization(axis=axis)
    # data.shape = [2, 2, 3]
    data = np.array([[[0., 1., 2.], [0., 2., 6.]],
                     [[2., 3., 4.], [3., 6., 10.]]])
    expect = np.array([[[-1., -1., -1.], [-1., -1., -1.]],
                       [[1., 1., 1.], [1., 1., 1.]]])
    layer.adapt(data)
    self.assertAllClose(expect, layer(data))

  def test_model_summary_after_layer_adapt(self):
    """`model.summary()` works after adapting a layer inside a Sequential."""
    data = np.array([[[0., 1., 2.], [0., 2., 6.]],
                     [[2., 3., 4.], [3., 6., 10.]]])
    layer = normalization.Normalization(axis=-1)
    layer.adapt(data)
    model = keras.Sequential(
        [layer,
         keras.layers.Dense(64, activation="relu"),
         keras.layers.Dense(1)])
    model.summary()

  def test_multiple_adapts(self):
    """Re-adapting a layer updates the state used by a compiled model."""
    first_adapt = [[0], [2], [0], [2]]
    second_adapt = [[2], [4], [2], [4]]
    predict_input = [[2], [2]]
    expected_first_output = [[1], [1]]
    expected_second_output = [[-1], [-1]]

    inputs = keras.Input(shape=(1,), dtype=tf.int32)
    layer = normalization.Normalization(axis=-1)
    layer.adapt(first_adapt)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)

    actual_output = model.predict(predict_input)
    self.assertAllClose(actual_output, expected_first_output)

    # Re-adapt the layer on new inputs.
    layer.adapt(second_adapt)
    # Re-compile the model.
    model.compile()
    # `predict` should now use the new model state.
    actual_output = model.predict(predict_input)
    self.assertAllClose(actual_output, expected_second_output)

  @parameterized.parameters(
      {"adapted": True},
      {"adapted": False},
  )
  def test_saved_model_tf(self, adapted):
    """A `tf.saved_model` round-trip preserves adapted or constructed stats."""
    input_data = [[0.], [2.], [0.], [2.]]
    expected_output = [[-1.], [1.], [-1.], [1.]]

    inputs = keras.Input(shape=(1,), dtype=tf.float32)
    if adapted:
      layer = normalization.Normalization(axis=-1)
      layer.adapt(input_data)
    else:
      layer = normalization.Normalization(mean=1., variance=1.)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)

    output_data = model.predict(input_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_saved_model")
    tf.saved_model.save(model, output_path)
    loaded_model = tf.saved_model.load(output_path)
    f = loaded_model.signatures["serving_default"]

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_data = f(tf.constant(input_data))["normalization"]
    self.assertAllClose(new_output_data, expected_output)

  @parameterized.parameters(
      {"adapted": True},
      {"adapted": False},
  )
  def test_saved_model_keras(self, adapted):
    """A Keras SavedModel round-trip preserves normalization statistics."""
    input_data = [[0.], [2.], [0.], [2.]]
    expected_output = [[-1.], [1.], [-1.], [1.]]

    cls = normalization.Normalization
    inputs = keras.Input(shape=(1,), dtype=tf.float32)
    if adapted:
      layer = cls(axis=-1)
      layer.adapt(input_data)
    else:
      layer = cls(mean=1., variance=1.)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)

    output_data = model.predict(input_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")
    loaded_model = keras.models.load_model(
        output_path, custom_objects={"Normalization": cls})

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_data = loaded_model.predict(input_data)
    self.assertAllClose(new_output_data, expected_output)

  @parameterized.parameters(
      {"adapted": True},
      {"adapted": False},
  )
  def test_saved_weights_keras(self, adapted):
    """Saving/loading only the weights restores normalization statistics."""
    input_data = [[0.], [2.], [0.], [2.]]
    expected_output = [[-1.], [1.], [-1.], [1.]]

    cls = normalization.Normalization
    inputs = keras.Input(shape=(1,), dtype=tf.float32)
    if adapted:
      layer = cls(axis=-1)
      layer.adapt(input_data)
    else:
      layer = cls(mean=1., variance=1.)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)

    output_data = model.predict(input_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_weights")
    model.save_weights(output_path, save_format="tf")
    new_model = keras.Model.from_config(
        model.get_config(), custom_objects={"Normalization": cls})
    new_model.load_weights(output_path)

    # Validate correctness of the new model.
    new_output_data = new_model.predict(input_data)
    self.assertAllClose(new_output_data, expected_output)
if __name__ == "__main__":
  # Run this test module's cases when executed directly.
  tf.test.main()
| 14,196 | 34.4925 | 80 | py |
keras | keras-master/keras/layers/preprocessing/image_preprocessing.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image preprocessing layers."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras import backend
from keras.engine import base_layer
from keras.engine import base_preprocessing_layer
from keras.preprocessing import image as image_preprocessing
from keras.utils import control_flow_util
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util.tf_export import keras_export
# Alias for readability; the table below maps the user-facing string names
# accepted by `Resizing` to the corresponding TF resize methods.
ResizeMethod = tf.image.ResizeMethod

_RESIZE_METHODS = {
    'bilinear': ResizeMethod.BILINEAR,
    'nearest': ResizeMethod.NEAREST_NEIGHBOR,
    'bicubic': ResizeMethod.BICUBIC,
    'area': ResizeMethod.AREA,
    'lanczos3': ResizeMethod.LANCZOS3,
    'lanczos5': ResizeMethod.LANCZOS5,
    'gaussian': ResizeMethod.GAUSSIAN,
    'mitchellcubic': ResizeMethod.MITCHELLCUBIC
}

# Height/width axes for `"channels_last"` image tensors; negative indices work
# for both batched (4D) and unbatched (3D) inputs.
H_AXIS = -3
W_AXIS = -2
def check_fill_mode_and_interpolation(fill_mode, interpolation):
  """Validates `fill_mode` and `interpolation` arguments.

  Args:
    fill_mode: One of `'reflect'`, `'wrap'`, `'constant'`, `'nearest'`.
    interpolation: One of `'nearest'`, `'bilinear'`.

  Raises:
    NotImplementedError: If either argument is not a supported value.
  """
  supported_fill_modes = ('reflect', 'wrap', 'constant', 'nearest')
  supported_interpolations = ('nearest', 'bilinear')
  if fill_mode not in supported_fill_modes:
    message = ('Unknown `fill_mode` {}. Only `reflect`, `wrap`, '
               '`constant` and `nearest` are supported.').format(fill_mode)
    raise NotImplementedError(message)
  if interpolation not in supported_interpolations:
    message = ('Unknown `interpolation` {}. Only `nearest` and '
               '`bilinear` are supported.').format(interpolation)
    raise NotImplementedError(message)
@keras_export('keras.layers.Resizing',
              'keras.layers.experimental.preprocessing.Resizing')
class Resizing(base_layer.Layer):
  """Image resizing layer.

  Resizes batched (4D) or unbatched (3D) `"channels_last"` image inputs to
  the target `(height, width)`.

  Args:
    height: Integer, the height of the output shape.
    width: Integer, the width of the output shape.
    interpolation: String, the interpolation method. Defaults to `"bilinear"`.
      Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`, `"lanczos3"`,
      `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
    crop_to_aspect_ratio: If True, resize the images without aspect
      ratio distortion. When the original aspect ratio differs from the target
      aspect ratio, the output image will be cropped so as to return the
      largest possible window in the image (of size `(height, width)`) that
      matches the target aspect ratio. By default
      (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
  """

  def __init__(self,
               height,
               width,
               interpolation='bilinear',
               crop_to_aspect_ratio=False,
               **kwargs):
    self.target_height = height
    self.target_width = width
    self.interpolation = interpolation
    self.crop_to_aspect_ratio = crop_to_aspect_ratio
    # Resolve the string name to the TF resize-method enum up front.
    self._interpolation_method = get_interpolation(interpolation)
    super(Resizing, self).__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('Resizing').set(True)

  def call(self, inputs):
    target_size = [self.target_height, self.target_width]
    if self.crop_to_aspect_ratio:
      # `smart_resize` crops to the target aspect ratio before resizing.
      return image_preprocessing.smart_resize(
          inputs,
          size=target_size,
          interpolation=self._interpolation_method)
    return tf.image.resize(
        inputs, size=target_size, method=self._interpolation_method)

  def compute_output_shape(self, input_shape):
    output_shape = tf.TensorShape(input_shape).as_list()
    output_shape[H_AXIS] = self.target_height
    output_shape[W_AXIS] = self.target_width
    return tf.TensorShape(output_shape)

  def get_config(self):
    config = super(Resizing, self).get_config()
    config.update({
        'height': self.target_height,
        'width': self.target_width,
        'interpolation': self.interpolation,
        'crop_to_aspect_ratio': self.crop_to_aspect_ratio,
    })
    return config
@keras_export('keras.layers.CenterCrop',
              'keras.layers.experimental.preprocessing.CenterCrop')
class CenterCrop(base_layer.Layer):
  """Crop the central portion of the images to target height and width.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., target_height, target_width, channels)`.

  If the input height/width is even and the target height/width is odd (or
  inversely), the input image is left-padded by 1 pixel.

  Args:
    height: Integer, the height of the output shape.
    width: Integer, the width of the output shape.
  """

  def __init__(self, height, width, **kwargs):
    self.target_height = height
    self.target_width = width
    super(CenterCrop, self).__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('CenterCrop').set(True)

  def call(self, inputs):
    inputs = tf.convert_to_tensor(inputs)
    inputs_shape = tf.shape(inputs)
    # Rank-3 inputs are single images; rank-4 inputs are batches.
    unbatched = inputs.shape.rank == 3
    img_hd = inputs_shape[H_AXIS]
    img_wd = inputs_shape[W_AXIS]
    img_hd_diff = img_hd - self.target_height
    img_wd_diff = img_wd - self.target_width
    # Graph-compatible runtime asserts: the crop window must fit inside the
    # input image in both dimensions.
    checks = []
    checks.append(
        tf.debugging.assert_non_negative(
            img_hd_diff,
            message='The crop height {} should not be greater than input '
            'height.'.format(self.target_height)))
    checks.append(
        tf.debugging.assert_non_negative(
            img_wd_diff,
            message='The crop width {} should not be greater than input '
            'width.'.format(self.target_width)))
    with tf.control_dependencies(checks):
      # Crop offset is floor(diff / 2), i.e. the window is centered; for odd
      # differences the extra pixel comes off the bottom/right.
      bbox_h_start = tf.cast(img_hd_diff / 2, tf.int32)
      bbox_w_start = tf.cast(img_wd_diff / 2, tf.int32)
      if unbatched:
        bbox_begin = tf.stack([bbox_h_start, bbox_w_start, 0])
        # -1 in a `tf.slice` size keeps the full extent of that dimension.
        bbox_size = tf.stack([self.target_height, self.target_width, -1])
      else:
        bbox_begin = tf.stack([0, bbox_h_start, bbox_w_start, 0])
        bbox_size = tf.stack([-1, self.target_height, self.target_width, -1])
      outputs = tf.slice(inputs, bbox_begin, bbox_size)
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    input_shape[H_AXIS] = self.target_height
    input_shape[W_AXIS] = self.target_width
    return tf.TensorShape(input_shape)

  def get_config(self):
    config = {
        'height': self.target_height,
        'width': self.target_width,
    }
    base_config = super(CenterCrop, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RandomCrop',
              'keras.layers.experimental.preprocessing.RandomCrop')
class RandomCrop(base_layer.Layer):
  """Randomly crop the images to target height and width.

  This layer will crop all the images in the same batch to the same cropping
  location.
  By default, random cropping is only applied during training. At inference
  time, the images will be first rescaled to preserve the shorter side, and
  center cropped. If you need to apply random cropping at inference time,
  set `training` to True when calling the layer.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., target_height, target_width, channels)`.

  Args:
    height: Integer, the height of the output shape.
    width: Integer, the width of the output shape.
    seed: Integer. Used to create a random seed.
  """

  def __init__(self, height, width, seed=None, **kwargs):
    self.height = height
    self.width = width
    self.seed = seed
    # Stateless-seed generator; a fixed `seed` makes crops reproducible.
    self._rng = make_generator(self.seed)
    super(RandomCrop, self).__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomCrop').set(True)

  def call(self, inputs, training=True):
    if training is None:
      training = backend.learning_phase()
    inputs = tf.convert_to_tensor(inputs)
    unbatched = inputs.shape.rank == 3

    def random_cropped_inputs():
      """Cropped inputs with stateless random ops."""
      shape = tf.shape(inputs)
      if unbatched:
        crop_size = tf.stack([self.height, self.width, shape[-1]])
      else:
        # Batch and channel dims are kept whole; only H/W are cropped.
        crop_size = tf.stack([shape[0], self.height, self.width, shape[-1]])
      check = tf.Assert(
          tf.reduce_all(shape >= crop_size),
          [self.height, self.width])
      with tf.control_dependencies([check]):
        limit = shape - crop_size + 1
        # Draw a per-dimension offset uniform in [0, limit): sample over the
        # full dtype range, then reduce modulo `limit`.
        offset = stateless_random_ops.stateless_random_uniform(
            tf.shape(shape),
            dtype=crop_size.dtype,
            maxval=crop_size.dtype.max,
            seed=self._rng.make_seeds()[:, 0]) % limit
        return tf.slice(inputs, offset, crop_size)

    # TODO(b/143885775): Share logic with Resize and CenterCrop.
    def resize_and_center_cropped_inputs():
      """Deterministically resize to shorter side and center crop."""
      input_shape = tf.shape(inputs)
      input_height_t = input_shape[H_AXIS]
      input_width_t = input_shape[W_AXIS]
      # True when the input is taller (relative to the target) than it is
      # wide; decides which side is the "shorter" one to preserve.
      ratio_cond = (input_height_t / input_width_t > (self.height / self.width))
      # pylint: disable=g-long-lambda
      resized_height = control_flow_util.smart_cond(
          ratio_cond,
          lambda: tf.cast(self.width * input_height_t / input_width_t,
                          input_height_t.dtype), lambda: self.height)
      resized_width = control_flow_util.smart_cond(
          ratio_cond, lambda: self.width,
          lambda: tf.cast(self.height * input_width_t / input_height_t,
                          input_width_t.dtype))
      # pylint: enable=g-long-lambda
      resized_inputs = tf.image.resize(
          images=inputs, size=tf.stack([resized_height, resized_width]))

      # Center-crop the resized image down to the exact target size.
      img_hd_diff = resized_height - self.height
      img_wd_diff = resized_width - self.width
      bbox_h_start = tf.cast(img_hd_diff / 2, tf.int32)
      bbox_w_start = tf.cast(img_wd_diff / 2, tf.int32)
      if unbatched:
        bbox_begin = tf.stack([bbox_h_start, bbox_w_start, 0])
        bbox_size = tf.stack([self.height, self.width, -1])
      else:
        bbox_begin = tf.stack([0, bbox_h_start, bbox_w_start, 0])
        bbox_size = tf.stack([-1, self.height, self.width, -1])
      outputs = tf.slice(resized_inputs, bbox_begin, bbox_size)
      return outputs

    output = control_flow_util.smart_cond(training, random_cropped_inputs,
                                          resize_and_center_cropped_inputs)

    # Restore as much static shape information as possible.
    input_shape = inputs.shape.as_list()
    if unbatched:
      output_shape = [self.height, self.width, input_shape[-1]]
    else:
      output_shape = [input_shape[0], self.height, self.width, input_shape[-1]]
    output.set_shape(output_shape)
    return output

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    input_shape[H_AXIS] = self.height
    input_shape[W_AXIS] = self.width
    return tf.TensorShape(input_shape)

  def get_config(self):
    config = {
        'height': self.height,
        'width': self.width,
        'seed': self.seed,
    }
    base_config = super(RandomCrop, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Rescaling',
              'keras.layers.experimental.preprocessing.Rescaling')
class Rescaling(base_layer.Layer):
  """Rescale inputs as `inputs * scale + offset`.

  For instance:

  1. To rescale an input in the `[0, 255]` range
  to be in the `[0, 1]` range, you would pass `scale=1./255`.

  2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
  you would pass `scale=1./127.5, offset=-1`.

  The rescaling is applied both during training and inference.

  Input shape:
    Arbitrary.

  Output shape:
    Same as input.

  Args:
    scale: Float, the scale to apply to the inputs.
    offset: Float, the offset to apply to the inputs.
  """

  def __init__(self, scale, offset=0., **kwargs):
    self.scale = scale
    self.offset = offset
    super(Rescaling, self).__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('Rescaling').set(True)

  def call(self, inputs):
    # Do the arithmetic in the layer's compute dtype so mixed-precision
    # policies are respected.
    compute_dtype = self._compute_dtype
    casted_inputs = tf.cast(inputs, compute_dtype)
    casted_scale = tf.cast(self.scale, compute_dtype)
    casted_offset = tf.cast(self.offset, compute_dtype)
    return casted_inputs * casted_scale + casted_offset

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = super(Rescaling, self).get_config()
    config.update({
        'scale': self.scale,
        'offset': self.offset,
    })
    return config
# Valid values for the `mode` argument of `RandomFlip`.
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'
HORIZONTAL_AND_VERTICAL = 'horizontal_and_vertical'
@keras_export('keras.layers.RandomFlip',
              'keras.layers.experimental.preprocessing.RandomFlip')
class RandomFlip(base_layer.Layer):
  """Randomly flip each image horizontally and vertically.

  This layer will flip the images based on the `mode` attribute.
  During inference time, the output will be identical to input. Call the layer
  with `training=True` to flip the input.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Attributes:
    mode: String indicating which flip mode to use. Can be `"horizontal"`,
      `"vertical"`, or `"horizontal_and_vertical"`. Defaults to
      `"horizontal_and_vertical"`. `"horizontal"` is a left-right flip and
      `"vertical"` is a top-bottom flip.
    seed: Integer. Used to create a random seed.
  """

  def __init__(self,
               mode=HORIZONTAL_AND_VERTICAL,
               seed=None,
               **kwargs):
    super(RandomFlip, self).__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomFlip').set(True)
    self.mode = mode
    # Translate the string mode into two independent boolean switches.
    if mode == HORIZONTAL:
      self.horizontal = True
      self.vertical = False
    elif mode == VERTICAL:
      self.horizontal = False
      self.vertical = True
    elif mode == HORIZONTAL_AND_VERTICAL:
      self.horizontal = True
      self.vertical = True
    else:
      raise ValueError('RandomFlip layer {name} received an unknown mode '
                       'argument {arg}'.format(name=self.name, arg=mode))
    self.seed = seed
    # Stateless-seed generator; a fixed `seed` makes flips reproducible.
    self._rng = make_generator(self.seed)

  def call(self, inputs, training=True):
    if training is None:
      training = backend.learning_phase()

    def random_flipped_inputs():
      # Each flip draws a fresh stateless seed; horizontal and vertical flips
      # are decided independently.
      flipped_outputs = inputs
      if self.horizontal:
        flipped_outputs = tf.image.stateless_random_flip_left_right(
            flipped_outputs,
            self._rng.make_seeds()[:, 0])
      if self.vertical:
        flipped_outputs = tf.image.stateless_random_flip_up_down(
            flipped_outputs,
            self._rng.make_seeds()[:, 0])
      return flipped_outputs

    # Identity at inference time unless the caller forces `training=True`.
    output = control_flow_util.smart_cond(training, random_flipped_inputs,
                                          lambda: inputs)
    output.set_shape(inputs.shape)
    return output

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'mode': self.mode,
        'seed': self.seed,
    }
    base_config = super(RandomFlip, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
# TODO(tanzheny): Add examples, here and everywhere.
@keras_export('keras.layers.RandomTranslation',
              'keras.layers.experimental.preprocessing.RandomTranslation')
class RandomTranslation(base_layer.Layer):
  """Randomly translate each image during training.

  Args:
    height_factor: a float represented as fraction of value, or a tuple of size
      2 representing lower and upper bound for shifting vertically. A negative
      value means shifting image up, while a positive value means shifting image
      down. When represented as a single positive float, this value is used for
      both the upper and lower bound. For instance, `height_factor=(-0.2, 0.3)`
      results in an output shifted by a random amount in the range
      `[-20%, +30%]`.
      `height_factor=0.2` results in an output height shifted by a random amount
      in the range `[-20%, +20%]`.
    width_factor: a float represented as fraction of value, or a tuple of size 2
      representing lower and upper bound for shifting horizontally. A negative
      value means shifting image left, while a positive value means shifting
      image right. When represented as a single positive float, this value is
      used for both the upper and lower bound. For instance,
      `width_factor=(-0.2, 0.3)` results in an output shifted left by 20%, and
      shifted right by 30%. `width_factor=0.2` results in an output height
      shifted left or right by 20%.
    fill_mode: Points outside the boundaries of the input are filled according
      to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
      - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
        reflecting about the edge of the last pixel.
      - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
        filling all values beyond the edge with the same constant value k = 0.
      - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
        wrapping around to the opposite edge.
      - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
        nearest pixel.
    interpolation: Interpolation mode. Supported values: `"nearest"`,
      `"bilinear"`.
    seed: Integer. Used to create a random seed.
    fill_value: a float represents the value to be filled outside the boundaries
      when `fill_mode="constant"`.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.
  """

  def __init__(self,
               height_factor,
               width_factor,
               fill_mode='reflect',
               interpolation='bilinear',
               seed=None,
               fill_value=0.0,
               **kwargs):
    self.height_factor = height_factor
    # A scalar factor is normalized to a symmetric [-f, f] range.
    if isinstance(height_factor, (tuple, list)):
      self.height_lower = height_factor[0]
      self.height_upper = height_factor[1]
    else:
      self.height_lower = -height_factor
      self.height_upper = height_factor
    if self.height_upper < self.height_lower:
      raise ValueError('`height_factor` cannot have upper bound less than '
                       'lower bound, got {}'.format(height_factor))
    if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
      raise ValueError('`height_factor` must have values between [-1, 1], '
                       'got {}'.format(height_factor))

    self.width_factor = width_factor
    if isinstance(width_factor, (tuple, list)):
      self.width_lower = width_factor[0]
      self.width_upper = width_factor[1]
    else:
      self.width_lower = -width_factor
      self.width_upper = width_factor
    if self.width_upper < self.width_lower:
      raise ValueError('`width_factor` cannot have upper bound less than '
                       'lower bound, got {}'.format(width_factor))
    if abs(self.width_lower) > 1. or abs(self.width_upper) > 1.:
      raise ValueError('`width_factor` must have values between [-1, 1], '
                       'got {}'.format(width_factor))

    check_fill_mode_and_interpolation(fill_mode, interpolation)

    self.fill_mode = fill_mode
    self.fill_value = fill_value
    self.interpolation = interpolation
    self.seed = seed
    self._rng = make_generator(self.seed)
    super(RandomTranslation, self).__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomTranslation').set(
        True)

  def call(self, inputs, training=True):
    if training is None:
      training = backend.learning_phase()
    inputs = tf.convert_to_tensor(inputs)
    original_shape = inputs.shape
    unbatched = inputs.shape.rank == 3
    # The transform op only accepts rank 4 inputs, so if we have an unbatched
    # image, we need to temporarily expand dims to a batch.
    if unbatched:
      inputs = tf.expand_dims(inputs, 0)

    def random_translated_inputs():
      """Translated inputs with random ops."""
      inputs_shape = tf.shape(inputs)
      batch_size = inputs_shape[0]
      img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
      img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
      # Draw one (dy, dx) pair per image, as a fraction of the image size,
      # then convert fractions to pixels.
      height_translate = self._rng.uniform(
          shape=[batch_size, 1],
          minval=self.height_lower,
          maxval=self.height_upper,
          dtype=tf.float32)
      height_translate = height_translate * img_hd
      width_translate = self._rng.uniform(
          shape=[batch_size, 1],
          minval=self.width_lower,
          maxval=self.width_upper,
          dtype=tf.float32)
      width_translate = width_translate * img_wd
      # `get_translation_matrix` expects `[dx, dy]` (width first).
      translations = tf.cast(
          tf.concat([width_translate, height_translate], axis=1),
          dtype=tf.float32)
      return transform(
          inputs,
          get_translation_matrix(translations),
          interpolation=self.interpolation,
          fill_mode=self.fill_mode,
          fill_value=self.fill_value)

    output = control_flow_util.smart_cond(training, random_translated_inputs,
                                          lambda: inputs)
    if unbatched:
      output = tf.squeeze(output, 0)
    output.set_shape(original_shape)
    return output

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'height_factor': self.height_factor,
        'width_factor': self.width_factor,
        'fill_mode': self.fill_mode,
        'fill_value': self.fill_value,
        'interpolation': self.interpolation,
        'seed': self.seed,
    }
    base_config = super(RandomTranslation, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def get_translation_matrix(translations, name=None):
  """Returns projective transform(s) for the given translation(s).

  Args:
    translations: A matrix of 2-element lists representing `[dx, dy]`
      to translate for each image (for a batch of images).
    name: The name of the op.

  Returns:
    A tensor of shape `(num_images, 8)` projective transforms which can be given
    to `transform`.
  """
  with backend.name_scope(name or 'translation_matrix'):
    num_translations = tf.shape(translations)[0]
    # The translation matrix looks like:
    #     [[1 0 -dx]
    #      [0 1 -dy]
    #      [0 0 1]]
    # where the last entry is implicit.
    # Translation matrices are always float32.
    # The offsets are negated because `transform` matrices map *output*
    # coordinates back to *input* coordinates, so shifting the image by
    # (dx, dy) means sampling the input at (-dx, -dy).
    return tf.concat(
        values=[
            tf.ones((num_translations, 1), tf.float32),
            tf.zeros((num_translations, 1), tf.float32),
            -translations[:, 0, None],
            tf.zeros((num_translations, 1), tf.float32),
            tf.ones((num_translations, 1), tf.float32),
            -translations[:, 1, None],
            tf.zeros((num_translations, 2), tf.float32),
        ],
        axis=1)
def transform(images,
              transforms,
              fill_mode='reflect',
              fill_value=0.0,
              interpolation='bilinear',
              output_shape=None,
              name=None):
  """Applies the given projective transform(s) to the image(s).

  Args:
    images: A 4D `(num_images, num_rows, num_columns, num_channels)` (NHWC)
      tensor whose rank is statically known.
    transforms: Projective transform matrix/matrices: a vector of length 8 or
      a tensor of size N x 8. A row `[a0, a1, a2, b0, b1, b2, c0, c1]` maps
      the *output* point `(x, y)` to a transformed *input* point
      `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)` with
      `k = c0 x + c1 y + 1`, i.e. the transforms are *inverted* relative to
      input-to-output mapping. Gradients are not backpropagated into them.
    fill_mode: How points outside the input are filled; one of
      `{"constant", "reflect", "wrap", "nearest"}`:
      - reflect `(d c b a | a b c d | d c b a)`: mirror about the edge pixel.
      - constant `(k k k k | a b c d | k k k k)`: pad with `fill_value`.
      - wrap `(a b c d | a b c d | a b c d)`: wrap to the opposite edge.
      - nearest `(a a a a | a b c d | d d d d)`: repeat the edge pixel.
    fill_value: Float used outside the boundaries when
      `fill_mode="constant"`.
    interpolation: `"nearest"` or `"bilinear"`.
    output_shape: `[height, width]` of the result; `None` keeps the input
      size.
    name: The name of the op.

  Returns:
    Image(s) with the same type as `images`, with the transform(s) applied.

  Raises:
    TypeError: If `image` is an invalid type.
    ValueError: If output shape is not a 1-D int32 Tensor of 2 elements.
  """
  with backend.name_scope(name or 'transform'):
    if output_shape is None:
      # Default to the input spatial size. In graph mode, prefer the static
      # value when it is known so shape inference stays fully defined.
      output_shape = tf.shape(images)[1:3]
      if not tf.executing_eagerly():
        static_shape = tf.get_static_value(output_shape)
        if static_shape is not None:
          output_shape = static_shape

    output_shape = tf.convert_to_tensor(
        output_shape, tf.int32, name='output_shape')
    if not output_shape.get_shape().is_compatible_with([2]):
      raise ValueError('output_shape must be a 1-D Tensor of 2 elements: '
                       'new_height, new_width, instead got '
                       '{}'.format(output_shape))

    fill_value = tf.convert_to_tensor(
        fill_value, tf.float32, name='fill_value')

    return tf.raw_ops.ImageProjectiveTransformV3(
        images=images,
        output_shape=output_shape,
        fill_value=fill_value,
        transforms=transforms,
        fill_mode=fill_mode.upper(),
        interpolation=interpolation.upper())
def get_rotation_matrix(angles, image_height, image_width, name=None):
  """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of
      images) a vector with one angle per image. The rank must be statically
      known (the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.
    name: The name of the op.

  Returns:
    A tensor of shape (num_images, 8): projective transforms usable with
    `transform`. A row `[a0, a1, a2, b0, b1, b2, c0, c1]` maps the *output*
    point `(x, y)` to a transformed *input* point
    `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)` where
    `k = c0 x + c1 y + 1`.
  """
  with backend.name_scope(name or 'rotation_matrix'):
    cos_a = tf.cos(angles)
    sin_a = tf.sin(angles)
    # Offsets translate so rotation happens about the image center,
    # expressed in (width-1, height-1) pixel coordinates.
    x_offset = ((image_width - 1) -
                (cos_a * (image_width - 1) - sin_a *
                 (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) -
                (sin_a * (image_width - 1) + cos_a *
                 (image_height - 1))) / 2.0
    num_angles = tf.shape(angles)[0]
    return tf.concat(
        [
            cos_a[:, None],
            -sin_a[:, None],
            x_offset[:, None],
            sin_a[:, None],
            cos_a[:, None],
            y_offset[:, None],
            tf.zeros((num_angles, 2), tf.float32),
        ],
        axis=1)
@keras_export('keras.layers.RandomRotation',
              'keras.layers.experimental.preprocessing.RandomRotation')
class RandomRotation(base_layer.Layer):
  """Randomly rotate each image.

  By default, random rotations are only applied during training.
  At inference time, the layer does nothing. If you need to apply random
  rotations at inference time, set `training` to True when calling the layer.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format

  Attributes:
    factor: a float represented as fraction of 2 Pi, or a tuple of size 2
      representing lower and upper bound for rotating clockwise and
      counter-clockwise. A positive values means rotating counter clock-wise,
      while a negative value means clock-wise. When represented as a single
      float, this value is used for both the upper and lower bound. For
      instance, `factor=(-0.2, 0.3)` results in an output rotation by a random
      amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in an
      output rotating by a random amount in the range `[-20% * 2pi, 20% * 2pi]`.
    fill_mode: Points outside the boundaries of the input are filled according
      to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
      - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
        reflecting about the edge of the last pixel.
      - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
        filling all values beyond the edge with the same constant value k = 0.
      - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
        wrapping around to the opposite edge.
      - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
        nearest pixel.
    interpolation: Interpolation mode. Supported values: `"nearest"`,
      `"bilinear"`.
    seed: Integer. Used to create a random seed.
    fill_value: a float represents the value to be filled outside the boundaries
      when `fill_mode="constant"`.
  """

  def __init__(self,
               factor,
               fill_mode='reflect',
               interpolation='bilinear',
               seed=None,
               fill_value=0.0,
               **kwargs):
    self.factor = factor
    # A scalar `factor` means symmetric bounds [-factor, factor]; a pair is
    # interpreted as explicit (lower, upper) bounds, in fractions of 2*pi.
    if isinstance(factor, (tuple, list)):
      self.lower = factor[0]
      self.upper = factor[1]
    else:
      self.lower = -factor
      self.upper = factor
    if self.upper < self.lower:
      # NOTE(review): the condition rejects upper < lower, but the message
      # talks about negative values — the wording looks stale; confirm.
      raise ValueError('Factor cannot have negative values, '
                       'got {}'.format(factor))
    # Validates fill_mode/interpolation combinations up front so errors
    # surface at construction time rather than on the first call.
    check_fill_mode_and_interpolation(fill_mode, interpolation)
    self.fill_mode = fill_mode
    self.fill_value = fill_value
    self.interpolation = interpolation
    self.seed = seed
    # Stateful RNG: each call to the layer draws fresh rotation angles.
    self._rng = make_generator(self.seed)
    super(RandomRotation, self).__init__(**kwargs)
    # Usage-tracking metric for this preprocessing layer.
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomRotation').set(
        True)

  def call(self, inputs, training=True):
    # `training=None` (passed explicitly) defers to the global learning
    # phase; the default `True` applies rotation unconditionally.
    if training is None:
      training = backend.learning_phase()

    inputs = tf.convert_to_tensor(inputs)
    original_shape = inputs.shape
    unbatched = inputs.shape.rank == 3
    # The transform op only accepts rank 4 inputs, so if we have an unbatched
    # image, we need to temporarily expand dims to a batch.
    if unbatched:
      inputs = tf.expand_dims(inputs, 0)

    def random_rotated_inputs():
      """Rotated inputs with random ops."""
      inputs_shape = tf.shape(inputs)
      batch_size = inputs_shape[0]
      img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
      img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
      # Convert the factor bounds (fractions of a full turn) to radians.
      min_angle = self.lower * 2. * np.pi
      max_angle = self.upper * 2. * np.pi
      # One independently-sampled angle per image in the batch.
      angles = self._rng.uniform(
          shape=[batch_size], minval=min_angle, maxval=max_angle)
      return transform(
          inputs,
          get_rotation_matrix(angles, img_hd, img_wd),
          fill_mode=self.fill_mode,
          fill_value=self.fill_value,
          interpolation=self.interpolation)

    # Rotate only when training; otherwise pass inputs through unchanged.
    output = control_flow_util.smart_cond(training, random_rotated_inputs,
                                          lambda: inputs)
    if unbatched:
      output = tf.squeeze(output, 0)
    output.set_shape(original_shape)
    return output

  def compute_output_shape(self, input_shape):
    # Rotation preserves the spatial dimensions.
    return input_shape

  def get_config(self):
    config = {
        'factor': self.factor,
        'fill_mode': self.fill_mode,
        'fill_value': self.fill_value,
        'interpolation': self.interpolation,
        'seed': self.seed,
    }
    base_config = super(RandomRotation, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RandomZoom',
              'keras.layers.experimental.preprocessing.RandomZoom')
class RandomZoom(base_layer.Layer):
  """Randomly zoom each image during training.

  Args:
    height_factor: a float represented as fraction of value, or a tuple of size
      2 representing lower and upper bound for zooming vertically. When
      represented as a single float, this value is used for both the upper and
      lower bound. A positive value means zooming out, while a negative value
      means zooming in. For instance, `height_factor=(0.2, 0.3)` result in an
      output zoomed out by a random amount in the range `[+20%, +30%]`.
      `height_factor=(-0.3, -0.2)` result in an output zoomed in by a random
      amount in the range `[+20%, +30%]`.
    width_factor: a float represented as fraction of value, or a tuple of size 2
      representing lower and upper bound for zooming horizontally. When
      represented as a single float, this value is used for both the upper and
      lower bound. For instance, `width_factor=(0.2, 0.3)` result in an output
      zooming out between 20% to 30%. `width_factor=(-0.3, -0.2)` result in an
      output zooming in between 20% to 30%. Defaults to `None`, i.e., zooming
      vertical and horizontal directions by preserving the aspect ratio.
    fill_mode: Points outside the boundaries of the input are filled according
      to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
      - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
        reflecting about the edge of the last pixel.
      - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
        filling all values beyond the edge with the same constant value k = 0.
      - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
        wrapping around to the opposite edge.
      - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
        nearest pixel.
    interpolation: Interpolation mode. Supported values: `"nearest"`,
      `"bilinear"`.
    seed: Integer. Used to create a random seed.
    fill_value: a float represents the value to be filled outside the boundaries
      when `fill_mode="constant"`.

  Example:

  >>> input_img = np.random.random((32, 224, 224, 3))
  >>> layer = tf.keras.layers.RandomZoom(.5, .2)
  >>> out_img = layer(input_img)
  >>> out_img.shape
  TensorShape([32, 224, 224, 3])

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.
  """

  def __init__(self,
               height_factor,
               width_factor=None,
               fill_mode='reflect',
               interpolation='bilinear',
               seed=None,
               fill_value=0.0,
               **kwargs):
    self.height_factor = height_factor
    # A scalar factor means symmetric bounds [-factor, factor].
    if isinstance(height_factor, (tuple, list)):
      self.height_lower = height_factor[0]
      self.height_upper = height_factor[1]
    else:
      self.height_lower = -height_factor
      self.height_upper = height_factor

    if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
      raise ValueError('`height_factor` must have values between [-1, 1], '
                       'got {}'.format(height_factor))

    self.width_factor = width_factor
    if width_factor is not None:
      if isinstance(width_factor, (tuple, list)):
        self.width_lower = width_factor[0]
        self.width_upper = width_factor[1]
      else:
        self.width_lower = -width_factor  # pylint: disable=invalid-unary-operand-type
        self.width_upper = width_factor

      # NOTE(review): unlike `height_factor`, only the lower bound -1 is
      # enforced here (no upper limit of 1); confirm this asymmetry is
      # intentional.
      if self.width_lower < -1. or self.width_upper < -1.:
        raise ValueError('`width_factor` must have values larger than -1, '
                         'got {}'.format(width_factor))

    check_fill_mode_and_interpolation(fill_mode, interpolation)

    self.fill_mode = fill_mode
    self.fill_value = fill_value
    self.interpolation = interpolation
    self.seed = seed
    # Stateful RNG: each call to the layer draws fresh zoom factors.
    self._rng = make_generator(self.seed)
    super(RandomZoom, self).__init__(**kwargs)
    # Usage-tracking metric for this preprocessing layer.
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomZoom').set(True)

  def call(self, inputs, training=True):
    # `training=None` (passed explicitly) defers to the global learning
    # phase; the default `True` applies zooming unconditionally.
    if training is None:
      training = backend.learning_phase()

    inputs = tf.convert_to_tensor(inputs)
    original_shape = inputs.shape
    unbatched = inputs.shape.rank == 3
    # The transform op only accepts rank 4 inputs, so if we have an unbatched
    # image, we need to temporarily expand dims to a batch.
    if unbatched:
      inputs = tf.expand_dims(inputs, 0)

    def random_zoomed_inputs():
      """Zoomed inputs with random ops."""
      inputs_shape = tf.shape(inputs)
      batch_size = inputs_shape[0]
      img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
      img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
      # A sampled factor f yields a scale of (1 + f): positive factors zoom
      # out, negative factors zoom in.
      height_zoom = self._rng.uniform(
          shape=[batch_size, 1],
          minval=1. + self.height_lower,
          maxval=1. + self.height_upper)
      if self.width_factor is not None:
        width_zoom = self._rng.uniform(
            shape=[batch_size, 1],
            minval=1. + self.width_lower,
            maxval=1. + self.width_upper)
      else:
        # No width factor: reuse the height scale to preserve aspect ratio.
        width_zoom = height_zoom
      # `get_zoom_matrix` expects [zx, zy] pairs, i.e. width scale first.
      zooms = tf.cast(
          tf.concat([width_zoom, height_zoom], axis=1),
          dtype=tf.float32)
      return transform(
          inputs,
          get_zoom_matrix(zooms, img_hd, img_wd),
          fill_mode=self.fill_mode,
          fill_value=self.fill_value,
          interpolation=self.interpolation)

    # Zoom only when training; otherwise pass inputs through unchanged.
    output = control_flow_util.smart_cond(training, random_zoomed_inputs,
                                          lambda: inputs)
    if unbatched:
      output = tf.squeeze(output, 0)
    output.set_shape(original_shape)
    return output

  def compute_output_shape(self, input_shape):
    # Zooming warps content but keeps the tensor dimensions.
    return input_shape

  def get_config(self):
    config = {
        'height_factor': self.height_factor,
        'width_factor': self.width_factor,
        'fill_mode': self.fill_mode,
        'fill_value': self.fill_value,
        'interpolation': self.interpolation,
        'seed': self.seed,
    }
    base_config = super(RandomZoom, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def get_zoom_matrix(zooms, image_height, image_width, name=None):
  """Returns projective transform(s) for the given zoom(s).

  Args:
    zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for
      each image (for a batch of images).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.
    name: The name of the op.

  Returns:
    A tensor of shape `(num_images, 8)`: projective transforms usable with
    `transform`. A row `[a0, a1, a2, b0, b1, b2, c0, c1]` maps the *output*
    point `(x, y)` to a transformed *input* point
    `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)` where
    `k = c0 x + c1 y + 1`.
  """
  with backend.name_scope(name or 'zoom_matrix'):
    # Each output row is the first 8 entries of the row-major 3x3 matrix
    #   [[zx 0 0]
    #    [0 zy 0]
    #    [0  0 1]]
    # (last entry implicit), plus offsets that keep the image center fixed.
    # Zoom matrices are always float32.
    batch = tf.shape(zooms)[0]
    zx = zooms[:, 0, None]
    zy = zooms[:, 1, None]
    x_offset = ((image_width - 1.) / 2.0) * (1.0 - zx)
    y_offset = ((image_height - 1.) / 2.0) * (1.0 - zy)
    zero_col = tf.zeros((batch, 1), tf.float32)
    tail = tf.zeros((batch, 2), tf.float32)
    return tf.concat(
        [zx, zero_col, x_offset, zero_col, zy, y_offset, tail], axis=1)
@keras_export('keras.layers.RandomContrast',
              'keras.layers.experimental.preprocessing.RandomContrast')
class RandomContrast(base_layer.Layer):
  """Adjust the contrast of an image or images by a random factor.

  Contrast is adjusted independently for each channel of each image during
  training.

  For each channel, this layer computes the mean of the image pixels in the
  channel and then adjusts each component `x` of each pixel to
  `(x - mean) * contrast_factor + mean`.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Attributes:
    factor: a positive float represented as fraction of value, or a tuple of
      size 2 representing lower and upper bound. When represented as a single
      float, lower = upper. The contrast factor will be randomly picked between
      `[1.0 - lower, 1.0 + upper]`.
    seed: Integer. Used to create a random seed.
  """

  def __init__(self, factor, seed=None, **kwargs):
    self.factor = factor
    # A scalar factor gives symmetric bounds; a pair gives (lower, upper).
    if isinstance(factor, (tuple, list)):
      self.lower = factor[0]
      self.upper = factor[1]
    else:
      self.lower = self.upper = factor
    # NOTE(review): only `lower` is compared against 1.0 here, so an upper
    # bound greater than 1.0 is accepted despite the error message — confirm
    # whether that is intentional.
    if self.lower < 0. or self.upper < 0. or self.lower > 1.:
      raise ValueError('Factor cannot have negative values or greater than 1.0,'
                       ' got {}'.format(factor))
    self.seed = seed
    # Stateful RNG used to derive per-call seeds for the stateless contrast op.
    self._rng = make_generator(self.seed)
    super(RandomContrast, self).__init__(**kwargs)
    # Usage-tracking metric for this preprocessing layer.
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomContrast').set(
        True)

  def call(self, inputs, training=True):
    # `training=None` (passed explicitly) defers to the global learning
    # phase; the default `True` applies the contrast jitter unconditionally.
    if training is None:
      training = backend.learning_phase()

    def random_contrasted_inputs():
      # Contrast factor is drawn from [1 - lower, 1 + upper]; the stateless
      # op takes an explicit seed pair generated from this layer's RNG.
      return tf.image.stateless_random_contrast(inputs, 1. - self.lower,
                                                1. + self.upper,
                                                self._rng.make_seeds()[:, 0])

    output = control_flow_util.smart_cond(training, random_contrasted_inputs,
                                          lambda: inputs)
    output.set_shape(inputs.shape)
    return output

  def compute_output_shape(self, input_shape):
    # Contrast adjustment is pointwise; the shape is unchanged.
    return input_shape

  def get_config(self):
    config = {
        'factor': self.factor,
        'seed': self.seed,
    }
    base_config = super(RandomContrast, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RandomHeight',
              'keras.layers.experimental.preprocessing.RandomHeight')
class RandomHeight(base_layer.Layer):
  """Randomly vary the height of a batch of images during training.

  Adjusts the height of a batch of images by a random factor. The input
  should be a 3D (unbatched) or 4D (batched) tensor in the `"channels_last"`
  image data format.

  By default, this layer is inactive during inference.

  Args:
    factor: A positive float (fraction of original height), or a tuple of size 2
      representing lower and upper bound for resizing vertically. When
      represented as a single float, this value is used for both the upper and
      lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
      height changed by a random amount in the range `[20%, 30%]`.
      `factor=(-0.2, 0.3)` results in an output with height changed by a random
      amount in the range `[-20%, +30%]. `factor=0.2` results in an output with
      height changed by a random amount in the range `[-20%, +20%]`.
    interpolation: String, the interpolation method. Defaults to `"bilinear"`.
      Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
      `"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
    seed: Integer. Used to create a random seed.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., random_height, width, channels)`.
  """

  def __init__(self,
               factor,
               interpolation='bilinear',
               seed=None,
               **kwargs):
    self.factor = factor
    # A scalar factor gives symmetric bounds [-factor, factor].
    if isinstance(factor, (tuple, list)):
      self.height_lower = factor[0]
      self.height_upper = factor[1]
    else:
      self.height_lower = -factor
      self.height_upper = factor

    if self.height_upper < self.height_lower:
      raise ValueError('`factor` cannot have upper bound less than '
                       'lower bound, got {}'.format(factor))
    # A lower bound below -1 would imply a negative target height.
    if self.height_lower < -1. or self.height_upper < -1.:
      raise ValueError('`factor` must have values larger than -1, '
                       'got {}'.format(factor))
    self.interpolation = interpolation
    # Resolve the interpolation name once at construction time.
    self._interpolation_method = get_interpolation(interpolation)
    self.seed = seed
    # Stateful RNG: each call draws a fresh height factor.
    self._rng = make_generator(self.seed)
    super(RandomHeight, self).__init__(**kwargs)
    # Usage-tracking metric for this preprocessing layer.
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomHeight').set(True)

  def call(self, inputs, training=True):
    # `training=None` (passed explicitly) defers to the global learning
    # phase; the default `True` applies the resize unconditionally.
    if training is None:
      training = backend.learning_phase()

    def random_height_inputs():
      """Inputs height-adjusted with random ops."""
      inputs_shape = tf.shape(inputs)
      img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
      img_wd = inputs_shape[W_AXIS]
      # A single scalar factor: every image in the batch gets the same new
      # height (a batch must stay rectangular).
      height_factor = self._rng.uniform(
          shape=[],
          minval=(1.0 + self.height_lower),
          maxval=(1.0 + self.height_upper))
      adjusted_height = tf.cast(height_factor * img_hd, tf.int32)
      adjusted_size = tf.stack([adjusted_height, img_wd])
      output = tf.image.resize(
          images=inputs, size=adjusted_size, method=self._interpolation_method)
      # The resized height is data-dependent, hence statically unknown.
      output_shape = inputs.shape.as_list()
      output_shape[H_AXIS] = None
      output.set_shape(output_shape)
      return output

    return control_flow_util.smart_cond(training, random_height_inputs,
                                        lambda: inputs)

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    # Height becomes dynamic; all other dimensions are preserved.
    input_shape[H_AXIS] = None
    return tf.TensorShape(input_shape)

  def get_config(self):
    config = {
        'factor': self.factor,
        'interpolation': self.interpolation,
        'seed': self.seed,
    }
    base_config = super(RandomHeight, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RandomWidth',
              'keras.layers.experimental.preprocessing.RandomWidth')
class RandomWidth(base_layer.Layer):
  """Randomly vary the width of a batch of images during training.

  Adjusts the width of a batch of images by a random factor. The input
  should be a 3D (unbatched) or 4D (batched) tensor in the `"channels_last"`
  image data format.

  By default, this layer is inactive during inference.

  Args:
    factor: A positive float (fraction of original width), or a tuple of size 2
      representing lower and upper bound for resizing horizontally. When
      represented as a single float, this value is used for both the upper and
      lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
      width changed by a random amount in the range `[20%, 30%]`. `factor=(-0.2,
      0.3)` results in an output with width changed by a random amount in the
      range `[-20%, +30%]`. `factor=0.2` results in an output with width changed
      by a random amount in the range `[-20%, +20%]`.
    interpolation: String, the interpolation method. Defaults to `bilinear`.
      Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`, `"lanczos3"`,
      `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
    seed: Integer. Used to create a random seed.

  Input shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, width, channels)`, in `"channels_last"` format.

  Output shape:
    3D (unbatched) or 4D (batched) tensor with shape:
    `(..., height, random_width, channels)`.
  """

  def __init__(self,
               factor,
               interpolation='bilinear',
               seed=None,
               **kwargs):
    self.factor = factor
    # A scalar factor gives symmetric bounds [-factor, factor].
    if isinstance(factor, (tuple, list)):
      self.width_lower = factor[0]
      self.width_upper = factor[1]
    else:
      self.width_lower = -factor
      self.width_upper = factor
    if self.width_upper < self.width_lower:
      raise ValueError('`factor` cannot have upper bound less than '
                       'lower bound, got {}'.format(factor))
    # A lower bound below -1 would imply a negative target width.
    if self.width_lower < -1. or self.width_upper < -1.:
      raise ValueError('`factor` must have values larger than -1, '
                       'got {}'.format(factor))
    self.interpolation = interpolation
    # Resolve the interpolation name once at construction time.
    self._interpolation_method = get_interpolation(interpolation)
    self.seed = seed
    # Stateful RNG: each call draws a fresh width factor.
    self._rng = make_generator(self.seed)
    super(RandomWidth, self).__init__(**kwargs)
    # Usage-tracking metric for this preprocessing layer.
    base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomWidth').set(True)

  def call(self, inputs, training=True):
    # `training=None` (passed explicitly) defers to the global learning
    # phase; the default `True` applies the resize unconditionally.
    if training is None:
      training = backend.learning_phase()

    def random_width_inputs():
      """Inputs width-adjusted with random ops."""
      inputs_shape = tf.shape(inputs)
      img_hd = inputs_shape[H_AXIS]
      img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
      # A single scalar factor: every image in the batch gets the same new
      # width (a batch must stay rectangular).
      width_factor = self._rng.uniform(
          shape=[],
          minval=(1.0 + self.width_lower),
          maxval=(1.0 + self.width_upper))
      adjusted_width = tf.cast(width_factor * img_wd, tf.int32)
      adjusted_size = tf.stack([img_hd, adjusted_width])
      output = tf.image.resize(
          images=inputs, size=adjusted_size, method=self._interpolation_method)
      # The resized width is data-dependent, hence statically unknown.
      output_shape = inputs.shape.as_list()
      output_shape[W_AXIS] = None
      output.set_shape(output_shape)
      return output

    return control_flow_util.smart_cond(training, random_width_inputs,
                                        lambda: inputs)

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    # Width becomes dynamic; all other dimensions are preserved.
    input_shape[W_AXIS] = None
    return tf.TensorShape(input_shape)

  def get_config(self):
    config = {
        'factor': self.factor,
        'interpolation': self.interpolation,
        'seed': self.seed,
    }
    base_config = super(RandomWidth, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def make_generator(seed=None):
  """Creates a `tf.random.Generator` for the preprocessing layers.

  Args:
    seed: the seed to initialize the generator. If None, the generator will
      be initialized non-deterministically.

  Returns:
    A generator object.
  """
  if seed is None:
    # No seed: let TF pick non-deterministic initial state.
    return tf.random.Generator.from_non_deterministic_state()
  return tf.random.Generator.from_seed(seed)
def get_interpolation(interpolation):
  """Maps an interpolation name (case-insensitive) to its resize method.

  Raises `NotImplementedError` for names missing from `_RESIZE_METHODS`.
  """
  key = interpolation.lower()
  if key in _RESIZE_METHODS:
    return _RESIZE_METHODS[key]
  raise NotImplementedError(
      'Value not recognized for `interpolation`: {}. Supported values '
      'are: {}'.format(key, _RESIZE_METHODS.keys()))
| 53,682 | 37.760289 | 86 | py |
keras | keras-master/keras/layers/preprocessing/normalization_tpu_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute import tpu_strategy_test_utils
from keras.layers.preprocessing import normalization
from keras.layers.preprocessing import preprocessing_test_utils
def _get_layer_computation_test_cases():
test_cases = ({
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": -1,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element"
}, {
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis"
}, {
"adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis_flat_data"
}, {
"adapt_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"axis":
1,
"test_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"expected":
np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
[[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
np.float32),
"testcase_name":
"3d_internal_axis"
}, {
"adapt_data":
np.array(
[[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
np.float32),
"axis": (1, 2),
"test_data":
np.array(
[[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
np.float32),
"expected":
np.array(
[[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
np.float32),
"testcase_name":
"3d_multiple_axis"
})
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@keras_parameterized.run_all_keras_modes(
    always_skip_v1=True, always_skip_eager=True)
class NormalizationTest(keras_parameterized.TestCase,
                        preprocessing_test_utils.PreprocessingLayerTest):
  # Only v2 graph ("function") mode is exercised: both v1 and pure-eager
  # runs are skipped by the decorator above.

  @parameterized.named_parameters(*_get_layer_computation_test_cases())
  def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
                             expected):
    """Adapts a Normalization layer under TPUStrategy and checks its output."""
    # Keep every non-batch dimension of the input placeholder dynamic.
    input_shape = tuple([None for _ in range(test_data.ndim - 1)])
    if use_dataset:
      # Keras APIs expect batched datasets
      adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
          test_data.shape[0] // 2)
      test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
          test_data.shape[0] // 2)

    strategy = tpu_strategy_test_utils.get_tpu_strategy()

    with strategy.scope():
      input_data = keras.Input(shape=input_shape)
      layer = normalization.Normalization(axis=axis)
      # `adapt` computes mean/variance from the data before building the model.
      layer.adapt(adapt_data)
      output = layer(input_data)
      model = keras.Model(input_data, output)
      output_data = model.predict(test_data)
      self.assertAllClose(expected, output_data)
# Standalone entry point: run this module's tests with the TF test runner.
if __name__ == "__main__":
  tf.test.main()
| 4,629 | 36.04 | 80 | py |
keras | keras-master/keras/layers/preprocessing/category_encoding_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text category_encoding preprocessing layer."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import backend
from keras import keras_parameterized
from keras.layers import core
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import preprocessing_test_utils
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoryEncodingInputTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
  def test_dense_input_sparse_output(self):
    """COUNT mode with `sparse=True` must agree with the dense COUNT output."""
    input_array = tf.constant([[1, 2, 3], [3, 3, 0]])

    # The expected output should be (X for missing value):
    # [[X, 1, 1, 1, X, X]
    #  [1, X, X, 2, X, X]]
    # i.e. row 0 has one each of tokens 1, 2, 3; row 1 has one token 0 and
    # two token 3s.
    expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]
    expected_values = [1, 1, 1, 1, 2]
    num_tokens = 6

    input_data = keras.Input(shape=(None,), dtype=tf.int32)
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)

    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)

    # Assert sparse output is same as dense output.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    # Densify the sparse result (missing entries -> 0) and compare.
    self.assertAllEqual(
        tf.sparse.to_dense(sp_output_dataset, default_value=0),
        output_dataset)
def test_sparse_input(self):
input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
sparse_tensor_data = tf.sparse.from_dense(input_array)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(sparse_tensor_data, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_sparse_input_with_weights(self):
input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
sparse_tensor_data = tf.sparse.from_dense(input_array)
sparse_weight_data = tf.sparse.from_dense(weights_array)
# pyformat: disable
expected_output = [[0, .1, .2, .3, .4, 0],
[0, .4, 0, .1, .5, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
weight_data = keras.Input(shape=(None,), dtype=tf.float32, sparse=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT)
int_data = layer(input_data, count_weights=weight_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
output_dataset = model.predict([sparse_tensor_data, sparse_weight_data],
steps=1)
self.assertAllClose(expected_output, output_dataset)
def test_sparse_input_sparse_output(self):
sp_inp = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]],
values=[0, 2, 1, 1, 0],
dense_shape=[4, 2])
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
# The expected output should be (X for missing value):
# [[1, X, X, X]
# [X, X, 1, X]
# [X, 2, X, X]
# [1, X, X, X]]
expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
expected_values = [1, 1, 2, 1]
num_tokens = 6
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(sp_inp, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
# Assert sparse output is same as dense output.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(sp_inp, steps=1)
self.assertAllEqual(
tf.sparse.to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_input_sparse_output_with_weights(self):
indices = [[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]]
sp_inp = tf.SparseTensor(
indices=indices, values=[0, 2, 1, 1, 0], dense_shape=[4, 2])
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
sp_weight = tf.SparseTensor(
indices=indices, values=[.1, .2, .4, .3, .2], dense_shape=[4, 2])
weight_data = keras.Input(shape=(None,), dtype=tf.float32, sparse=True)
# The expected output should be (X for missing value):
# [[1, X, X, X]
# [X, X, 1, X]
# [X, 2, X, X]
# [1, X, X, X]]
expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
expected_values = [.1, .2, .7, .2]
num_tokens = 6
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data, count_weights=weight_data)
model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
sp_output_dataset = model.predict([sp_inp, sp_weight], steps=1)
self.assertAllClose(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
def test_ragged_input(self):
input_array = tf.ragged.constant([[1, 2, 3], [3, 1]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_input_sparse_output(self):
input_array = tf.ragged.constant([[1, 2, 3], [3, 3]])
# The expected output should be (X for missing value):
# [[X, 1, 1, 1]
# [X, X, X, 2]]
expected_indices = [[0, 1], [0, 2], [0, 3], [1, 3]]
expected_values = [1, 1, 1, 2]
num_tokens = 6
input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
# Assert sparse output is same as dense output.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(
tf.sparse.to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_output_and_dense_layer(self):
input_array = tf.constant([[1, 2, 3], [3, 3, 0]])
num_tokens = 4
input_data = keras.Input(shape=(None,), dtype=tf.int32)
encoding_layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = encoding_layer(input_data)
dense_layer = keras.layers.Dense(units=1)
output_data = dense_layer(int_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array, steps=1)
def test_dense_oov_input(self):
valid_array = tf.constant([[0, 1, 2], [0, 1, 2]])
invalid_array = tf.constant([[0, 1, 2], [2, 3, 1]])
num_tokens = 3
expected_output_shape = [None, num_tokens]
encoder_layer = category_encoding.CategoryEncoding(num_tokens)
input_data = keras.Input(shape=(3,), dtype=tf.int32)
int_data = encoder_layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
# Call predict once on valid input to compile a graph and test control flow.
_ = model.predict(valid_array, steps=1)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
".*must be in the range 0 <= values < num_tokens.*"):
_ = model.predict(invalid_array, steps=1)
def test_dense_negative(self):
valid_array = tf.constant([[0, 1, 2], [0, 1, 2]])
invalid_array = tf.constant([[1, 2, 0], [2, 2, -1]])
num_tokens = 3
expected_output_shape = [None, num_tokens]
encoder_layer = category_encoding.CategoryEncoding(num_tokens)
input_data = keras.Input(shape=(3,), dtype=tf.int32)
int_data = encoder_layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
# Call predict once on valid input to compile a graph and test control flow.
_ = model.predict(valid_array, steps=1)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
".*must be in the range 0 <= values < num_tokens.*"):
_ = model.predict(invalid_array, steps=1)
def test_legacy_max_tokens_arg(self):
input_array = np.array([[1, 2, 3, 1]])
expected_output = [[0, 1, 1, 1, 0, 0]]
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.int32)
layer = category_encoding.CategoryEncoding(
max_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
# Tests for CategoryEncoding output modes: ONE_HOT, MULTI_HOT and COUNT,
# including rank-0/1/2 inputs and the rank-3 failure cases.
@keras_parameterized.run_all_keras_modes
class CategoryEncodingOutputTest(keras_parameterized.TestCase,
                                 preprocessing_test_utils.PreprocessingLayerTest
):
def test_one_hot_output(self):
input_data = np.array([[3], [2], [0], [1]])
expected_output = [
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
]
num_tokens = 4
expected_output_shape = [None, num_tokens]
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
inputs = keras.Input(shape=(1,), dtype=tf.int32)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_dataset = model(input_data)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
self.assertAllEqual(expected_output, output_dataset)
# Rank-1 input is treated as a batch of scalars for ONE_HOT.
def test_one_hot_output_rank_one_input(self):
input_data = np.array([3, 2, 0, 1])
expected_output = [
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
]
num_tokens = 4
expected_output_shape = [None, num_tokens]
# Test call on layer directly.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
output_data = layer(input_data)
self.assertAllEqual(expected_output, output_data)
# Test call on model.
inputs = keras.Input(shape=(1,), dtype=tf.int32)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
self.assertAllEqual(expected_output, output_data)
# Rank-0 (scalar) input produces a single one-hot vector.
def test_one_hot_output_rank_zero_input(self):
input_data = np.array(3)
expected_output = [0, 0, 0, 1]
num_tokens = 4
expected_output_shape = [None, num_tokens]
# Test call on layer directly.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
output_data = layer(input_data)
self.assertAllEqual(expected_output, output_data)
# Test call on model.
inputs = keras.Input(shape=(1,), dtype=tf.int32)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
self.assertAllEqual(expected_output, output_data)
# ONE_HOT on rank-2 input (would yield rank 3) must raise a ValueError.
def test_one_hot_rank_3_output_fails(self):
layer = category_encoding.CategoryEncoding(
num_tokens=4, output_mode=category_encoding.ONE_HOT)
with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
_ = layer(keras.Input(shape=(4,), dtype=tf.int32))
with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
_ = layer(np.array([[3, 2, 0, 1], [3, 2, 0, 1]]))
def test_multi_hot_output(self):
input_data = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
expected_output = [
[0, 1, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0],
]
num_tokens = 6
expected_output_shape = [None, num_tokens]
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
inputs = keras.Input(shape=(None,), dtype=tf.int32)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(input_data)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
self.assertAllEqual(expected_output, output_data)
# Rank-1 input is treated as a single example for MULTI_HOT.
def test_multi_hot_output_rank_one_input(self):
input_data = np.array([3, 2, 0, 1])
expected_output = [1, 1, 1, 1, 0, 0]
num_tokens = 6
expected_output_shape = [None, num_tokens]
# Test call on layer directly.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
output_data = layer(input_data)
self.assertAllEqual(expected_output, output_data)
# Test call on model.
inputs = keras.Input(shape=(4,), dtype=tf.int32)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output_rank_zero_input(self):
input_data = np.array(3)
expected_output = [0, 0, 0, 1, 0, 0]
num_tokens = 6
expected_output_shape = [None, num_tokens]
# Test call on layer directly.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
output_data = layer(input_data)
self.assertAllEqual(expected_output, output_data)
# Test call on model.
inputs = keras.Input(shape=(4,), dtype=tf.int32)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
self.assertAllEqual(expected_output, output_data)
# NOTE(review): this test constructs the layer with ONE_HOT despite its
# name mentioning multi_hot -- presumably intentional (both modes share the
# rank check), but worth confirming.
def test_multi_hot_rank_3_output_fails(self):
layer = category_encoding.CategoryEncoding(
num_tokens=4, output_mode=category_encoding.ONE_HOT)
with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
_ = layer(keras.Input(shape=(3, 4,), dtype=tf.int32))
with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
_ = layer(np.array([[[3, 2, 0, 1], [3, 2, 0, 1]]]))
def test_count_output(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0, 0],
[2, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.int32)
layer = category_encoding.CategoryEncoding(
num_tokens=6, output_mode=category_encoding.COUNT)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
# End-to-end smoke test: CategoryEncoding feeding a Dense layer, run under
# both COUNT and MULTI_HOT output modes via parameterization.
class CategoryEncodingModelBuildingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(
{
"testcase_name": "count_output",
"num_tokens": 5,
"output_mode": category_encoding.COUNT
}, {
"testcase_name": "multi_hot_output",
"num_tokens": 5,
"output_mode": category_encoding.MULTI_HOT
})
def test_end_to_end_bagged_modeling(self, output_mode, num_tokens):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
input_data = keras.Input(shape=(None,), dtype=tf.int32)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=output_mode)
weights = []
# NOTE(review): both parameterizations above pass num_tokens=5, so this
# branch appears dead, and set_weights([]) is presumably a no-op for a
# weightless layer -- confirm whether this leftover adapt-era setup can go.
if num_tokens is None:
layer.set_num_elements(5)
layer.set_weights(weights)
int_data = layer(input_data)
float_data = backend.cast(int_data, dtype="float32")
output_data = core.Dense(64)(float_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
# Standard TF test entry point.
if __name__ == "__main__":
tf.test.main()
| 19,483 | 37.968 | 80 | py |
keras | keras-master/keras/layers/preprocessing/preprocessing_stage.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing stage."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras.engine import base_preprocessing_layer
from keras.engine import functional
from keras.engine import sequential
from keras.utils import tf_utils
# Sequential methods should take precedence.
# Sequential methods should take precedence.
class PreprocessingStage(sequential.Sequential,
                         base_preprocessing_layer.PreprocessingLayer):
"""A sequential preprocessing stage.
This preprocessing stage wraps a list of preprocessing layers into a
Sequential-like object that enables you to `adapt()` the whole list via
a single `adapt()` call on the preprocessing stage.
Args:
layers: List of layers. Can include layers that aren't preprocessing layers.
name: String. Optional name for the preprocessing stage object.
"""
def adapt(self, data, reset_state=True):
"""Adapt the state of the layers of the preprocessing stage to the data.
Args:
data: A batched Dataset object, or a NumPy array, or an EagerTensor.
Data to be iterated over to adapt the state of the layers in this
preprocessing stage.
reset_state: Whether this call to `adapt` should reset the state of
the layers in this preprocessing stage.
"""
if not isinstance(data,
(tf.data.Dataset, np.ndarray, tf.__internal__.EagerTensor)):
raise ValueError(
'`adapt()` requires a batched Dataset, an EagerTensor, '
'or a Numpy array as input, '
'got {}'.format(type(data)))
if isinstance(data, tf.data.Dataset):
# Validate the datasets to try and ensure we haven't been passed one with
# infinite size. That would cause an infinite loop here.
if tf_utils.dataset_is_infinite(data):
raise ValueError(
'The dataset passed to `adapt()` has an infinite number of '
'elements. Please use dataset.take(...) to make the number '
'of elements finite.')
# Adapt each layer in order; inputs for layer i are produced by running
# the raw data through layers 0..i-1 (already adapted by earlier
# iterations of this loop).
for current_layer_index in range(0, len(self.layers)):
if not hasattr(self.layers[current_layer_index], 'adapt'):
# Skip any layer that does not need adapting.
continue
# `map_fn` closes over the loop variable; this is safe (despite the
# pylint warnings below) because it is consumed before the loop
# advances to the next layer.
def map_fn(x):
"""Maps `PreprocessingStage` inputs to inputs at `current_layer_index`.
Args:
x: Batch of inputs seen in entry of the `PreprocessingStage` instance.
Returns:
Batch of inputs to be processed by layer
`self.layers[current_layer_index]`
"""
if current_layer_index == 0:  # pylint: disable=cell-var-from-loop
return x
for i in range(current_layer_index):  # pylint: disable=cell-var-from-loop
x = self.layers[i](x)
return x
if isinstance(data, tf.data.Dataset):
current_layer_data = data.map(map_fn)
else:
current_layer_data = map_fn(data)
self.layers[current_layer_index].adapt(current_layer_data,
                                       reset_state=reset_state)
# Functional methods should take precedence.
class FunctionalPreprocessingStage(functional.Functional,
                                   base_preprocessing_layer.PreprocessingLayer):
"""A functional preprocessing stage.
This preprocessing stage wraps a graph of preprocessing layers into a
Functional-like object that enables you to `adapt()` the whole graph via
a single `adapt()` call on the preprocessing stage.
Preprocessing stage is not a complete model, so it cannot be called with
`fit()`. However, it is possible to add regular layers that may be trainable
to a preprocessing stage.
A functional preprocessing stage is created in the same way as `Functional`
models. A stage can be instantiated by passing two arguments to
`__init__`. The first argument is the `keras.Input` Tensors that represent
the inputs to the stage. The second argument specifies the output
tensors that represent the outputs of this stage. Both arguments can be a
nested structure of tensors.
Example:
>>> inputs = {'x2': tf.keras.Input(shape=(5,)),
...           'x1': tf.keras.Input(shape=(1,))}
>>> norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
>>> y = norm_layer(inputs['x2'])
>>> y, z = tf.keras.layers.Lambda(lambda x: (x, x))(inputs['x1'])
>>> outputs = [inputs['x1'], [y, z]]
>>> stage = FunctionalPreprocessingStage(inputs, outputs)
Args:
inputs: An input tensor (must be created via `tf.keras.Input()`), or a list,
a dict, or a nested strcture of input tensors.
outputs: An output tensor, or a list, a dict or a nested structure of output
tensors.
name: String, optional. Name of the preprocessing stage.
"""
def fit(self, *args, **kwargs):
# A preprocessing stage is adapt-only; reject training outright.
raise ValueError(
'Preprocessing stage is not a complete model, and hence should not be '
'`fit`. Instead, you may feed data to `adapt` the stage to set '
'appropriate states of the layers in the stage.')
def adapt(self, data, reset_state=True):
"""Adapt the state of the layers of the preprocessing stage to the data.
Args:
data: A batched Dataset object, a NumPy array, an EagerTensor, or a list,
dict or nested structure of Numpy Arrays or EagerTensors. The elements
of Dataset object need to conform with inputs of the stage. The first
dimension of NumPy arrays or EagerTensors are understood to be batch
dimension. Data to be iterated over to adapt the state of the layers in
this preprocessing stage.
reset_state: Whether this call to `adapt` should reset the state of the
layers in this preprocessing stage.
Examples:
>>> # For a stage with dict input
>>> inputs = {'x2': tf.keras.Input(shape=(5,)),
...           'x1': tf.keras.Input(shape=(1,))}
>>> outputs = [inputs['x1'], inputs['x2']]
>>> stage = FunctionalPreprocessingStage(inputs, outputs)
>>> ds = tf.data.Dataset.from_tensor_slices({'x1': tf.ones((4,5)),
...                                          'x2': tf.ones((4,1))})
>>> sorted(ds.element_spec.items())  # Check element_spec
[('x1', TensorSpec(shape=(5,), dtype=tf.float32, name=None)),
 ('x2', TensorSpec(shape=(1,), dtype=tf.float32, name=None))]
>>> stage.adapt(ds)
>>> data_np = {'x1': np.ones((4, 5)), 'x2': np.ones((4, 1))}
>>> stage.adapt(data_np)
"""
# Normalize non-Dataset input into one single-element Dataset per stage
# input, batched with batch size 1.
if not isinstance(data, tf.data.Dataset):
data = self._flatten_to_reference_inputs(data)
if any(not isinstance(datum, (np.ndarray, tf.__internal__.EagerTensor))
for datum in data):
raise ValueError(
'`adapt()` requires a batched Dataset, a list of EagerTensors '
'or Numpy arrays as input, got {}'.format(type(data)))
ds_input = [
tf.data.Dataset.from_tensor_slices(x).batch(1) for x in data
]
if isinstance(data, tf.data.Dataset):
# Validate the datasets to try and ensure we haven't been passed one with
# infinite size. That would cause an infinite loop here.
if tf_utils.dataset_is_infinite(data):
raise ValueError(
'The dataset passed to `adapt()` has an infinite number of '
'elements. Please use dataset.take(...) to make the number '
'of elements finite.')
# Unzip dataset object to a list of single input dataset.
ds_input = _unzip_dataset(data)
# Dictionary mapping reference tensors to datasets
ds_dict = {}
tensor_usage_count = self._tensor_usage_count
for x, y in zip(self.inputs, ds_input):
x_id = str(id(x))
ds_dict[x_id] = [y] * tensor_usage_count[x_id]
nodes_by_depth = self._nodes_by_depth
depth_keys = sorted(nodes_by_depth.keys(), reverse=True)
def build_map_fn(node, args, kwargs):
if not isinstance(args.element_spec, tuple):
def map_fn(*x):
return tf.nest.flatten(node.layer(*x, **kwargs))
else:
def map_fn(*x):
return tf.nest.flatten(node.layer(x, **kwargs))
return map_fn
# Walk the graph from inputs (largest depth) toward outputs: adapt each
# stateful layer on its incoming dataset, then map the dataset through
# the layer to feed downstream nodes.
for depth in depth_keys:
for node in nodes_by_depth[depth]:
# Input node
if node.is_input:
continue
# Node with input not computed yet
if any(t_id not in ds_dict for t_id in node.flat_input_ids):
continue
args, kwargs = node.map_arguments(ds_dict)
args = tf.data.Dataset.zip(tf.__internal__.nest.list_to_tuple(*args))
if node.layer.stateful and hasattr(node.layer, 'adapt'):
node.layer.adapt(args, reset_state=reset_state)
map_fn = build_map_fn(node, args, kwargs)
outputs = args.map(map_fn)
outputs = _unzip_dataset(outputs)
# Update ds_dict.
for x_id, y in zip(node.flat_output_ids, outputs):
ds_dict[x_id] = [y] * tensor_usage_count[x_id]
def _unzip_dataset(ds):
  """Split a zipped dataset into one dataset per flattened element.

  Given a Dataset whose elements are (possibly nested) structures, return a
  list of Datasets, the i-th of which yields only the i-th entry of the
  flattened element structure. E.g. for a dataset zipped from a tuple or dict
  of two datasets, the result is a two-element list equivalent to the
  original component datasets.

  Args:
    ds: A Dataset object.

  Returns:
    A list of Dataset objects, one per entry of `ds.element_spec` in
    `tf.nest.flatten` order.
  """

  def _make_selector(index):
    # Bind `index` now so every selector picks a distinct component.
    def _select(*elements, j=index):
      return tf.nest.flatten(elements)[j]

    return _select

  num_components = len(tf.nest.flatten(ds.element_spec))
  return [ds.map(_make_selector(i)) for i in range(num_components)]
| 10,519 | 38.107807 | 82 | py |
keras | keras-master/keras/layers/preprocessing/hashing.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras hashing preprocessing layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import functools
import numpy as np
from keras.engine import base_layer
from keras.engine import base_preprocessing_layer
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Hashing',
              'keras.layers.experimental.preprocessing.Hashing')
class Hashing(base_layer.Layer):
"""Implements categorical feature hashing, also known as "hashing trick".
This layer transforms single or multiple categorical inputs to hashed output.
It converts a sequence of int or string to a sequence of int. The stable hash
function uses `tensorflow::ops::Fingerprint` to produce the same output
consistently across all platforms.
This layer uses [FarmHash64](https://github.com/google/farmhash) by default,
which provides a consistent hashed output across different platforms and is
stable across invocations, regardless of device and context, by mixing the
input bits thoroughly.
If you want to obfuscate the hashed output, you can also pass a random `salt`
argument in the constructor. In that case, the layer will use the
[SipHash64](https://github.com/google/highwayhash) hash function, with
the `salt` value serving as additional input to the hash function.
**Example (FarmHash64)**
>>> layer = tf.keras.layers.Hashing(num_bins=3)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
       [0],
       [1],
       [1],
       [2]])>
**Example (FarmHash64) with a mask value**
>>> layer = tf.keras.layers.Hashing(num_bins=3, mask_value='')
>>> inp = [['A'], ['B'], [''], ['C'], ['D']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
       [1],
       [0],
       [2],
       [2]])>
**Example (SipHash64)**
>>> layer = tf.keras.layers.Hashing(num_bins=3, salt=[133, 137])
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
       [2],
       [1],
       [0],
       [2]])>
**Example (Siphash64 with a single integer, same as `salt=[133, 133]`)**
>>> layer = tf.keras.layers.Hashing(num_bins=3, salt=133)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[0],
       [0],
       [2],
       [1],
       [0]])>
Args:
num_bins: Number of hash bins. Note that this includes the `mask_value` bin,
so the effective number of bins is `(num_bins - 1)` if `mask_value` is
set.
mask_value: A value that represents masked inputs, which are mapped to
index 0. Defaults to None, meaning no mask term will be added and the
hashing will start at index 0.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64, with these values
used as an additional input (known as a "salt" in cryptography).
These should be non-zero. Defaults to `None` (in that
case, the FarmHash64 hash function is used). It also supports
tuple/list of 2 unsigned integer numbers, see reference paper for details.
**kwargs: Keyword arguments to construct a layer.
Input shape:
A single or list of string, int32 or int64 `Tensor`,
`SparseTensor` or `RaggedTensor` of shape `(batch_size, ...,)`
Output shape:
An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape
`(batch_size, ...)`. If any input is `RaggedTensor` then output is
`RaggedTensor`, otherwise if any input is `SparseTensor` then output is
`SparseTensor`, otherwise the output is `Tensor`.
Reference:
- [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
"""
def __init__(self, num_bins, mask_value=None, salt=None, **kwargs):
# Validate eagerly so misconfiguration fails at construction time.
if num_bins is None or num_bins <= 0:
raise ValueError(
f'The `num_bins` for `Hashing` cannot be `None` or non-positive '
f'values. Received: num_bins={num_bins}.')
super().__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('Hashing').set(True)
self.num_bins = num_bins
self.mask_value = mask_value
# A salt switches the hash function from FarmHash64 to salted SipHash64.
self.strong_hash = True if salt is not None else False
self.salt = None
if salt is not None:
if isinstance(salt, (tuple, list)) and len(salt) == 2:
self.salt = salt
elif isinstance(salt, int):
# A single integer is used for both halves of the SipHash key.
self.salt = [salt, salt]
else:
raise ValueError(
f'The `salt` argument for `Hashing` can only be a tuple of size 2 '
f'integers, or a single integer. Received: salt={salt}.')
def call(self, inputs):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = tf.convert_to_tensor(inputs)
# SparseTensor: hash only the values, preserving sparsity structure.
if isinstance(inputs, tf.SparseTensor):
return tf.SparseTensor(
indices=inputs.indices,
values=self._hash_values_to_bins(inputs.values),
dense_shape=inputs.dense_shape)
return self._hash_values_to_bins(inputs)
def _hash_values_to_bins(self, values):
"""Converts a non-sparse tensor of values to bin indices."""
str_to_hash_bucket = self._get_string_to_hash_bucket_fn()
num_available_bins = self.num_bins
mask = None
# If mask_value is set, the zeroth bin is reserved for it.
if self.mask_value is not None and num_available_bins > 1:
num_available_bins -= 1
mask = tf.equal(values, self.mask_value)
# Convert all values to strings before hashing.
if values.dtype.is_integer:
values = tf.as_string(values)
values = str_to_hash_bucket(values, num_available_bins, name='hash')
if mask is not None:
# Shift all hashed bins up by one (freeing bin 0), then route masked
# positions to bin 0.
values = tf.add(values, tf.ones_like(values))
values = tf.where(mask, tf.zeros_like(values), values)
return values
def _get_string_to_hash_bucket_fn(self):
"""Returns the string_to_hash_bucket op to use based on `hasher_key`."""
# string_to_hash_bucket_fast uses FarmHash64 as hash function.
if not self.strong_hash:
return tf.strings.to_hash_bucket_fast
# string_to_hash_bucket_strong uses SipHash64 as hash function.
else:
return functools.partial(
tf.strings.to_hash_bucket_strong, key=self.salt)
def compute_output_shape(self, input_shape):
# Hashing is elementwise; the shape is unchanged.
return input_shape
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape)
# Output bins are always int64 regardless of input dtype.
output_dtype = tf.int64
if isinstance(input_spec, tf.SparseTensorSpec):
return tf.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
else:
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
def get_config(self):
config = super().get_config()
config.update({
'num_bins': self.num_bins,
'salt': self.salt,
'mask_value': self.mask_value,
})
return config
| 7,662 | 36.563725 | 80 | py |
keras | keras-master/keras/layers/preprocessing/category_encoding_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.category_encoding."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import preprocessing_test_utils
def batch_wrapper(dataset, batch_size, strategy, repeat=None):
  """Batch `dataset` (optionally repeating it) with TPU shape constraints.

  TPUs currently require fully defined input shapes, so when running under a
  TPU strategy `drop_remainder=True` is used to guarantee every batch has
  exactly `batch_size` elements.
  """
  if repeat:
    dataset = dataset.repeat(repeat)
  tpu_strategy_types = (tf.distribute.experimental.TPUStrategy,
                        tf.compat.v1.distribute.experimental.TPUStrategy)
  drop_remainder = isinstance(strategy, tpu_strategy_types)
  return dataset.batch(batch_size, drop_remainder=drop_remainder)
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        # (b/156783625): Outside compilation failed for eager mode only.
        strategy=strategy_combinations.strategies_minus_tpu +
        strategy_combinations.multi_worker_mirrored_strategies,
        mode=["eager", "graph"]))
class CategoryEncodingDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Checks `CategoryEncoding` output when run under distribution strategies."""

  def test_strategy(self, strategy):
    """Multi-hot encodes a small batch inside `strategy.scope()`."""
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
    inp_dataset = tf.data.Dataset.from_tensor_slices(input_array)
    inp_dataset = batch_wrapper(inp_dataset, 2, strategy)
    # Expected multi-hot rows for num_tokens=6.
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [1, 1, 0, 1, 0, 0]]
    # pyformat: enable
    num_tokens = 6
    tf.config.set_soft_device_placement(True)
    with strategy.scope():
      input_data = keras.Input(shape=(4,), dtype=tf.int32)
      layer = category_encoding.CategoryEncoding(
          num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(inp_dataset)
    self.assertAllEqual(expected_output, output_dataset)
# Use the multi-process runner so the MultiWorkerMirroredStrategy
# combinations get real worker processes.
if __name__ == "__main__":
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 2,940 | 38.213333 | 108 | py |
keras | keras-master/keras/layers/preprocessing/category_crossing.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras category crossing preprocessing layers."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import itertools
import numpy as np
from keras.engine import base_layer
from keras.engine import base_preprocessing_layer
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.experimental.preprocessing.CategoryCrossing')
class CategoryCrossing(base_layer.Layer):
  """Category crossing layer.
  This layer concatenates multiple categorical inputs into a single categorical
  output (similar to Cartesian product). The output dtype is string.
  Usage:
  >>> inp_1 = ['a', 'b', 'c']
  >>> inp_2 = ['d', 'e', 'f']
  >>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing()
  >>> layer([inp_1, inp_2])
  <tf.Tensor: shape=(3, 1), dtype=string, numpy=
    array([[b'a_X_d'],
           [b'b_X_e'],
           [b'c_X_f']], dtype=object)>
  >>> inp_1 = ['a', 'b', 'c']
  >>> inp_2 = ['d', 'e', 'f']
  >>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing(
  ...    separator='-')
  >>> layer([inp_1, inp_2])
  <tf.Tensor: shape=(3, 1), dtype=string, numpy=
    array([[b'a-d'],
           [b'b-e'],
           [b'c-f']], dtype=object)>
  Args:
    depth: depth of input crossing. By default None, all inputs are crossed into
      one output. It can also be an int or tuple/list of ints. Passing an
      integer will create combinations of crossed outputs with depth up to that
      integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will
      create crossed outputs with depth for the specified values in the tuple,
      i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth
      equal to N1 or N2. Passing `None` means a single crossed output with all
      inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the
      output will be [a;b;c;cross(a, b);cross(bc);cross(ca)].
    separator: A string added between each input being joined. Defaults to
      '_X_'.
    name: Name to give to the layer.
    **kwargs: Keyword arguments to construct a layer.
  Input shape: a list of string or int tensors or sparse tensors of shape
    `[batch_size, d1, ..., dm]`
  Output shape: a single string or int tensor or sparse tensor of shape
    `[batch_size, d1, ..., dm]`
  Returns:
    If any input is `RaggedTensor`, the output is `RaggedTensor`.
    Else, if any input is `SparseTensor`, the output is `SparseTensor`.
    Otherwise, the output is `Tensor`.
  Example: (`depth`=None)
    If the layer receives three inputs:
    `a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]`
    the output will be a string tensor:
    `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
  Example: (`depth` is an integer)
    With the same input above, and if `depth`=2,
    the output will be a list of 6 string tensors:
    `[[b'1'], [b'4']]`
    `[[b'2'], [b'5']]`
    `[[b'3'], [b'6']]`
    `[[b'1_X_2'], [b'4_X_5']]`,
    `[[b'2_X_3'], [b'5_X_6']]`,
    `[[b'3_X_1'], [b'6_X_4']]`
  Example: (`depth` is a tuple/list of integers)
    With the same input above, and if `depth`=(2, 3)
    the output will be a list of 4 string tensors:
    `[[b'1_X_2'], [b'4_X_5']]`,
    `[[b'2_X_3'], [b'5_X_6']]`,
    `[[b'3_X_1'], [b'6_X_4']]`,
    `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
  """
  def __init__(self, depth=None, name=None, separator='_X_', **kwargs):
    super(CategoryCrossing, self).__init__(name=name, **kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell(
        'CategoryCrossing').set(True)
    self.depth = depth
    self.separator = separator
    # An int depth d is expanded to all depths 1..d; a tuple/list is used
    # as-is. When depth is None, `_depth_tuple` stays unset and `call`
    # falls back to a single all-inputs cross.
    if isinstance(depth, (tuple, list)):
      self._depth_tuple = depth
    elif depth is not None:
      self._depth_tuple = tuple([i for i in range(1, depth + 1)])
  def partial_crossing(self, partial_inputs, ragged_out, sparse_out):
    """Gets the crossed output from a partial list/tuple of inputs."""
    # If ragged_out=True, convert output from sparse to ragged.
    if ragged_out:
      # TODO(momernick): Support separator with ragged_cross.
      if self.separator != '_X_':
        raise ValueError(
            f'Non-default separator with ragged input is not implemented. '
            f'Received separator: {self.separator}.')
      return tf.ragged.cross(partial_inputs)
    elif sparse_out:
      return tf.sparse.cross(partial_inputs, separator=self.separator)
    else:
      # Dense output: cross as sparse first, then densify.
      return tf.sparse.to_dense(
          tf.sparse.cross(partial_inputs, separator=self.separator))
  def _preprocess_input(self, inp):
    """Converts python/numpy inputs to tensors and upranks rank-1 to rank-2."""
    if isinstance(inp, (list, tuple, np.ndarray)):
      inp = tf.convert_to_tensor(inp)
      if inp.shape.rank == 1:
        inp = tf.expand_dims(inp, axis=-1)
    return inp
  def call(self, inputs):
    inputs = [self._preprocess_input(inp) for inp in inputs]
    # depth=None means a single cross over all inputs.
    depth_tuple = self._depth_tuple if self.depth else (len(inputs),)
    # Ragged output wins over sparse when input types are mixed.
    ragged_out = sparse_out = False
    if any(tf_utils.is_ragged(inp) for inp in inputs):
      ragged_out = True
    elif any(isinstance(inp, tf.SparseTensor) for inp in inputs):
      sparse_out = True
    outputs = []
    for depth in depth_tuple:
      if len(inputs) < depth:
        raise ValueError(
            f'Number of inputs cannot be less than depth. Received '
            f'{len(inputs)} input tensors, and depth {depth}.')
      # Cross every size-`depth` combination of the inputs.
      for partial_inps in itertools.combinations(inputs, depth):
        partial_out = self.partial_crossing(
            partial_inps, ragged_out, sparse_out)
        outputs.append(partial_out)
    if sparse_out:
      return tf.sparse.concat(axis=1, sp_inputs=outputs)
    return tf.concat(outputs, axis=1)
  def compute_output_shape(self, input_shape):
    if not isinstance(input_shape, (tuple, list)):
      raise ValueError(
          f'A `CategoryCrossing` layer should be called on a list of inputs. '
          f'Received: input_shape={input_shape}.')
    input_shapes = input_shape
    batch_size = None
    for inp_shape in input_shapes:
      inp_tensor_shape = tf.TensorShape(inp_shape).as_list()
      if len(inp_tensor_shape) != 2:
        raise ValueError(
            f'Inputs must be rank 2. Received: input_shape={input_shape}.')
      # Batch size is taken from the first input shape.
      if batch_size is None:
        batch_size = inp_tensor_shape[0]
    # The second dimension is dynamic based on inputs.
    output_shape = [batch_size, None]
    return tf.TensorShape(output_shape)
  def compute_output_signature(self, input_spec):
    input_shapes = [x.shape for x in input_spec]
    output_shape = self.compute_output_shape(input_shapes)
    # Mirror the ragged > sparse > dense precedence used in `call`; note the
    # ragged case is currently returned as a dense TensorSpec.
    if any(
        isinstance(inp_spec, tf.RaggedTensorSpec)
        for inp_spec in input_spec):
      return tf.TensorSpec(shape=output_shape, dtype=tf.string)
    elif any(
        isinstance(inp_spec, tf.SparseTensorSpec)
        for inp_spec in input_spec):
      return tf.SparseTensorSpec(
          shape=output_shape, dtype=tf.string)
    return tf.TensorSpec(shape=output_shape, dtype=tf.string)
  def get_config(self):
    """Returns the layer config including crossing-specific arguments."""
    config = {
        'depth': self.depth,
        'separator': self.separator,
    }
    base_config = super(CategoryCrossing, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| 7,877 | 37.807882 | 80 | py |
keras | keras-master/keras/layers/preprocessing/text_vectorization_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.text_vectorization."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import backend
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import preprocessing_test_utils
from keras.layers.preprocessing import text_vectorization
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.all_strategies +
        strategy_combinations.multi_worker_mirrored_strategies,
        mode=["eager"]))
class TextVectorizationDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Checks `TextVectorization` output under each distribution strategy."""

  def test_distribution_strategy_output(self, strategy):
    """Int-mode vectorization with an explicitly set vocabulary."""
    # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
    if backend.is_tpu_strategy(strategy):
      self.skipTest("This test needs MLIR bridge on TPU.")
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = tf.data.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    # OOV tokens ("michigan") map to index 1.
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    tf.config.set_soft_device_placement(True)
    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = text_vectorization.TextVectorization(
          max_tokens=None,
          standardize=None,
          split=None,
          output_mode=text_vectorization.INT)
      layer.set_vocabulary(vocab_data)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)

  def test_distribution_strategy_output_with_adapt(self, strategy):
    """Int-mode vectorization with the vocabulary learned via `adapt`."""
    # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
    if backend.is_tpu_strategy(strategy):
      self.skipTest("This test needs MLIR bridge on TPU.")
    vocab_data = [[
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ]]
    vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = tf.data.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    tf.config.set_soft_device_placement(True)
    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = text_vectorization.TextVectorization(
          max_tokens=None,
          standardize=None,
          split=None,
          output_mode=text_vectorization.INT)
      layer.adapt(vocab_dataset)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)
# Use the multi-process runner so the MultiWorkerMirroredStrategy
# combinations get real worker processes.
if __name__ == "__main__":
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 3,981 | 37.660194 | 80 | py |
keras | keras-master/keras/layers/preprocessing/category_crossing_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for categorical preprocessing layers."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import input_layer
from keras.engine import training
from keras.layers.preprocessing import category_crossing
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoryCrossingTest(keras_parameterized.TestCase):
  """Tests `CategoryCrossing` over sparse, ragged, dense and list inputs."""

  def test_crossing_sparse_inputs(self):
    """Two sparse inputs crossed with the default '_X_' separator."""
    layer = category_crossing.CategoryCrossing()
    inputs_0 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [1, 1]],
        values=['a', 'b', 'c'],
        dense_shape=[2, 2])
    inputs_1 = tf.SparseTensor(
        indices=[[0, 1], [1, 2]], values=['d', 'e'], dense_shape=[2, 3])
    output = layer([inputs_0, inputs_1])
    self.assertAllClose(np.asarray([[0, 0], [1, 0], [1, 1]]), output.indices)
    self.assertAllEqual([b'a_X_d', b'b_X_e', b'c_X_e'], output.values)

  def test_crossing_sparse_inputs_custom_sep(self):
    """Crossing honors a user-provided separator string."""
    layer = category_crossing.CategoryCrossing(separator='_Y_')
    inputs_0 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [1, 1]],
        values=['a', 'b', 'c'],
        dense_shape=[2, 2])
    inputs_1 = tf.SparseTensor(
        indices=[[0, 1], [1, 2]], values=['d', 'e'], dense_shape=[2, 3])
    output = layer([inputs_0, inputs_1])
    self.assertAllClose(np.asarray([[0, 0], [1, 0], [1, 1]]), output.indices)
    self.assertAllEqual([b'a_Y_d', b'b_Y_e', b'c_Y_e'], output.values)

  def test_crossing_sparse_inputs_empty_sep(self):
    """An empty separator concatenates the crossed tokens directly."""
    layer = category_crossing.CategoryCrossing(separator='')
    inputs_0 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [1, 1]],
        values=['a', 'b', 'c'],
        dense_shape=[2, 2])
    inputs_1 = tf.SparseTensor(
        indices=[[0, 1], [1, 2]], values=['d', 'e'], dense_shape=[2, 3])
    output = layer([inputs_0, inputs_1])
    self.assertAllClose(np.asarray([[0, 0], [1, 0], [1, 1]]), output.indices)
    self.assertAllEqual([b'ad', b'be', b'ce'], output.values)

  def test_crossing_sparse_inputs_depth_int(self):
    """depth=1 passes each sparse input through without crossing."""
    layer = category_crossing.CategoryCrossing(depth=1)
    inputs_0 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [2, 0]],
        values=['a', 'b', 'c'],
        dense_shape=[3, 1])
    inputs_1 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [2, 0]],
        values=['d', 'e', 'f'],
        dense_shape=[3, 1])
    output = layer([inputs_0, inputs_1])
    self.assertIsInstance(output, tf.SparseTensor)
    output = tf.sparse.to_dense(output)
    expected_out = [[b'a', b'd'], [b'b', b'e'], [b'c', b'f']]
    self.assertAllEqual(expected_out, output)

  def test_crossing_sparse_inputs_depth_tuple(self):
    """depth=(2, 3) emits all pairwise crosses plus the triple cross."""
    layer = category_crossing.CategoryCrossing(depth=(2, 3))
    inputs_0 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [2, 0]],
        values=['a', 'b', 'c'],
        dense_shape=[3, 1])
    inputs_1 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [2, 0]],
        values=['d', 'e', 'f'],
        dense_shape=[3, 1])
    inputs_2 = tf.SparseTensor(
        indices=[[0, 0], [1, 0], [2, 0]],
        values=['g', 'h', 'i'],
        dense_shape=[3, 1])
    inp_0_t = input_layer.Input(shape=(1,), sparse=True, dtype=tf.string)
    inp_1_t = input_layer.Input(shape=(1,), sparse=True, dtype=tf.string)
    inp_2_t = input_layer.Input(shape=(1,), sparse=True, dtype=tf.string)
    out_t = layer([inp_0_t, inp_1_t, inp_2_t])
    model = training.Model([inp_0_t, inp_1_t, inp_2_t], out_t)
    output = model.predict([inputs_0, inputs_1, inputs_2])
    self.assertIsInstance(output, tf.SparseTensor)
    output = tf.sparse.to_dense(output)
    expected_outputs_0 = [[b'a_X_d', b'a_X_g', b'd_X_g', b'a_X_d_X_g']]
    expected_outputs_1 = [[b'b_X_e', b'b_X_h', b'e_X_h', b'b_X_e_X_h']]
    expected_outputs_2 = [[b'c_X_f', b'c_X_i', b'f_X_i', b'c_X_f_X_i']]
    expected_out = tf.concat(
        [expected_outputs_0, expected_outputs_1, expected_outputs_2], axis=0)
    self.assertAllEqual(expected_out, output)

  def test_crossing_ragged_inputs(self):
    """Ragged inputs cross per-row and stay ragged."""
    inputs_0 = tf.ragged.constant(
        [['omar', 'skywalker'], ['marlo']],
        dtype=tf.string)
    inputs_1 = tf.ragged.constant(
        [['a'], ['b']],
        dtype=tf.string)
    inp_0_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    inp_1_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    non_hashed_layer = category_crossing.CategoryCrossing()
    out_t = non_hashed_layer([inp_0_t, inp_1_t])
    model = training.Model(inputs=[inp_0_t, inp_1_t], outputs=out_t)
    expected_output = [[b'omar_X_a', b'skywalker_X_a'], [b'marlo_X_b']]
    self.assertAllEqual(expected_output, model.predict([inputs_0, inputs_1]))

  def test_crossing_ragged_inputs_depth_int(self):
    """Integer depth over ragged inputs: depth=1 passthrough, depth=2 crosses."""
    layer = category_crossing.CategoryCrossing(depth=1)
    inputs_0 = tf.ragged.constant([['a'], ['b'], ['c']])
    inputs_1 = tf.ragged.constant([['d'], ['e'], ['f']])
    output = layer([inputs_0, inputs_1])
    expected_output = [[b'a', b'd'], [b'b', b'e'], [b'c', b'f']]
    self.assertIsInstance(output, tf.RaggedTensor)
    self.assertAllEqual(expected_output, output)
    layer = category_crossing.CategoryCrossing(depth=2)
    inp_0_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    inp_1_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    out_t = layer([inp_0_t, inp_1_t])
    model = training.Model([inp_0_t, inp_1_t], out_t)
    expected_output = [[b'a', b'd', b'a_X_d'], [b'b', b'e', b'b_X_e'],
                       [b'c', b'f', b'c_X_f']]
    self.assertAllEqual(expected_output, model.predict([inputs_0, inputs_1]))

  def test_crossing_ragged_inputs_depth_tuple(self):
    """Tuple depth over ragged inputs yields pairwise and triple crosses."""
    layer = category_crossing.CategoryCrossing(depth=[2, 3])
    inputs_0 = tf.ragged.constant([['a'], ['b'], ['c']])
    inputs_1 = tf.ragged.constant([['d'], ['e'], ['f']])
    inputs_2 = tf.ragged.constant([['g'], ['h'], ['i']])
    inp_0_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    inp_1_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    inp_2_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    out_t = layer([inp_0_t, inp_1_t, inp_2_t])
    model = training.Model([inp_0_t, inp_1_t, inp_2_t], out_t)
    expected_output = [[b'a_X_d', b'a_X_g', b'd_X_g', b'a_X_d_X_g'],
                       [b'b_X_e', b'b_X_h', b'e_X_h', b'b_X_e_X_h'],
                       [b'c_X_f', b'c_X_i', b'f_X_i', b'c_X_f_X_i']]
    output = model.predict([inputs_0, inputs_1, inputs_2])
    self.assertIsInstance(output, tf.RaggedTensor)
    self.assertAllEqual(expected_output, output)

  def test_crossing_with_dense_inputs(self):
    """Dense numpy inputs cross every combination of row elements."""
    layer = category_crossing.CategoryCrossing()
    inputs_0 = np.asarray([[1, 2]])
    inputs_1 = np.asarray([[1, 3]])
    output = layer([inputs_0, inputs_1])
    self.assertAllEqual([[b'1_X_1', b'1_X_3', b'2_X_1', b'2_X_3']], output)

  def test_crossing_with_list_inputs(self):
    """Plain python lists and 1-D arrays are accepted and upranked."""
    layer = category_crossing.CategoryCrossing()
    inputs_0 = [[1, 2]]
    inputs_1 = [[1, 3]]
    output = layer([inputs_0, inputs_1])
    self.assertAllEqual([[b'1_X_1', b'1_X_3', b'2_X_1', b'2_X_3']], output)
    # Rank-1 inputs are treated as a column of single-element samples.
    inputs_0 = [1, 2]
    inputs_1 = [1, 3]
    output = layer([inputs_0, inputs_1])
    self.assertAllEqual([[b'1_X_1'], [b'2_X_3']], output)
    inputs_0 = np.asarray([1, 2])
    inputs_1 = np.asarray([1, 3])
    output = layer([inputs_0, inputs_1])
    self.assertAllEqual([[b'1_X_1'], [b'2_X_3']], output)

  def test_crossing_dense_inputs_depth_int(self):
    """Integer depth over dense inputs: depth=1 passthrough, depth=2 crosses."""
    layer = category_crossing.CategoryCrossing(depth=1)
    inputs_0 = tf.constant([['a'], ['b'], ['c']])
    inputs_1 = tf.constant([['d'], ['e'], ['f']])
    output = layer([inputs_0, inputs_1])
    expected_output = [[b'a', b'd'], [b'b', b'e'], [b'c', b'f']]
    self.assertAllEqual(expected_output, output)
    layer = category_crossing.CategoryCrossing(depth=2)
    inp_0_t = input_layer.Input(shape=(1,), dtype=tf.string)
    inp_1_t = input_layer.Input(shape=(1,), dtype=tf.string)
    out_t = layer([inp_0_t, inp_1_t])
    model = training.Model([inp_0_t, inp_1_t], out_t)
    crossed_output = [[b'a_X_d'], [b'b_X_e'], [b'c_X_f']]
    expected_output = tf.concat([expected_output, crossed_output],
                                axis=1)
    self.assertAllEqual(expected_output, model.predict([inputs_0, inputs_1]))

  def test_crossing_dense_inputs_depth_tuple(self):
    """Tuple depth over dense inputs yields pairwise and triple crosses."""
    layer = category_crossing.CategoryCrossing(depth=[2, 3])
    inputs_0 = tf.constant([['a'], ['b'], ['c']])
    inputs_1 = tf.constant([['d'], ['e'], ['f']])
    inputs_2 = tf.constant([['g'], ['h'], ['i']])
    inp_0_t = input_layer.Input(shape=(1,), dtype=tf.string)
    inp_1_t = input_layer.Input(shape=(1,), dtype=tf.string)
    inp_2_t = input_layer.Input(shape=(1,), dtype=tf.string)
    out_t = layer([inp_0_t, inp_1_t, inp_2_t])
    model = training.Model([inp_0_t, inp_1_t, inp_2_t], out_t)
    expected_outputs_0 = [[b'a_X_d', b'a_X_g', b'd_X_g', b'a_X_d_X_g']]
    expected_outputs_1 = [[b'b_X_e', b'b_X_h', b'e_X_h', b'b_X_e_X_h']]
    expected_outputs_2 = [[b'c_X_f', b'c_X_i', b'f_X_i', b'c_X_f_X_i']]
    expected_output = tf.concat(
        [expected_outputs_0, expected_outputs_1, expected_outputs_2], axis=0)
    self.assertAllEqual(expected_output,
                        model.predict([inputs_0, inputs_1, inputs_2]))

  def test_crossing_compute_output_signature(self):
    """Output signature keeps batch size and uses a string dtype."""
    input_shapes = [
        tf.TensorShape([2, 2]),
        tf.TensorShape([2, 3])
    ]
    input_specs = [
        tf.TensorSpec(input_shape, tf.string)
        for input_shape in input_shapes
    ]
    layer = category_crossing.CategoryCrossing()
    output_spec = layer.compute_output_signature(input_specs)
    self.assertEqual(output_spec.shape.dims[0], input_shapes[0].dims[0])
    self.assertEqual(output_spec.dtype, tf.string)

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """Round-trips the layer through get_config/from_config preserving name."""
    layer = category_crossing.CategoryCrossing(depth=2, name='hashing')
    config = layer.get_config()
    layer_1 = category_crossing.CategoryCrossing.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
    layer = category_crossing.CategoryCrossing(name='hashing')
    config = layer.get_config()
    layer_1 = category_crossing.CategoryCrossing.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
# Standard test entry point.
if __name__ == '__main__':
  tf.test.main()
| 11,105 | 43.247012 | 80 | py |
keras | keras-master/keras/layers/preprocessing/preprocessing_stage_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing stage tests."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import time
import numpy as np
from keras import keras_parameterized
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_stage
from keras.layers.preprocessing import preprocessing_test_utils
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class PreprocessingStageTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests that `PreprocessingStage.adapt` visits its layers in order."""

  def test_adapt(self):
    """Adapting a stage adapts each layer once, in sequence."""
    class PL(base_preprocessing_layer.PreprocessingLayer):
      # Instrumented layer recording when and how often `adapt` runs.

      def __init__(self, **kwargs):
        self.adapt_time = None
        self.adapt_count = 0
        super(PL, self).__init__(**kwargs)

      def adapt(self, data, reset_state=True):
        self.adapt_time = time.time()
        self.adapt_count += 1

      def call(self, inputs):
        return inputs + 1.

    # Test with NumPy array
    stage = preprocessing_stage.PreprocessingStage([
        PL(),
        PL(),
        PL(),
    ])
    stage.adapt(np.ones((3, 4)))
    self.assertEqual(stage.layers[0].adapt_count, 1)
    self.assertEqual(stage.layers[1].adapt_count, 1)
    self.assertEqual(stage.layers[2].adapt_count, 1)
    self.assertLessEqual(stage.layers[0].adapt_time, stage.layers[1].adapt_time)
    self.assertLessEqual(stage.layers[1].adapt_time, stage.layers[2].adapt_time)
    # Check call
    y = stage(tf.ones((3, 4)))
    self.assertAllClose(y, np.ones((3, 4)) + 3.)
    # Test with dataset
    adapt_data = tf.data.Dataset.from_tensor_slices(np.ones((3, 10)))
    adapt_data = adapt_data.batch(2)  # 5 batches of 2 samples
    stage.adapt(adapt_data)
    self.assertEqual(stage.layers[0].adapt_count, 2)
    self.assertEqual(stage.layers[1].adapt_count, 2)
    self.assertEqual(stage.layers[2].adapt_count, 2)
    self.assertLess(stage.layers[0].adapt_time, stage.layers[1].adapt_time)
    self.assertLess(stage.layers[1].adapt_time, stage.layers[2].adapt_time)
    # Test error with bad data
    with self.assertRaisesRegex(ValueError, 'requires a '):
      stage.adapt(None)
# Standard test entry point.
if __name__ == '__main__':
  tf.test.main()
| 2,868 | 33.154762 | 80 | py |
keras | keras-master/keras/layers/preprocessing/category_encoding.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras CategoryEncoding preprocessing layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras import backend
from keras.engine import base_layer
from keras.engine import base_preprocessing_layer
from keras.utils import layer_utils
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# Output-mode string constants accepted by `CategoryEncoding(output_mode=...)`.
INT = "int"
ONE_HOT = "one_hot"
MULTI_HOT = "multi_hot"
COUNT = "count"
@keras_export("keras.layers.CategoryEncoding",
"keras.layers.experimental.preprocessing.CategoryEncoding")
class CategoryEncoding(base_layer.Layer):
"""Category encoding layer.
This layer provides options for condensing data into a categorical encoding
when the total number of tokens are known in advance. It accepts integer
values as inputs, and it outputs a dense representation of those
inputs. For integer inputs where the total number of tokens is not known,
use instead `tf.keras.layers.IntegerLookup`.
Examples:
**One-hot encoding data**
>>> layer = tf.keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="one_hot")
>>> layer([3, 2, 0, 1])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[0., 0., 0., 1.],
[0., 0., 1., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]], dtype=float32)>
**Multi-hot encoding data**
>>> layer = tf.keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="multi_hot")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
**Using weighted inputs in `"count"` mode**
>>> layer = tf.keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
<tf.Tensor: shape=(4, 4), dtype=float64, numpy=
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]])>
Args:
num_tokens: The total number of tokens the layer should support. All inputs
to the layer must integers in the range `0 <= value < num_tokens`, or an
error will be thrown.
output_mode: Specification for the output of the layer.
Defaults to `"multi_hot"`. Values can be `"one_hot"`, `"multi_hot"` or
`"count"`, configuring the layer as follows:
- `"one_hot"`: Encodes each individual element in the input into an
array of `num_tokens` size, containing a 1 at the element index. If
the last dimension is size 1, will encode on that dimension. If the
last dimension is not size 1, will append a new dimension for the
encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
of `num_tokens` size, containing a 1 for each vocabulary term present
in the sample. Treats the last dimension as the sample dimension, if
input shape is `(..., sample_length)`, output shape will be
`(..., num_tokens)`.
- `"count"`: Like `"multi_hot"`, but the int array contains a count of
the number of times the token at that index appeared in the sample.
For all output modes, currently only output up to rank 2 is supported.
sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
`Tensor`. Defaults to `False`.
Call arguments:
inputs: A 1D or 2D tensor of integer inputs.
count_weights: A tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode. Not used in
`"multi_hot"` or `"one_hot"` modes.
"""
def __init__(self,
num_tokens=None,
output_mode="multi_hot",
sparse=False,
**kwargs):
# max_tokens is an old name for the num_tokens arg we continue to support
# because of usage.
if "max_tokens" in kwargs:
logging.warning(
"max_tokens is deprecated, please use num_tokens instead.")
num_tokens = kwargs["max_tokens"]
del kwargs["max_tokens"]
super(CategoryEncoding, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell("CategoryEncoding").set(
True)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = MULTI_HOT
# 'output_mode' must be one of (COUNT, ONE_HOT, MULTI_HOT)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(COUNT, ONE_HOT, MULTI_HOT),
layer_name="CategoryEncoding",
arg_name="output_mode")
if num_tokens is None:
raise ValueError("num_tokens must be set to use this layer. If the "
"number of tokens is not known beforehand, use the "
"IntegerLookup layer instead.")
if num_tokens < 1:
raise ValueError(
f"`num_tokens` must be >= 1. Received: num_tokens={num_tokens}.")
self.num_tokens = num_tokens
self.output_mode = output_mode
self.sparse = sparse
def compute_output_shape(self, input_shape):
if not input_shape:
return tf.TensorShape([self.num_tokens])
if self.output_mode == ONE_HOT and input_shape[-1] != 1:
return tf.TensorShape(input_shape + [self.num_tokens])
else:
return tf.TensorShape(input_shape[:-1] + [self.num_tokens])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
if self.sparse:
return tf.SparseTensorSpec(
shape=output_shape, dtype=tf.int64)
else:
return tf.TensorSpec(shape=output_shape, dtype=tf.int64)
def get_config(self):
config = {
"num_tokens": self.num_tokens,
"output_mode": self.output_mode,
"sparse": self.sparse,
}
base_config = super(CategoryEncoding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
  def call(self, inputs, count_weights=None):
    """Encodes `inputs` into the configured count/one-hot/multi-hot output.

    Args:
      inputs: Integer tensor (lists and np.ndarrays are converted). The
        runtime assertion below requires all values in `[0, num_tokens)`.
      count_weights: Optional weights summed per token; only valid when
        `output_mode` is `'count'`.

    Returns:
      The encoded tensor — sparse if `self.sparse` is True, dense otherwise.

    Raises:
      ValueError: If the (possibly upranked) input has rank > 2, or if
        `count_weights` is passed with a non-count `output_mode`.
    """
    # Accept plain Python lists and numpy arrays for convenience.
    if isinstance(inputs, (list, np.ndarray)):
      inputs = tf.convert_to_tensor(inputs)

    # Helper that upranks either dense or sparse inputs uniformly.
    def expand_dims(inputs, axis):
      if tf_utils.is_sparse(inputs):
        return tf.sparse.expand_dims(inputs, axis)
      else:
        return tf.expand_dims(inputs, axis)

    original_shape = inputs.shape
    # In all cases, we should uprank scalar input to a single sample.
    if inputs.shape.rank == 0:
      inputs = expand_dims(inputs, -1)
    # One hot will unprank only if the final output dimension is not already 1.
    if self.output_mode == ONE_HOT:
      if inputs.shape[-1] != 1:
        inputs = expand_dims(inputs, -1)

    # TODO(b/190445202): remove output rank restriction.
    if inputs.shape.rank > 2:
      raise ValueError(
          "Received input shape {}, which would result in output rank {}. "
          "Currently only outputs up to rank 2 are supported.".format(
              original_shape, inputs.shape.rank))

    if count_weights is not None and self.output_mode != COUNT:
      raise ValueError(
          "`count_weights` is not used when `output_mode` is not `'count'`. "
          "Received `count_weights={}`.".format(count_weights))

    out_depth = self.num_tokens
    # One-hot and multi-hot clamp each bin to 0/1; count mode accumulates.
    binary_output = self.output_mode in (MULTI_HOT, ONE_HOT)
    # Sparse tensors carry their data in `.values`; reduce over that instead.
    if isinstance(inputs, tf.SparseTensor):
      max_value = tf.reduce_max(inputs.values)
      min_value = tf.reduce_min(inputs.values)
    else:
      max_value = tf.reduce_max(inputs)
      min_value = tf.reduce_min(inputs)
    # Graph-mode guard: every value must satisfy 0 <= value < num_tokens.
    condition = tf.logical_and(
        tf.greater(
            tf.cast(out_depth, max_value.dtype), max_value),
        tf.greater_equal(
            min_value, tf.cast(0, min_value.dtype)))
    assertion = tf.Assert(condition, [
        "Input values must be in the range 0 <= values < num_tokens"
        " with num_tokens={}".format(out_depth)
    ])
    # The bincount below is only meaningful if the range assertion holds.
    with tf.control_dependencies([assertion]):
      if self.sparse:
        return sparse_bincount(inputs, out_depth, binary_output,
                               count_weights)
      else:
        return dense_bincount(inputs, out_depth, binary_output,
                              count_weights)
def sparse_bincount(inputs, out_depth, binary_output, count_weights=None):
  """Apply binary or count encoding to an input and return a sparse tensor."""
  counts = tf.sparse.bincount(
      inputs,
      weights=count_weights,
      minlength=out_depth,
      maxlength=out_depth,
      axis=-1,
      binary_output=binary_output)
  if inputs.shape.rank == 1:
    # Rank-1 input encodes to a single vector of length `out_depth`.
    dense_shape = (out_depth,)
  else:
    # Batched input: match floatx and fix the batch dimension dynamically.
    counts = tf.cast(counts, backend.floatx())
    dense_shape = (tf.shape(counts)[0], out_depth)
  return tf.SparseTensor(
      indices=counts.indices, values=counts.values, dense_shape=dense_shape)
def dense_bincount(inputs, out_depth, binary_output, count_weights=None):
  """Apply binary or count encoding to an input."""
  counts = tf.math.bincount(
      inputs,
      weights=count_weights,
      minlength=out_depth,
      maxlength=out_depth,
      dtype=backend.floatx(),
      axis=-1,
      binary_output=binary_output)
  # Bincount loses static shape information; restore what can be inferred.
  if inputs.shape.rank == 1:
    static_shape = tf.TensorShape((out_depth,))
  else:
    static_shape = tf.TensorShape((inputs.shape.as_list()[0], out_depth))
  counts.set_shape(static_shape)
  return counts
| 10,324 | 37.099631 | 80 | py |
keras | keras-master/keras/layers/preprocessing/discretization.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras discretization preprocessing layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras.engine import base_preprocessing_layer
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
def summarize(values, epsilon):
  """Reduce a 1D sequence of values to a quantile summary.

  Based on numpy.quantiles, but structured so partial summaries from multiple
  data sets can later be merged. The target number of bins is the reciprocal
  of `epsilon`; values are sampled at evenly spaced index intervals to hit
  that target. If the target exceeds the number of values, every value is
  returned (each with weight 1).

  Args:
    values: 1D `np.ndarray` to be summarized.
    epsilon: A `'float32'` that determines the approximate desired precision.

  Returns:
    A 2D `np.ndarray` summary: row 0 holds the sampled partition values,
    row 1 the corresponding weights (counts).
  """
  flat = tf.sort(tf.reshape(values, [-1]))
  num_elements = tf.cast(tf.size(flat), tf.float32)
  target_bins = 1. / epsilon
  increment = tf.cast(num_elements / target_bins, tf.int32)
  # Guard against a zero step when there are fewer values than target bins.
  step = tf.maximum(increment, 1)
  boundaries = flat[increment::step]
  # Each sampled boundary represents `step` original values.
  weights = tf.ones_like(boundaries) * tf.cast(step, tf.float32)
  return tf.stack([boundaries, weights])
def compress(summary, epsilon):
  """Compress a summary to within `epsilon` accuracy.

  Compression keeps summary sizes small after merging and is also used to
  produce the final boundaries. New bins are found by interpolating cumulative
  weight percentages from the large summary; differencing consecutive
  cumulative weights yields each new bin's weight.

  Args:
    summary: 2D `np.ndarray` summary to be compressed.
    epsilon: A `'float32'` that determines the approximate desired precision.

  Returns:
    A 2D `np.ndarray` compressed summary: row 0 holds the interpolated
    partition values, row 1 the weights (counts).
  """
  # TODO(b/184863356): remove the numpy escape hatch here.
  compress_fn = lambda s: _compress_summary_numpy(s, epsilon)
  return tf.numpy_function(compress_fn, [summary], tf.float32)
def _compress_summary_numpy(summary, epsilon):
"""Compress a summary with numpy."""
if summary.shape[1] * epsilon < 1:
return summary
percents = epsilon + np.arange(0.0, 1.0, epsilon)
cum_weights = summary[1].cumsum()
cum_weight_percents = cum_weights / cum_weights[-1]
new_bins = np.interp(percents, cum_weight_percents, summary[0])
cum_weights = np.interp(percents, cum_weight_percents, cum_weights)
new_weights = cum_weights - np.concatenate((np.array([0]), cum_weights[:-1]))
summary = np.stack((new_bins, new_weights))
return summary.astype(np.float32)
def merge_summaries(prev_summary, next_summary, epsilon):
  """Weighted merge sort of summaries.

  Given two summaries of distinct data, merges (and compresses) them while
  staying within `epsilon` error tolerance.

  Args:
    prev_summary: 2D `np.ndarray` summary to be merged with `next_summary`.
    next_summary: 2D `np.ndarray` summary to be merged with `prev_summary`.
    epsilon: A float that determines the approximate desired precision.

  Returns:
    A 2-D `np.ndarray` merged summary: row 0 holds the interpolated
    partition values, row 1 the weights (counts).
  """
  combined = tf.concat((prev_summary, next_summary), axis=1)
  # Re-sort columns by bin value, then compress back to tolerance.
  order = tf.argsort(combined[0])
  combined = tf.gather(combined, order, axis=1)
  return compress(combined, epsilon)
def get_bin_boundaries(summary, num_bins):
  """Compresses `summary` to `num_bins` bins and returns the boundaries."""
  compressed = compress(summary, 1.0 / num_bins)
  # Drop the rightmost edge; the final bucket implicitly extends to +inf.
  return compressed[0, :-1]
@keras_export("keras.layers.Discretization",
              "keras.layers.experimental.preprocessing.Discretization")
class Discretization(base_preprocessing_layer.PreprocessingLayer):
  """Buckets data into discrete ranges.

  This layer will place each element of its input data into one of several
  contiguous ranges and output an integer index indicating which range each
  element was placed in.

  Input shape:
    Any `tf.Tensor` or `tf.RaggedTensor` of dimension 2 or higher.

  Output shape:
    Same as input shape.

  Attributes:
    bin_boundaries: A list of bin boundaries. The leftmost and rightmost bins
      will always extend to `-inf` and `inf`, so `bin_boundaries=[0., 1., 2.]`
      generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. If
      this option is set, `adapt` should not be called.
    num_bins: The integer number of bins to compute. If this option is set,
      `adapt` should be called to learn the bin boundaries.
    epsilon: Error tolerance, typically a small fraction close to zero (e.g.
      0.01). Higher values of epsilon increase the quantile approximation, and
      hence result in more unequal buckets, but could improve performance
      and resource consumption.

  Examples:

  Bucketize float values based on provided buckets.

  >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
  >>> layer = tf.keras.layers.Discretization(bin_boundaries=[0., 1., 2.])
  >>> layer(input)
  <tf.Tensor: shape=(2, 4), dtype=int64, numpy=
  array([[0, 2, 3, 1],
         [1, 3, 2, 1]], dtype=int64)>

  Bucketize float values based on a number of buckets to compute.

  >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
  >>> layer = tf.keras.layers.Discretization(num_bins=4, epsilon=0.01)
  >>> layer.adapt(input)
  >>> layer(input)
  <tf.Tensor: shape=(2, 4), dtype=int64, numpy=
  array([[0, 2, 3, 2],
         [1, 3, 3, 1]], dtype=int64)>
  """

  def __init__(self,
               bin_boundaries=None,
               num_bins=None,
               epsilon=0.01,
               **kwargs):
    # bins is a deprecated arg for setting bin_boundaries or num_bins that still
    # has some usage.
    if "bins" in kwargs:
      logging.warning(
          "bins is deprecated, please use bin_boundaries or num_bins instead.")
      if isinstance(kwargs["bins"], int) and num_bins is None:
        num_bins = kwargs["bins"]
      elif bin_boundaries is None:
        bin_boundaries = kwargs["bins"]
      del kwargs["bins"]
    super().__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell("Discretization").set(
        True)
    if num_bins is not None and num_bins < 0:
      # Fixed: error message previously read "must be must be".
      raise ValueError("`num_bins` must be greater than or equal to 0. "
                       "You passed `num_bins={}`".format(num_bins))
    if num_bins is not None and bin_boundaries is not None:
      raise ValueError("Both `num_bins` and `bin_boundaries` should not be "
                       "set. You passed `num_bins={}` and "
                       "`bin_boundaries={}`".format(num_bins, bin_boundaries))
    bin_boundaries = self._convert_to_list(bin_boundaries)
    # Keep the user-supplied boundaries separately so we can detect misuse of
    # `adapt` and serialize exactly what was passed to the constructor.
    self.input_bin_boundaries = bin_boundaries
    self.bin_boundaries = bin_boundaries if bin_boundaries is not None else []
    self.num_bins = num_bins
    self.epsilon = epsilon

  def build(self, input_shape):
    """Creates the adaptable state (the quantile summary) when needed."""
    super().build(input_shape)

    if self.input_bin_boundaries is not None:
      # Boundaries were given explicitly; there is nothing to adapt.
      return

    # Summary contains two equal length vectors of bins at index 0 and weights
    # at index 1.
    self.summary = self.add_weight(
        name="summary",
        shape=(2, None),
        dtype=tf.float32,
        initializer=lambda shape, dtype: [[], []],  # pylint: disable=unused-argument
        trainable=False)

  def update_state(self, data):
    """Accumulates quantile statistics for a batch of `data` during `adapt`.

    Raises:
      ValueError: If the layer was constructed with explicit `bin_boundaries`.
      RuntimeError: If called before `build`.
    """
    if self.input_bin_boundaries is not None:
      raise ValueError(
          "Cannot adapt a Discretization layer that has been initialized with "
          "`bin_boundaries`, use `num_bins` instead. You passed "
          "`bin_boundaries={}`.".format(self.input_bin_boundaries))

    if not self.built:
      raise RuntimeError("`build` must be called before `update_state`.")

    data = tf.convert_to_tensor(data)
    if data.dtype != tf.float32:
      data = tf.cast(data, tf.float32)
    summary = summarize(data, self.epsilon)
    self.summary.assign(merge_summaries(summary, self.summary, self.epsilon))

  def finalize_state(self):
    """Converts the accumulated summary into concrete bin boundaries."""
    if self.input_bin_boundaries is not None or not self.built:
      return

    # The bucketize op only support list boundaries.
    self.bin_boundaries = self._convert_to_list(
        get_bin_boundaries(self.summary, self.num_bins))

  def reset_state(self):  # pylint: disable=method-hidden
    """Clears the accumulated quantile summary."""
    if self.input_bin_boundaries is not None or not self.built:
      return

    self.summary.assign([[], []])

  def get_config(self):
    """Returns the serialization config for this layer."""
    config = super().get_config()
    config.update({
        "bin_boundaries": self.input_bin_boundaries,
        "num_bins": self.num_bins,
        "epsilon": self.epsilon,
    })
    return config

  def compute_output_shape(self, input_shape):
    # Bucketizing is elementwise, so the shape is unchanged.
    return input_shape

  def compute_output_signature(self, input_spec):
    """Returns the output `TypeSpec`; outputs are always int64."""
    output_shape = self.compute_output_shape(input_spec.shape.as_list())
    output_dtype = tf.int64
    if isinstance(input_spec, tf.SparseTensorSpec):
      return tf.SparseTensorSpec(
          shape=output_shape, dtype=output_dtype)
    return tf.TensorSpec(shape=output_shape, dtype=output_dtype)

  def call(self, inputs):
    """Maps each element of `inputs` to its integer bin index."""

    def bucketize(inputs):
      outputs = tf.raw_ops.Bucketize(
          input=inputs, boundaries=self.bin_boundaries)
      # All other preprocessing layers use int64 for int output, so we conform
      # here. Sadly the underlying op only supports int32, so we need to cast.
      return tf.cast(outputs, tf.int64)

    if tf_utils.is_ragged(inputs):
      integer_buckets = tf.ragged.map_flat_values(bucketize, inputs)
      # Ragged map_flat_values doesn't touch the non-values tensors in the
      # ragged composite tensor. If this op is the only op a Keras model,
      # this can cause errors in Graph mode, so wrap the tensor in an identity.
      return tf.identity(integer_buckets)
    elif tf_utils.is_sparse(inputs):
      return tf.SparseTensor(
          indices=tf.identity(inputs.indices),
          values=bucketize(inputs.values),
          dense_shape=tf.identity(inputs.dense_shape))
    else:
      return bucketize(inputs)

  def _convert_to_list(self, inputs):
    """Coerces tensors/arrays to a plain Python list; `None` passes through."""
    if tf.is_tensor(inputs):
      inputs = inputs.numpy()
    if isinstance(inputs, (np.ndarray)):
      inputs = inputs.tolist()
      inputs = list(inputs)
    return inputs
| 11,425 | 37.996587 | 86 | py |
keras | keras-master/keras/layers/preprocessing/integer_lookup.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras string lookup preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import index_lookup
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export(
    "keras.layers.IntegerLookup",
    "keras.layers.experimental.preprocessing.IntegerLookup",
    v1=[])
class IntegerLookup(index_lookup.IndexLookup):
  """Reindex integer inputs to be in a contiguous range, via a dict lookup.

  This layer maps a set of arbitrary integer input tokens into indexed
  integer output via a table-based vocabulary lookup. The layer's output indices
  will be contiguously arranged up to the maximum vocab size, even if the input
  tokens are non-contiguous or unbounded. The layer supports multiple options
  for encoding the output via `output_mode`, and has optional support for
  out-of-vocabulary (OOV) tokens and masking.

  The vocabulary for the layer must be either supplied on construction or
  learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
  determine the frequency of individual integer tokens, and create a vocabulary
  from them. If the vocabulary is capped in size, the most frequent tokens will
  be used to create the vocabulary and all others will be treated as OOV.

  There are two possible output modes for the layer.
  When `output_mode` is `"int"`,
  input integers are converted to their index in the vocabulary (an integer).
  When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input integers
  are encoded into an array where each dimension corresponds to an element in
  the vocabulary.

  The vocabulary can optionally contain a mask token as well as an OOV token
  (which can optionally occupy multiple indices in the vocabulary, as set
  by `num_oov_indices`).
  The position of these tokens in the vocabulary is fixed. When `output_mode` is
  `"int"`, the vocabulary will begin with the mask token at index 0, followed by
  OOV indices, followed by the rest of the vocabulary. When `output_mode` is
  `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with OOV
  indices and instances of the mask token will be dropped.

  Args:
    max_tokens: Maximum size of the vocabulary for this layer. This should only
      be specified when adapting the vocabulary or when setting
      `pad_to_max_tokens=True`. If None, there is no cap on the size of the
      vocabulary. Note that this size includes the OOV and mask tokens. Defaults
      to None.
    num_oov_indices: The number of out-of-vocabulary tokens to use. If this
      value is more than 1, OOV inputs are modulated to determine their OOV
      value. If this value is 0, OOV inputs will cause an error when calling the
      layer. Defaults to 1.
    mask_token: An integer token that represents masked inputs. When
      `output_mode` is `"int"`, the token is included in vocabulary and mapped
      to index 0. In other output modes, the token will not appear in the
      vocabulary and instances of the mask token in the input will be dropped.
      If set to None, no mask term will be added. Defaults to None.
    oov_token: Only used when `invert` is True. The token to return for OOV
      indices. Defaults to -1.
    vocabulary: Optional. Either an array of integers or a string path to a text
      file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D
      tensor containing the integer vocabulary terms. If passing a file path,
      the file should contain one line per term in the vocabulary. If this
      argument is set, there is no need to `adapt` the layer.
    invert: Only valid when `output_mode` is `"int"`. If True, this layer will
      map indices to vocabulary items instead of mapping vocabulary items to
      indices. Defaults to False.
    output_mode: Specification for the output of the layer. Defaults to `"int"`.
      Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
      `"tf_idf"` configuring the layer as follows:
        - `"int"`: Return the vocabulary indices of the input tokens.
        - `"one_hot"`: Encodes each individual element in the input into an
          array the same size as the vocabulary, containing a 1 at the element
          index. If the last dimension is size 1, will encode on that dimension.
          If the last dimension is not size 1, will append a new dimension for
          the encoded output.
        - `"multi_hot"`: Encodes each sample in the input into a single array
          the same size as the vocabulary, containing a 1 for each vocabulary
          term present in the sample. Treats the last dimension as the sample
          dimension, if input shape is (..., sample_length), output shape will
          be (..., num_tokens).
        - `"count"`: As `"multi_hot"`, but the int array contains a count of the
          number of times the token at that index appeared in the sample.
        - `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
          find the value in each token slot.
      For `"int"` output, any shape of input and output is supported. For all
      other output modes, currently only output up to rank 2 is supported.
    pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
      `"count"`, or `"tf_idf"`. If True, the output will have its feature axis
      padded to `max_tokens` even if the number of unique tokens in the
      vocabulary is less than max_tokens, resulting in a tensor of shape
      [batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
    sparse: Boolean. Only applicable when `output_mode` is `"multi_hot"`,
      `"count"`, or `"tf_idf"`. If True, returns a `SparseTensor` instead of a
      dense `Tensor`. Defaults to False.

  Examples:

  **Creating a lookup layer with a known vocabulary**

  This example creates a lookup layer with a pre-existing vocabulary.

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])  # Note OOV tokens
  >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab)
  >>> layer(data)
  <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
  array([[1, 3, 4],
         [4, 0, 2]])>

  **Creating a lookup layer with an adapted vocabulary**

  This example creates a lookup layer and generates the vocabulary by analyzing
  the dataset.

  >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])
  >>> layer = tf.keras.layers.IntegerLookup()
  >>> layer.adapt(data)
  >>> layer.get_vocabulary()
  [-1, 42, 1138, 1000, 36, 12]

  Note that the OOV token -1 have been added to the vocabulary. The remaining
  tokens are sorted by frequency (42, which has 2 occurrences, is first) then
  by inverse sort order.

  >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])
  >>> layer = tf.keras.layers.IntegerLookup()
  >>> layer.adapt(data)
  >>> layer(data)
  <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
  array([[5, 2, 1],
         [1, 3, 4]])>

  **Lookups with multiple OOV indices**

  This example demonstrates how to use a lookup layer with multiple OOV indices.
  When a layer is created with more than one OOV index, any OOV tokens are
  hashed into the number of OOV buckets, distributing OOV tokens in a
  deterministic fashion across the set.

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([[12, 1138, 42], [37, 1000, 36]])
  >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab, num_oov_indices=2)
  >>> layer(data)
  <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
  array([[2, 4, 5],
         [1, 0, 3]])>

  Note that the output for OOV token 37 is 1, while the output for OOV token
  1000 is 0. The in-vocab terms have their output index increased by 1 from
  earlier examples (12 maps to 2, etc) in order to make space for the extra OOV
  token.

  **One-hot output**

  Configure the layer with `output_mode='one_hot'`. Note that the first
  `num_oov_indices` dimensions in the one_hot encoding represent OOV values.

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([12, 36, 1138, 42, 7])  # Note OOV tokens
  >>> layer = tf.keras.layers.IntegerLookup(
  ...     vocabulary=vocab, output_mode='one_hot')
  >>> layer(data)
  <tf.Tensor: shape=(5, 5), dtype=float32, numpy=
    array([[0., 1., 0., 0., 0.],
           [0., 0., 1., 0., 0.],
           [0., 0., 0., 1., 0.],
           [0., 0., 0., 0., 1.],
           [1., 0., 0., 0., 0.]], dtype=float32)>

  **Multi-hot output**

  Configure the layer with `output_mode='multi_hot'`. Note that the first
  `num_oov_indices` dimensions in the multi_hot encoding represent OOV tokens

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]])  # Note OOV tokens
  >>> layer = tf.keras.layers.IntegerLookup(
  ...     vocabulary=vocab, output_mode='multi_hot')
  >>> layer(data)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
    array([[0., 1., 0., 1., 1.],
           [1., 0., 1., 0., 1.]], dtype=float32)>

  **Token count output**

  Configure the layer with `output_mode='count'`. As with multi_hot output, the
  first `num_oov_indices` dimensions in the output represent OOV tokens.

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]])  # Note OOV tokens
  >>> layer = tf.keras.layers.IntegerLookup(
  ...     vocabulary=vocab, output_mode='count')
  >>> layer(data)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
    array([[0., 1., 0., 1., 2.],
           [2., 0., 1., 0., 1.]], dtype=float32)>

  **TF-IDF output**

  Configure the layer with `output_mode='tf_idf'`. As with multi_hot output, the
  first `num_oov_indices` dimensions in the output represent OOV tokens.

  Each token bin will output `token_count * idf_weight`, where the idf weights
  are the inverse document frequency weights per token. These should be provided
  along with the vocabulary. Note that the `idf_weight` for OOV tokens will
  default to the average of all idf weights passed in.

  >>> vocab = [12, 36, 1138, 42]
  >>> idf_weights = [0.25, 0.75, 0.6, 0.4]
  >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]])  # Note OOV tokens
  >>> layer = tf.keras.layers.IntegerLookup(output_mode='tf_idf')
  >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
  >>> layer(data)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
    array([[0.  , 0.25, 0.  , 0.6 , 0.8 ],
           [1.0 , 0.  , 0.75, 0.  , 0.4 ]], dtype=float32)>

  To specify the idf weights for oov tokens, you will need to pass the entire
  vocabulary including the leading oov token.

  >>> vocab = [-1, 12, 36, 1138, 42]
  >>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
  >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]])  # Note OOV tokens
  >>> layer = tf.keras.layers.IntegerLookup(output_mode='tf_idf')
  >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
  >>> layer(data)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
    array([[0.  , 0.25, 0.  , 0.6 , 0.8 ],
           [1.8 , 0.  , 0.75, 0.  , 0.4 ]], dtype=float32)>

  When adapting the layer in tf_idf mode, each input sample will be considered a
  document, and idf weight per token will be calculated as
  `log(1 + num_documents / (1 + token_document_count))`.

  **Inverse lookup**

  This example demonstrates how to map indices to tokens using this layer. (You
  can also use `adapt()` with `inverse=True`, but for simplicity we'll pass the
  vocab in this example.)

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([[1, 3, 4], [4, 0, 2]])
  >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab, invert=True)
  >>> layer(data)
  <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
  array([[  12, 1138,   42],
         [  42,   -1,   36]])>

  Note that the first index correspond to the oov token by default.

  **Forward and inverse lookup pairs**

  This example demonstrates how to use the vocabulary of a standard lookup
  layer to create an inverse lookup layer.

  >>> vocab = [12, 36, 1138, 42]
  >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])
  >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab)
  >>> i_layer = tf.keras.layers.IntegerLookup(
  ...     vocabulary=layer.get_vocabulary(), invert=True)
  >>> int_data = layer(data)
  >>> i_layer(int_data)
  <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
  array([[  12, 1138,   42],
         [  42,   -1,   36]])>

  In this example, the input token 1000 resulted in an output of -1, since
  1000 was not in the vocabulary - it got represented as an OOV, and all OOV
  tokens are returned as -1 in the inverse layer. Also, note that for the
  inverse to work, you must have already set the forward layer vocabulary
  either directly or via `adapt()` before calling `get_vocabulary()`.
  """

  def __init__(self,
               max_tokens=None,
               num_oov_indices=1,
               mask_token=None,
               oov_token=-1,
               vocabulary=None,
               invert=False,
               output_mode="int",
               sparse=False,
               pad_to_max_tokens=False,
               **kwargs):
    # IntegerLookup is int64-only; any other dtype is rejected below.
    allowed_dtypes = [tf.int64]

    # Support deprecated args for this layer.
    if "max_values" in kwargs:
      logging.log_first_n(logging.WARN,
                          "max_values is deprecated, use max_tokens instead.",
                          1)
      max_tokens = kwargs["max_values"]
      del kwargs["max_values"]
    if "mask_value" in kwargs:
      logging.log_first_n(logging.WARN,
                          "mask_value is deprecated, use mask_token instead.",
                          1)
      mask_token = kwargs["mask_value"]
      del kwargs["mask_value"]
    if "oov_value" in kwargs:
      logging.log_first_n(logging.WARN,
                          "oov_value is deprecated, use oov_token instead.", 1)
      oov_token = kwargs["oov_value"]
      del kwargs["oov_value"]

    if "dtype" in kwargs and kwargs["dtype"] not in allowed_dtypes:
      raise ValueError("The value of the dtype argument for IntegerLookup may "
                       "only be one of %s." % (allowed_dtypes,))

    if "dtype" not in kwargs:
      kwargs["dtype"] = tf.int64

    # If max_tokens is set, the token must be greater than 1 - otherwise we
    # are creating a 0-element vocab, which doesn't make sense.
    if max_tokens is not None and max_tokens <= 1:
      raise ValueError(
          f"If `max_tokens` is set for `IntegerLookup`, it must be "
          f"greater than 1. Received: max_tokens={max_tokens}.")

    if num_oov_indices < 0:
      # Fixed: message previously read "must >= 0" (missing "be").
      raise ValueError(
          f"The value of the `num_oov_indices` argument for `IntegerLookup` "
          f"must be >= 0. Received: num_oov_indices={num_oov_indices}.")

    # Make sure mask and oov are of the dtype we want.
    mask_token = None if mask_token is None else np.int64(mask_token)
    oov_token = None if oov_token is None else np.int64(oov_token)

    super(IntegerLookup, self).__init__(
        max_tokens=max_tokens,
        num_oov_indices=num_oov_indices,
        mask_token=mask_token,
        oov_token=oov_token,
        vocabulary=vocabulary,
        invert=invert,
        output_mode=output_mode,
        sparse=sparse,
        pad_to_max_tokens=pad_to_max_tokens,
        **kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell("IntegerLookup").set(True)
| 16,151 | 43.252055 | 80 | py |
keras | keras-master/keras/layers/preprocessing/index_lookup_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
import tensorflow.compat.v2 as tf
import itertools
import os
import random
import string
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.layers.preprocessing import index_lookup
from keras.layers.preprocessing import preprocessing_test_utils
from keras.utils.generic_utils import CustomObjectScope
def zip_and_sort(weight_values):
  """Pairs up (keys, values) and returns the pairs ordered by value."""
  keys, values = weight_values
  pairs = list(zip(keys, values))
  pairs.sort(key=lambda pair: pair[1])
  return pairs
def _get_end_to_end_test_cases():
  """Builds named test cases for the end-to-end adapt/lookup test.

  Each case supplies `vocab_data` to adapt on, `input_data` to look up,
  `kwargs` for the IndexLookup layer, and the `expected_output`. Cases cover
  string and int dtypes, forward and inverse lookup, hard and soft vocabulary
  caps, special (mask/OOV) tokens, and TF-IDF output. Every case is also
  crossed with a tf.data-based variant (see the loop at the bottom).
  """
  test_cases = (
      {
          "testcase_name":
              "test_strings_soft_vocab_cap",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # accumulator is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
              "num_oov_indices": 1,
              "mask_token": "",
              "oov_token": "[OOV]",
              "dtype": tf.string,
          },
          "expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
          "input_dtype":
              tf.string
      },
      {
          "testcase_name":
              "test_inverse_strings_soft_vocab_cap",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # accumulator is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),
          "kwargs": {
              "max_tokens": None,
              "num_oov_indices": 1,
              "mask_token": "",
              "oov_token": "[OOV]",
              "dtype": tf.string,
              "invert": True
          },
          "expected_output":
              np.array([[b"earth"], [b"wind"], [b"and"], [b"[OOV]"], [b"[OOV]"],
                        [b"and"], [b"earth"], [b"fire"]]),
          "input_dtype":
              tf.int64
      },
      {
          "testcase_name":
              "test_strings_with_special_tokens",
          # Mask and oov values in the vocab data should be dropped, and mapped
          # to 0 and 1 respectively when calling the layer.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        [""], [""], [""], ["[OOV]"], ["[OOV]"], ["[OOV]"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], [""], ["wind"], ["[OOV]"], ["and"], [""],
                        ["fire"], ["and"], ["[OOV]"], ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
              "num_oov_indices": 1,
              "mask_token": "",
              "oov_token": "[OOV]",
              "dtype": tf.string,
          },
          "expected_output": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],
          "input_dtype":
              tf.string
      },
      {
          "testcase_name":
              "test_ints_soft_vocab_cap",
          # Create an array where 1138 is the most frequent term, followed by
          # 1729, then 725, then 42. This ensures that the vocab accumulator
          # is sorting by frequency.
          "vocab_data":
              np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],
                        [1729], [725], [725]],
                       dtype=np.int64),
          "input_data":
              np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],
                       dtype=np.int64),
          "kwargs": {
              "max_tokens": None,
              "num_oov_indices": 1,
              "mask_token": 0,
              "oov_token": -1,
              "dtype": tf.int64,
          },
          "expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
          "input_dtype":
              tf.int64
      },
      {
          "testcase_name":
              "test_ints_with_special_tokens",
          # Mask and oov values in the vocab data should be dropped, and mapped
          # to 0 and 1 respectively when calling the layer.
          "vocab_data":
              np.array([[42], [1138], [1138], [1138], [1138], [0], [0], [0],
                        [-1], [-1], [-1], [1729], [1729], [1729], [725], [725]],
                       dtype=np.int64),
          "input_data":
              np.array([[1138], [0], [1729], [-1], [725], [0], [42], [725],
                        [-1], [4]],
                       dtype=np.int64),
          "kwargs": {
              "max_tokens": None,
              "num_oov_indices": 1,
              "mask_token": 0,
              "oov_token": -1,
              "dtype": tf.int64,
          },
          "expected_output": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],
          "input_dtype":
              tf.int64
      },
      {
          "testcase_name":
              "test_strings_hard_vocab_cap",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # accumulator is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "num_oov_indices": 1,
              "mask_token": "",
              "oov_token": "[OOV]",
              "dtype": tf.string,
          },
          # With max_tokens=5 only ["", "[OOV]", "earth", "wind", "and"] fit;
          # 'fire' falls out of the vocab and maps to the OOV index (1).
          "expected_output": [[2], [3], [4], [1], [1], [4], [2], [1]],
          "input_dtype":
              tf.string
      },
      {
          "testcase_name":
              "test_inverse_strings_hard_vocab_cap",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # accumulator is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),
          "kwargs": {
              "max_tokens": 5,
              "num_oov_indices": 1,
              "mask_token": "",
              "oov_token": "[OOV]",
              "dtype": tf.string,
              "invert": True
          },
          "expected_output":
              np.array([[b"earth"], [b"wind"], [b"and"], [b"[OOV]"], [b"[OOV]"],
                        [b"and"], [b"earth"], [b"[OOV]"]]),
          "input_dtype":
              tf.int64
      },
      {
          "testcase_name":
              "test_ints_hard_vocab_cap",
          # Create an array where 1138 is the most frequent term, followed by
          # 1729, then 725, then 42. This ensures that the vocab accumulator
          # is sorting by frequency.
          "vocab_data":
              np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],
                        [1729], [725], [725]],
                       dtype=np.int64),
          "input_data":
              np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],
                       dtype=np.int64),
          "kwargs": {
              "max_tokens": 5,
              "num_oov_indices": 1,
              "mask_token": 0,
              "oov_token": -1,
              "dtype": tf.int64,
          },
          "expected_output": [[2], [3], [4], [1], [1], [4], [2], [1]],
          "input_dtype":
              tf.int64
      },
      {
          "testcase_name":
              "test_ints_tf_idf_output",
          "vocab_data":
              np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],
                        [1729], [725], [725]]),
          "input_data":
              np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "num_oov_indices": 1,
              "mask_token": 0,
              "oov_token": -1,
              "output_mode": index_lookup.TF_IDF,
              "dtype": tf.int64,
          },
          "expected_output": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],
                              [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],
                              [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],
                              [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],
          "input_dtype":
              tf.int64
      },
      {
          "testcase_name":
              "test_strings_tf_idf_output",
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "num_oov_indices": 1,
              "mask_token": "",
              "oov_token": "[OOV]",
              "output_mode": index_lookup.TF_IDF,
              "dtype": tf.string,
          },
          "expected_output": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],
                              [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],
                              [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],
                              [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],
          "input_dtype":
              tf.string
      },
  )
  crossed_test_cases = []
  # Cross above test cases with use_dataset in (True, False)
  for use_dataset in (True, False):
    for case in test_cases:
      # Copy so the dataset variant does not mutate the shared case dict.
      case = case.copy()
      if use_dataset:
        case["testcase_name"] = case["testcase_name"] + "_with_dataset"
      case["use_dataset"] = use_dataset
      crossed_test_cases.append(case)
  return crossed_test_cases
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IndexLookupLayerTest(keras_parameterized.TestCase,
                           preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end adapt + lookup tests parameterized over the case table."""
  @parameterized.named_parameters(*_get_end_to_end_test_cases())
  def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                       use_dataset, expected_output,
                                       input_dtype):
    """Adapts an IndexLookup on `vocab_data` and checks lookup of `input_data`."""
    cls = index_lookup.IndexLookup
    # Inverse lookup returns tokens in the layer dtype; categorical output
    # modes (multi-hot/count/tf-idf) return floats; plain lookup returns int64.
    if "invert" in kwargs and kwargs["invert"]:
      expected_output_dtype = kwargs["dtype"]
    elif "output_mode" in kwargs and kwargs["output_mode"] != index_lookup.INT:
      expected_output_dtype = tf.float32
    else:
      expected_output_dtype = tf.int64
    input_shape = input_data.shape
    if use_dataset:
      # Keras APIs expect batched datasets.
      # TODO(rachelim): `model.predict` predicts the result on each
      # dataset batch separately, then tries to concatenate the results
      # together. When the results have different shapes on the non-concat
      # axis (which can happen in the output_mode = INT case for
      # IndexLookup), the concatenation fails. In real use cases, this may
      # not be an issue because users are likely to pipe the preprocessing layer
      # into other keras layers instead of predicting it directly. A workaround
      # for these unit tests is to have the dataset only contain one batch, so
      # no concatenation needs to happen with the result. For consistency with
      # numpy input, we should make `predict` join differently shaped results
      # together sensibly, with 0 padding.
      input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
          input_shape[0])
      vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
          input_shape[0])
    with CustomObjectScope({"IndexLookup": cls}):
      output_data = testing_utils.layer_test(
          cls,
          kwargs=kwargs,
          input_shape=input_shape,
          input_data=input_data,
          input_dtype=input_dtype,
          expected_output_dtype=expected_output_dtype,
          validate_training=False,
          adapt_data=vocab_data)
    # Inverted output is tokens (exact match); numeric output may be float
    # (tf-idf), so compare with tolerance there.
    if "invert" in kwargs and kwargs["invert"]:
      self.assertAllEqual(expected_output, output_data)
    else:
      self.assertAllClose(expected_output, output_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingInputTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Lookup tests for sparse and ragged inputs with an explicit vocabulary."""
  def test_sparse_string_input(self):
    """Sparse string input keeps sparsity; OOV value maps to index 1."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=["fire", "michigan"],
        dense_shape=[3, 4])
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [5, 1]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
  def test_sparse_int_input(self):
    """Sparse int64 input keeps sparsity; OOV value maps to index 1."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 32], dtype=np.int64),
        dense_shape=[3, 4])
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [5, 1]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
  def test_ragged_string_input(self):
    """Ragged string input keeps raggedness; OOV value maps to index 1."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = tf.ragged.constant(
        [["earth", "wind", "fire"], ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_ragged_int_input(self):
    """Ragged int64 input keeps raggedness; OOV value maps to index 1."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],
                                     dtype=np.int64)
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_int32_input_with_int64_keys(self):
    """int32 input is accepted by a layer whose vocabulary keys are int64."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],
                                     dtype=np.int32)
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingMultiOOVTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Lookup tests with num_oov_indices=2: OOV values hash into indices 1-2."""
  def test_sparse_string_input_multi_bucket(self):
    """Sparse string input; with 2 OOV buckets the vocab starts at index 3."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]], values=["fire", "ohio"], dense_shape=[3, 4])
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [6, 2]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=2,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
  def test_sparse_int_input_multi_bucket(self):
    """Sparse int input; with 2 OOV buckets the vocab starts at index 3."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 133], dtype=np.int64),
        dense_shape=[3, 4])
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [6, 2]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=2,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
  def test_ragged_string_input_multi_bucket(self):
    """Ragged string input; with 2 OOV buckets the vocab starts at index 3."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = tf.ragged.constant([["earth", "wind", "fire"],
                                      ["fire", "and", "earth",
                                       "ohio"]])
    expected_output = [[3, 4, 6], [6, 5, 3, 2]]
    input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=2,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_ragged_int_input_multi_bucket(self):
    """Ragged int input; with 2 OOV buckets the vocab starts at index 3."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 133]],
                                     dtype=np.int64)
    expected_output = [[3, 4, 6], [6, 5, 3, 2]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=2,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingAdaptTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for building the vocabulary via `adapt` on various input types."""
  def test_sparse_adapt(self):
    """Adapting on a SparseTensor orders the vocab by term frequency."""
    vocab_data = tf.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 2]],
        values=["michigan", "fire", "michigan"],
        dense_shape=[3, 4])
    vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.adapt(vocab_dataset)
    # Mask ("") and OOV tokens always occupy indices 0 and 1.
    expected_vocabulary = ["", "[OOV]", "michigan", "fire"]
    self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())
  def test_ragged_adapt(self):
    """Adapting on a RaggedTensor orders the vocab by term frequency."""
    vocab_data = tf.ragged.constant([["michigan"],
                                     ["fire", "michigan"]])
    vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.adapt(vocab_dataset)
    expected_vocabulary = ["", "[OOV]", "michigan", "fire"]
    self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())
  def test_sparse_int_input(self):
    """Sparse int input lookup after `set_vocabulary`; OOV maps to index 1."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 32], dtype=np.int64),
        dense_shape=[3, 4])
    expected_indices = [[0, 0], [1, 2]]
    expected_values = [5, 1]
    expected_dense_shape = [3, 4]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
  def test_ragged_string_input(self):
    """Ragged string input lookup after `set_vocabulary`."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = tf.ragged.constant(
        [["earth", "wind", "fire"], ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_ragged_int_input(self):
    """Ragged int input lookup after `set_vocabulary`."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],
                                     dtype=np.int64)
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        dtype=tf.int64,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_single_string_generator_dataset(self):
    """Adapting on a generator-backed dataset of scalar strings succeeds."""
    def word_gen():
      # Infinite stream of random 2-letter words; the dataset is truncated
      # below with take(2).
      for _ in itertools.count(1):
        yield "".join(random.choice(string.ascii_letters) for i in range(2))
    ds = tf.data.Dataset.from_generator(word_gen, tf.string,
                                        tf.TensorShape([]))
    batched_ds = ds.take(2)
    input_t = keras.Input(shape=(), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=10,
        num_oov_indices=0,
        mask_token=None,
        oov_token=None,
        dtype=tf.string)
    _ = layer(input_t)
    layer.adapt(batched_ds)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IndexLookupOutputTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
  def test_int_output(self):
    """Dense string lookup: vocab terms map to 2..5, OOV maps to 1."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_int_output_rank_one(self):
    """Rank-1 string input produces rank-1 index output."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_data = np.array(["earth", "wind", "and", "fire"])
    expected_output = [2, 3, 4, 5]
    inputs = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_dataset = model(input_data)
    self.assertAllEqual(expected_output, output_dataset)
  def test_int_output_rank_zero(self):
    """Scalar (rank-0) string input produces a scalar index output."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_data = tf.constant("earth")
    expected_output = 2
    inputs = keras.Input(shape=(), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_dataset = model(input_data)
    self.assertAllEqual(expected_output, output_dataset)
  def test_int_output_shape(self):
    """INT output mode preserves the static input shape."""
    input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=2,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    int_data = layer(input_data)
    self.assertAllEqual(int_data.shape.as_list(), [16, 4])
  def test_int_output_no_reserved_zero(self):
    """With mask_token=None, indices start at 0 for OOV and 1 for the vocab."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=None,
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_int_output_no_oov(self):
    """With num_oov_indices=0, unknown values raise InvalidArgumentError."""
    vocab_data = ["earth", "wind", "and", "fire"]
    valid_input = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", ""]])
    invalid_input = np.array([["earth", "wind", "and", "michigan"],
                              ["fire", "and", "earth", "michigan"]])
    # No OOV slot: mask is 0 and the vocab occupies 1..4.
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=0,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(valid_input)
    self.assertAllEqual(expected_output, output_data)
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                "found OOV values.*michigan"):
      _ = model.predict(invalid_input)
  def test_int_output_no_oov_ragged(self):
    """Same as test_int_output_no_oov, but with ragged inputs."""
    vocab_data = ["earth", "wind", "and", "fire"]
    valid_input = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", ""]])
    invalid_input = np.array([["earth", "wind", "and", "michigan"],
                              ["fire", "and", "earth", "michigan"]])
    valid_input = tf.RaggedTensor.from_tensor(valid_input)
    invalid_input = tf.RaggedTensor.from_tensor(invalid_input)
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=0,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(valid_input)
    self.assertAllEqual(expected_output, output_data)
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                "found OOV values.*michigan"):
      _ = model.predict(invalid_input)
  def test_int_output_no_oov_sparse(self):
    """Same as test_int_output_no_oov, but with sparse inputs."""
    vocab_data = ["earth", "wind", "and", "fire"]
    valid_input = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", ""]])
    invalid_input = np.array([["earth", "wind", "and", "michigan"],
                              ["fire", "and", "earth", "michigan"]])
    valid_input = tf.sparse.from_dense(valid_input)
    invalid_input = tf.sparse.from_dense(invalid_input)
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=0,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(valid_input)
    # Densify the sparse output before comparing against the dense expectation.
    self.assertAllEqual(expected_output,
                        tf.sparse.to_dense(output_data))
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                "found OOV values.*michigan"):
      _ = model.predict(invalid_input)
  def test_int_output_explicit_vocab(self):
    """Passing `vocabulary` at construction behaves like set_vocabulary."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        vocabulary=vocab_data,
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_one_hot_output_hard_maximum(self):
    """Check one-hot output when pad_to_max_tokens=True."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array(["earth", "wind", "and", "fire", "michigan", ""])
    # Column 0 is OOV; the mask token ("") produces an all-zero row.
    expected_output = [
        [0, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 1, 0],
        [1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0],
    ]
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=6,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.ONE_HOT,
        pad_to_max_tokens=True,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    binary_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=binary_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_one_hot_output_soft_maximum(self):
    """Check one-hot output when pad_to_max_tokens=False."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array(["earth", "wind", "and", "fire", "michigan", ""])
    # Output width tracks the actual vocab size (5) rather than max_tokens.
    expected_output = [
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
    ]
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.ONE_HOT,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    binary_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=binary_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_one_hot_output_rank_zero_no_oov(self):
    """Check one-hot output for a scalar input with no OOV slot."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_data = tf.constant("earth")
    # No OOV column, so the one-hot width equals the vocab size (4).
    expected_output = [1, 0, 0, 0]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=0,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.ONE_HOT,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    output_data = layer(input_data)
    self.assertAllEqual(expected_output, output_data)
  def test_one_hot_output_shape(self):
    """One-hot output's last static dimension equals the vocab size."""
    inputs = keras.Input(batch_size=16, shape=(1,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        vocabulary=["earth"],
        max_tokens=2,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.ONE_HOT,
        dtype=tf.string)
    outputs = layer(inputs)
    self.assertAllEqual(outputs.shape.as_list(), [16, 2])
  def test_multi_hot_output_hard_maximum(self):
    """Check multi-hot output when pad_to_max_tokens=True."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire", ""],
                            ["fire", "fire", "and", "earth", "michigan"]])
    # Column 0 is OOV; the extra column 5 comes from max_tokens=6 padding.
    expected_output = [
        [0, 1, 1, 1, 1, 0],
        [1, 1, 0, 1, 1, 0],
    ]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=6,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.MULTI_HOT,
        pad_to_max_tokens=True,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    binary_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=binary_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_multi_hot_output_no_oov(self):
    """Multi-hot output with num_oov_indices=0; OOV input raises an error."""
    vocab_data = ["earth", "wind", "and", "fire"]
    valid_input = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", ""]])
    invalid_input = np.array([["earth", "wind", "and", "michigan"],
                              ["fire", "and", "earth", "michigan"]])
    expected_output = [
        [1, 1, 1, 1, 0],
        [1, 0, 1, 1, 0],
    ]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=0,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.MULTI_HOT,
        pad_to_max_tokens=True,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    binary_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=binary_data)
    output_data = model.predict(valid_input)
    self.assertAllEqual(expected_output, output_data)
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                "found OOV values.*michigan"):
      _ = model.predict(invalid_input)
  def test_multi_hot_output_hard_maximum_multiple_adapts(self):
    """Re-adapting the layer changes predictions after recompiling the model."""
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    adapt_data = ["earth", "earth", "earth", "earth", "wind", "wind", "wind"]
    first_expected_output = [
        [1, 1, 1, 0, 0],
        [1, 1, 0, 0, 0],
    ]
    second_adapt_data = [
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ]
    second_expected_output = [
        [0, 1, 1, 1, 0],
        [1, 1, 0, 1, 0],
    ]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.MULTI_HOT,
        pad_to_max_tokens=True,
        dtype=tf.string)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Test the first adapt
    layer.adapt(adapt_data)
    first_output = model.predict(input_array)
    # Test the second adapt
    layer.adapt(second_adapt_data)
    # We need to recompile the model to retrace our call graph.
    model.compile()
    second_output = model.predict(input_array)
    self.assertAllEqual(first_expected_output, first_output)
    self.assertAllEqual(second_expected_output, second_output)
  def test_multi_hot_output_soft_maximum(self):
    """Check multi_hot output when pad_to_max_tokens=False."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire", ""],
                            ["fire", "and", "earth", "michigan", ""]])
    # Width tracks the actual vocab size (5: OOV + 4 terms), no padding.
    expected_output = [
        [0, 1, 1, 1, 1],
        [1, 1, 0, 1, 1],
    ]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.MULTI_HOT,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    binary_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=binary_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
def test_multi_hot_output_shape(self):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.MULTI_HOT,
vocabulary=["foo"],
dtype=tf.string)
binary_data = layer(input_data)
self.assertAllEqual(binary_data.shape.as_list(), [16, 2])
def test_count_output_hard_maxiumum(self):
"""Check count output when pad_to_max_tokens=True."""
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "wind", ""],
["fire", "fire", "fire", "michigan", ""]])
expected_output = [
[0, 1, 2, 1, 0, 0],
[1, 0, 0, 0, 3, 0],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=6,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.COUNT,
pad_to_max_tokens=True,
dtype=tf.string)
layer.set_vocabulary(vocab_data)
count_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=count_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_soft_maximum(self):
"""Check count output when pad_to_max_tokens=False."""
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "wind", ""],
["fire", "fire", "fire", "michigan", ""]])
expected_output = [
[0, 1, 2, 1, 0],
[1, 0, 0, 0, 3],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.COUNT,
dtype=tf.string)
layer.set_vocabulary(vocab_data)
count_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=count_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_shape(self):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.COUNT,
vocabulary=["foo"],
dtype=tf.string)
count_data = layer(input_data)
self.assertAllEqual(count_data.shape.as_list(), [16, 2])
def test_ifidf_output_hard_maximum(self):
"""Check tf-idf output when pad_to_max_tokens=True."""
vocab_data = ["earth", "wind", "and", "fire"]
# OOV idf weight (bucket 0) should 0.5, the average of passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth", ""],
["ohio", "fire", "earth", "michigan", ""]])
expected_output = [
[0.00, 0.80, 0.25, 0.75, 0.00, 0.00],
[1.00, 0.40, 0.00, 0.00, 0.60, 0.00],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=6,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.TF_IDF,
pad_to_max_tokens=True,
dtype=tf.string)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
layer_output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=layer_output)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_ifidf_output_soft_maximum(self):
"""Check tf-idf output when pad_to_max_tokens=False."""
vocab_data = ["earth", "wind", "and", "fire"]
# OOV idf weight (bucket 0) should 0.5, the average of passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth", ""],
["ohio", "fire", "earth", "michigan", ""]])
expected_output = [
[0.00, 0.80, 0.25, 0.75, 0.00],
[1.00, 0.40, 0.00, 0.00, 0.60],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.TF_IDF,
dtype=tf.string)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
layer_output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=layer_output)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_ifidf_output_shape(self):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.TF_IDF,
dtype=tf.string)
layer.set_vocabulary(vocabulary=["foo"], idf_weights=[1.0])
layer_output = layer(input_data)
self.assertAllEqual(layer_output.shape.as_list(), [16, 2])
def test_int_output_file_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_non_int_output_file_vocab_in_tf_function(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.constant(
[["earth", "wind", "and", "fire", ""],
["fire", "and", "earth", "michigan", ""]],
dtype=tf.string)
expected_output = [
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
]
vocab_file = self._write_to_temp_file("temp", vocab_data)
@tf.function
def compute(data):
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.MULTI_HOT,
dtype=tf.string)
return layer(data)
output_dataset = compute(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_file_vocab_and_list_vocab_identical_attrs(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
file_layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string)
list_layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string)
expected_vocab = ["", "[OOV]", "earth", "wind", "and", "fire"]
self.assertAllEqual(expected_vocab, list_layer.get_vocabulary())
expected_vocab_size = 6
self.assertAllEqual(expected_vocab_size, list_layer.vocabulary_size())
self.assertAllEqual(list_layer.get_vocabulary(),
file_layer.get_vocabulary())
self.assertAllEqual(list_layer.vocabulary_size(),
file_layer.vocabulary_size())
def test_file_vocab_and_list_vocab_identical_attrs_multi_oov(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
file_layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
dtype=tf.string)
list_layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
dtype=tf.string)
expected_vocab = ["", "[OOV]", "[OOV]", "earth", "wind", "and", "fire"]
self.assertAllEqual(expected_vocab, list_layer.get_vocabulary())
expected_vocab_size = 7
self.assertAllEqual(expected_vocab_size, list_layer.vocabulary_size())
self.assertAllEqual(list_layer.get_vocabulary(),
file_layer.get_vocabulary())
self.assertAllEqual(list_layer.vocabulary_size(),
file_layer.vocabulary_size())
def test_file_vocab_and_list_vocab_identical_attrs_no_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
file_layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=2,
mask_token=None,
oov_token="[OOV]",
dtype=tf.string)
list_layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=2,
mask_token=None,
oov_token="[OOV]",
dtype=tf.string)
expected_vocab = ["[OOV]", "[OOV]", "earth", "wind", "and", "fire"]
self.assertAllEqual(expected_vocab, list_layer.get_vocabulary())
expected_vocab_size = 6
self.assertAllEqual(expected_vocab_size, list_layer.vocabulary_size())
self.assertAllEqual(list_layer.get_vocabulary(),
file_layer.get_vocabulary())
self.assertAllEqual(list_layer.vocabulary_size(),
file_layer.vocabulary_size())
def test_int_output_file_vocab_no_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "", "earth", "michigan"]])
expected_output = [[1, 2, 3, 4], [4, 0, 1, 0]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
mask_token=None,
num_oov_indices=1,
oov_token="[OOV]",
dtype=tf.string)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_file_vocab_no_oov_or_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "wind", "earth", "and"]])
expected_output = [[0, 1, 2, 3], [3, 1, 0, 2]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
mask_token=None,
num_oov_indices=0,
oov_token=None,
dtype=tf.string)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_file_vocab_inversion(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[1, 2, 3, 4], [4, 0, 1, 0]])
expected_output = [["earth", "wind", "and", "fire"],
["fire", "[OOV]", "earth", "[OOV]"]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
idata = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
mask_token=None,
num_oov_indices=1,
oov_token="[OOV]",
dtype=tf.string)
_ = layer(idata)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
invert_layer = index_lookup.IndexLookup(
vocabulary=layer.get_vocabulary(),
max_tokens=None,
oov_token="[OOV]",
mask_token=None,
num_oov_indices=1,
invert=True,
dtype=tf.string)
int_data = invert_layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_int_file_vocab(self):
vocab_data = ["10", "20", "30", "40"]
input_array = np.array([[10, 20, 30, 40], [40, 0, 10, 42]])
expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
dtype=tf.int64)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_dataset_map_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token=None,
oov_token="[OOV]",
vocabulary=vocab_data,
dtype=tf.string)
ds = tf.data.Dataset.from_tensor_slices([["earth"], ["wind"], ["and"]])
ds = ds.map(layer)
self.assertAllEqual(list(ds.as_numpy_iterator()), [[0], [1], [2]])
def test_dataset_map_output_layer_created_in_function(self):
vocab_data = ["earth", "wind", "and", "fire"]
def apply_lookup(data):
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token=None,
oov_token="[OOV]",
vocabulary=vocab_data,
dtype=tf.string)
return layer(data)
ds = tf.data.Dataset.from_tensor_slices([["earth"], ["wind"], ["and"]])
ds = ds.map(apply_lookup)
self.assertAllEqual(list(ds.as_numpy_iterator()), [[0], [1], [2]])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IndexLookupVocabularyTest(keras_parameterized.TestCase,
                                preprocessing_test_utils.PreprocessingLayerTest
                               ):
  """Tests for setting and validating IndexLookup vocabularies.

  Covers explicit (list) vocabularies, special-token handling (mask/OOV),
  multi-OOV configurations, and error paths for malformed vocabularies,
  for both string (tf.string) and integer (tf.int64) layers.
  """

  def test_int_output_explicit_vocab(self):
    """A list vocabulary passed to the constructor yields int indices."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        vocabulary=vocab_data,
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_explicit_vocab_with_special_tokens(self):
    """A vocabulary that already contains mask/OOV tokens is accepted as-is."""
    vocab_data = ["", "[OOV]", "earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        vocabulary=vocab_data,
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_get_vocabulary_no_special_tokens(self):
    """get_vocabulary(include_special_tokens=False) drops mask/OOV entries."""
    vocab_data = ["", "[OOV]", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary(include_special_tokens=False)
    self.assertAllEqual(returned_vocab, ["wind", "and", "fire"])
    # vocabulary_size() still counts the special tokens.
    self.assertAllEqual(layer.vocabulary_size(), 5)

  def test_vocab_multi_oov(self):
    """A vocabulary listing multiple OOV tokens round-trips unchanged."""
    vocab_data = ["", "[OOV]", "[OOV]", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=2,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(returned_vocab, vocab_data)

  def test_vocab_multi_oov_not_present(self):
    """Missing OOV tokens are synthesized: mask, then num_oov_indices OOVs."""
    vocab_data = ["wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=10,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(returned_vocab,
                        [""] + ["[OOV]"] * 10 + ["wind", "and", "fire"])

  def test_vocab_with_max_cap(self):
    """A vocabulary exactly at max_tokens is accepted and round-trips."""
    vocab_data = ["", "[OOV]", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)
    self.assertAllEqual(layer.vocabulary_size(), 5)

  def test_int_vocab_with_max_cap(self):
    """Same as test_vocab_with_max_cap, for an int64 vocabulary."""
    vocab_data = [0, -1, 42, 1276, 1138]
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)
    self.assertAllEqual(layer.vocabulary_size(), 5)

  def test_vocab_with_multiple_oov_indices(self):
    """A string vocabulary with several explicit OOV slots round-trips."""
    vocab_data = ["", "[OOV]", "[OOV]", "[OOV]", "wind"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=3,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)

  def test_int_vocab_with_multiple_oov_indices(self):
    """An int64 vocabulary with several explicit OOV slots round-trips."""
    vocab_data = [0, -1, -1, -1, 42]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=3,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)

  def test_non_unique_vocab_fails(self):
    """A duplicated vocabulary term is rejected at construction time."""
    vocab_data = ["earth", "wind", "and", "fire", "fire"]
    with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
      _ = index_lookup.IndexLookup(
          vocabulary=vocab_data,
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string)

  def test_vocab_with_oov_and_wrong_mask_fails(self):
    """A vocab whose first entry is not the configured mask token fails."""
    vocab_data = ["custom_mask", "[OOV]", "earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError, ".*does not have the mask token.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_with_oov_and_no_mask_fails(self):
    """An OOV token in the wrong position (missing mask) is rejected."""
    vocab_data = ["[OOV]", "earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError, ".*Reserved OOV.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_with_mask_but_no_oov_fails(self):
    """A vocab with a mask token but no OOV token is rejected."""
    vocab_data = ["", "earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError, ".*does not have the OOV token.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_with_repeated_element_fails(self):
    """set_vocabulary rejects duplicated terms."""
    vocab_data = ["earth", "earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError, ".*repeated term.*earth.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_with_reserved_oov_element_fails(self):
    """The OOV token may not appear as an ordinary vocabulary term."""
    vocab_data = ["earth", "test", "[OOV]", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError, ".*Reserved OOV.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_with_reserved_mask_element_fails(self):
    """The mask token may not appear as an ordinary vocabulary term."""
    vocab_data = ["earth", "mask_token", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="mask_token",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError, ".*Reserved mask.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_size_changed_pad_to_max_false_fails(self):
    """Shrinking the vocab after the layer is built must raise on next call."""
    vocab_data = ["earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        pad_to_max_tokens=False,
        output_mode=index_lookup.MULTI_HOT,
        dtype=tf.string)
    layer.set_vocabulary(vocab_data)
    # Calling the layer should lock the vocabulary size.
    _ = layer([["earth"]])
    layer.set_vocabulary(vocab_data[:2])
    with self.assertRaisesRegex(RuntimeError,
                                "vocabulary size cannot be changed"):
      # Calling the layer again should cause an error.
      _ = layer([["earth"]])

  def test_vocab_with_idf_weights_non_tfidf_output_fails(self):
    """idf_weights are only valid together with TF_IDF output mode."""
    vocab_data = ["earth", "wind", "and", "fire"]
    weight_data = [1, 1, 1, 1, 1]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.MULTI_HOT,
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError,
                                "`idf_weights` should only be set if"):
      layer.set_vocabulary(vocab_data, idf_weights=weight_data)

  def test_vocab_with_idf_weights_length_mismatch_fails(self):
    """idf_weights must match the vocabulary length exactly."""
    vocab_data = ["earth", "wind", "and", "fire"]
    weight_data = [1, 1, 1, 1, 1]  # too long
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.TF_IDF,
        dtype=tf.string)
    with self.assertRaisesRegex(
        ValueError, "`idf_weights` must be the same length as vocab"):
      layer.set_vocabulary(vocab_data, idf_weights=weight_data)

  def test_vocab_without_idf_weights_tfidf_output_fails(self):
    """TF_IDF output mode requires idf_weights to be supplied."""
    vocab_data = ["earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        output_mode=index_lookup.TF_IDF,
        dtype=tf.string)
    with self.assertRaisesRegex(
        ValueError, "`idf_weights` must be set if output_mode is TF_IDF"):
      layer.set_vocabulary(vocab_data)

  def test_non_unique_int_vocab_fails(self):
    """A duplicated int64 vocabulary term is rejected at construction."""
    vocab_data = [12, 13, 14, 15, 15]
    with self.assertRaisesRegex(ValueError, "repeated term.*15"):
      _ = index_lookup.IndexLookup(
          vocabulary=vocab_data,
          max_tokens=None,
          num_oov_indices=1,
          mask_token=0,
          oov_token=-1,
          dtype=tf.int64)

  def test_int_vocab_with_oov_and_wrong_mask_fails(self):
    """An int64 vocab whose first entry is not the mask token (0) fails."""
    vocab_data = [1234, -1, 11, 21, 13, 14]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    with self.assertRaisesRegex(ValueError, "does not have the mask token `0`"):
      layer.set_vocabulary(vocab_data)

  def test_int_vocab_with_oov_and_no_mask_fails(self):
    """An int64 OOV token in the wrong position (missing mask) is rejected."""
    vocab_data = [-1, 11, 12, 13, 14]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    with self.assertRaisesRegex(ValueError, "Reserved OOV"):
      layer.set_vocabulary(vocab_data)

  def test_int_vocab_with_mask_but_no_oov_fails(self):
    """An int64 vocab with a mask token but no OOV token is rejected."""
    vocab_data = [0, 11, 12, 13, 14]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    with self.assertRaisesRegex(ValueError, "does not have the OOV token `-1`"):
      layer.set_vocabulary(vocab_data)

  def test_int_vocab_with_repeated_element_fails(self):
    """set_vocabulary rejects duplicated int64 terms."""
    vocab_data = [11, 11, 34, 23, 124]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    with self.assertRaisesRegex(ValueError, "repeated term.*11"):
      layer.set_vocabulary(vocab_data)

  def test_int_vocab_with_reserved_oov_element_fails(self):
    """The int64 OOV token (-1) may not appear as an ordinary term."""
    vocab_data = [14, 38, -1, 34, 3, 84]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    with self.assertRaisesRegex(ValueError, "Reserved OOV"):
      layer.set_vocabulary(vocab_data)

  def test_int_vocab_with_reserved_mask_element_fails(self):
    """The int64 mask token (0) may not appear as an ordinary term."""
    vocab_data = [125, 0, 3, 4, 94]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64)
    with self.assertRaisesRegex(ValueError, "Reserved mask"):
      layer.set_vocabulary(vocab_data)

  def test_no_vocab_file_string_fails(self):
    """A vocabulary path that does not exist raises at construction."""
    with self.assertRaisesRegex(ValueError, ".*non_existent_file.*"):
      _ = index_lookup.IndexLookup(
          vocabulary="non_existent_file",
          max_tokens=None,
          num_oov_indices=1,
          mask_token=0,
          oov_token=-1,
          dtype=tf.int64)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IndexLookupInverseVocabularyTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests for IndexLookup configured with invert=True (index -> token)."""

  def test_int_output_explicit_vocab(self):
    """An inverted layer maps int indices back to their vocabulary tokens."""
    vocab_data = ["", "[OOV]", "earth", "wind", "and", "fire"]
    input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])
    expected_output = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "[OOV]"]])
    input_data = keras.Input(shape=(None,), dtype=tf.int64)
    layer = index_lookup.IndexLookup(
        vocabulary=vocab_data,
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string,
        invert=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_vocab_with_max_cap(self):
    """A vocabulary exactly at max_tokens round-trips in invert mode."""
    vocab_data = ["", "[OOV]", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string,
        invert=True)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)

  def test_int_vocab_with_max_cap(self):
    """Same as test_vocab_with_max_cap, for an int64 vocabulary."""
    vocab_data = [0, -1, 42, 1276, 1138]
    layer = index_lookup.IndexLookup(
        max_tokens=5,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64,
        invert=True)
    layer.set_vocabulary(vocab_data)
    returned_vocab = layer.get_vocabulary()
    self.assertAllEqual(vocab_data, returned_vocab)

  def test_non_unique_vocab_fails(self):
    """A duplicated vocabulary term is rejected at construction time."""
    vocab_data = ["earth", "wind", "and", "fire", "fire"]
    with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
      _ = index_lookup.IndexLookup(
          vocabulary=vocab_data,
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string,
          invert=True)

  def test_non_int_output_fails(self):
    """invert=True only supports int output mode; COUNT is rejected."""
    with self.assertRaisesRegex(ValueError, "`output_mode` must be int"):
      _ = index_lookup.IndexLookup(
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string,
          output_mode=index_lookup.COUNT,
          invert=True)

  def test_vocab_with_repeated_element_fails(self):
    """set_vocabulary rejects duplicated terms in invert mode."""
    vocab_data = ["earth", "earth", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string,
        invert=True)
    with self.assertRaisesRegex(ValueError, ".*repeated term.*earth.*"):
      layer.set_vocabulary(vocab_data)

  def test_vocab_with_reserved_mask_element_fails(self):
    """The mask token may not appear as an ordinary term in invert mode."""
    vocab_data = ["earth", "mask_token", "wind", "and", "fire"]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="mask_token",
        oov_token="[OOV]",
        dtype=tf.string,
        invert=True)
    with self.assertRaisesRegex(ValueError, ".*Reserved mask.*"):
      layer.set_vocabulary(vocab_data)

  def test_non_unique_int_vocab_fails(self):
    """A duplicated int64 vocabulary term is rejected at construction."""
    vocab_data = [12, 13, 14, 15, 15]
    with self.assertRaisesRegex(ValueError, ".*repeated term.*15.*"):
      _ = index_lookup.IndexLookup(
          vocabulary=vocab_data,
          max_tokens=None,
          num_oov_indices=1,
          mask_token=0,
          oov_token=-1,
          dtype=tf.int64,
          invert=True)

  def test_int_vocab_with_repeated_element_fails(self):
    """set_vocabulary rejects duplicated int64 terms in invert mode."""
    vocab_data = [11, 11, 34, 23, 124]
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=0,
        oov_token=-1,
        dtype=tf.int64,
        invert=True)
    with self.assertRaisesRegex(ValueError, ".*repeated term.*11.*"):
      layer.set_vocabulary(vocab_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IndexLookupErrorTest(keras_parameterized.TestCase,
                           preprocessing_test_utils.PreprocessingLayerTest):
  """Error-path tests for IndexLookup size limits."""

  def test_too_long_vocab_fails_in_single_setting(self):
    """Setting more tokens than max_tokens allows must raise."""
    lookup = index_lookup.IndexLookup(
        max_tokens=4,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string)
    with self.assertRaisesRegex(ValueError,
                                "vocabulary larger than the maximum vocab.*"):
      lookup.set_vocabulary(["earth", "wind", "and", "fire"])

  def test_zero_max_tokens_fails(self):
    """max_tokens=0 is rejected at construction time."""
    with self.assertRaisesRegex(ValueError, ".*max_tokens.*"):
      _ = index_lookup.IndexLookup(
          max_tokens=0,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=tf.string)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class IndexLookupSavingTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_vocabulary_persistence_across_saving(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(
output_path, custom_objects={"IndexLookup": index_lookup.IndexLookup})
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_vocabulary_persistence_file_across_cloning(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string,
vocabulary=vocab_file)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Clone the model and set weights.
new_model = keras.models.clone_model(model)
new_model.set_weights(model.get_weights())
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, new_model)
# Validate correctness of the new model.
new_output_dataset = new_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_persistence_file_vocabs_tf_save_tf_load(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string,
vocabulary=vocab_file)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
tf.saved_model.save(obj=model, export_dir=output_path)
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
keras.backend.clear_session()
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = f(tf.constant(input_array))["index_lookup"]
self.assertAllEqual(new_output_dataset, expected_output)
def test_vocabulary_persistence_file_vocab_keras_save_tf_load(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string,
vocabulary=vocab_file)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
keras.backend.clear_session()
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = f(tf.constant(input_array))["index_lookup"]
self.assertAllEqual(new_output_dataset, expected_output)
  def test_persistence_file_vocab_keras_save_keras_load(self):
    """Keras save/load round trip, then a second save/load of the loaded model.

    The vocab file is deleted before the first load, proving the saved model
    carries its own table state and no longer depends on the original file.
    """
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    vocab_file = self._write_to_temp_file("temp", vocab_data)

    # Build and validate a golden model.
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string,
        vocabulary=vocab_file)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(output_dataset, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")

    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    keras.backend.clear_session()
    # Remove the vocab file: loading must succeed without it.
    tf.io.gfile.remove(vocab_file)

    loaded_model = keras.models.load_model(
        output_path, custom_objects={"IndexLookup": index_lookup.IndexLookup})

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_dataset = loaded_model.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)

    # Try re-saving the layer. This simulates saving a layer contained at
    # a hub Module.
    input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
    output_2 = loaded_model(input_data_2)
    model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
    new_output_dataset = model_2.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model_2")
    model_2.save(output_path, save_format="tf")

    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    keras.backend.clear_session()

    loaded_model = keras.models.load_model(
        output_path, custom_objects={"IndexLookup": index_lookup.IndexLookup})

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_dataset = loaded_model.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)
  def test_persistence_file_vocab_keras_save_keras_load_tf_save_tf_load(self):
    """Keras save -> Keras load, then tf.saved_model save -> tf.saved_model load."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    vocab_file = self._write_to_temp_file("temp", vocab_data)

    # Build and validate a golden model.
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string,
        vocabulary=vocab_file)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(output_dataset, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")

    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    keras.backend.clear_session()
    # Remove the vocab file: loading must succeed without it.
    tf.io.gfile.remove(vocab_file)

    loaded_model = keras.models.load_model(
        output_path, custom_objects={"IndexLookup": index_lookup.IndexLookup})

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_dataset = loaded_model.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)

    # Try re-saving the layer. This simulates saving a layer contained at
    # a hub Module.
    input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
    output_2 = loaded_model(input_data_2)
    model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
    new_output_dataset = model_2.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)

    # Save the model to disk, this time via the low-level SavedModel API.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model_2")
    tf.saved_model.save(model_2, output_path)

    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    keras.backend.clear_session()

    loaded_model = tf.saved_model.load(output_path)
    f = loaded_model.signatures["serving_default"]

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model. The signature output is keyed
    # "model" here (the wrapped inner model), not "index_lookup".
    new_output_dataset = f(tf.constant(input_array))["model"]
    self.assertAllEqual(new_output_dataset, expected_output)
def test_persistence_file_vocab_keras_save_keras_load_keras_save_keras_load(
self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
dtype=tf.string,
vocabulary=vocab_file)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
keras.backend.clear_session()
tf.io.gfile.remove(vocab_file)
loaded_model = keras.models.load_model(
output_path, custom_objects={"IndexLookup": index_lookup.IndexLookup})
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Try re-saving the layer. This simulates saving a layer contained at
# a hub Module.
input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
output_2 = loaded_model(input_data_2)
model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model_2")
model_2.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(
output_path, custom_objects={"IndexLookup": index_lookup.IndexLookup})
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
  def test_static_table_config_weight_data_transfer_succeeds(self):
    """A file-vocab layer can be cloned via get_config/get_weights transfer."""
    vocab_data = ["earth", "wind", "and", "fire"]
    vocab_file = self._write_to_temp_file("temp", vocab_data)
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    # Build and validate a golden model.
    layer_cls = index_lookup.IndexLookup
    layer = layer_cls(
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="[OOV]",
        dtype=tf.string,
        vocabulary=vocab_file)

    # Capture config and weights from the original layer, then rebuild a new
    # layer instance from them (config first, then weights).
    config = layer.get_config()
    weights = layer.get_weights()

    layer = layer_cls.from_config(config)
    layer.set_weights(weights)

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    output = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=output)

    new_output_dataset = model.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True)
class EagerExecutionDisabled(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """Tests IndexLookup under graph-only (non-eager) execution."""

  def test_lookup(self):
    """The layer looks up a literal vocabulary in a legacy graph session."""
    # We need this test for model_to_estimator followed by export_saved_model,
    # which will call the layer in a legacy session. This could also happen
    # directly if a user calls disable_v2_behavior or disable_eager_execution.
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array(["earth", "wind", "and", "fire"])
    # mask_token=None, so index 0 is the OOV slot and terms start at index 1.
    expected_output = [1, 2, 3, 4]

    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=None,
        num_oov_indices=1,
        mask_token=None,
        oov_token="[OOV]",
        dtype=tf.string,
        vocabulary=vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Call the model directly (not predict) so it runs in the legacy session.
    output_dataset = model(input_array)
    self.assertAllEqual(output_dataset, expected_output)
# Standard TF test entry point.
if __name__ == "__main__":
  tf.test.main()
| 89,814 | 36.236733 | 80 | py |
keras | keras-master/keras/layers/preprocessing/__init__.py | 0 | 0 | 0 | py | |
keras | keras-master/keras/layers/preprocessing/normalization.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization preprocessing layer."""
from keras import backend
from keras.engine import base_preprocessing_layer
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
@keras_export('keras.layers.Normalization',
              'keras.layers.experimental.preprocessing.Normalization')
class Normalization(base_preprocessing_layer.PreprocessingLayer):
  """Feature-wise normalization of the data.

  This layer will coerce its inputs into a distribution centered around
  0 with standard deviation 1. It accomplishes this by precomputing the mean and
  variance of the data, and calling `(input - mean) / sqrt(var)` at runtime.

  What happens in `adapt()`: Compute mean and variance of the data and store
  them as the layer's weights. `adapt()` should be called before `fit()`,
  `evaluate()`, or `predict()`.

  Args:
    axis: Integer, tuple of integers, or None. The axis or axes that should
      have a separate mean and variance for each index in the shape. For
      example, if shape is `(None, 5)` and `axis=1`, the layer will track 5
      separate mean and variance values for the last axis. If `axis` is set to
      `None`, the layer will normalize all elements in the input by a scalar
      mean and variance. Defaults to -1, where the last axis of the input is
      assumed to be a feature dimension and is normalized per index. Note that
      in the specific case of batched scalar inputs where the only axis is the
      batch axis, the default will normalize each index in the batch
      separately. In this case, consider passing `axis=None`.
    mean: The mean value(s) to use during normalization. The passed value(s)
      will be broadcast to the shape of the kept axes above; if the value(s)
      cannot be broadcast, an error will be raised when this layer's `build()`
      method is called.
    variance: The variance value(s) to use during normalization. The passed
      value(s) will be broadcast to the shape of the kept axes above; if the
      value(s) cannot be broadcast, an error will be raised when this layer's
      `build()` method is called.

  Examples:

  Calculate a global mean and variance by analyzing the dataset in `adapt()`.

  >>> adapt_data = np.array([1., 2., 3., 4., 5.], dtype='float32')
  >>> input_data = np.array([1., 2., 3.], dtype='float32')
  >>> layer = tf.keras.layers.Normalization(axis=None)
  >>> layer.adapt(adapt_data)
  >>> layer(input_data)
  <tf.Tensor: shape=(3,), dtype=float32, numpy=
  array([-1.4142135, -0.70710677, 0.], dtype=float32)>

  Calculate a mean and variance for each index on the last axis.

  >>> adapt_data = np.array([[0., 7., 4.],
  ...                        [2., 9., 6.],
  ...                        [0., 7., 4.],
  ...                        [2., 9., 6.]], dtype='float32')
  >>> input_data = np.array([[0., 7., 4.]], dtype='float32')
  >>> layer = tf.keras.layers.Normalization(axis=-1)
  >>> layer.adapt(adapt_data)
  >>> layer(input_data)
  <tf.Tensor: shape=(1, 3), dtype=float32, numpy=
  array([0., 0., 0.], dtype=float32)>

  Pass the mean and variance directly.

  >>> input_data = np.array([[1.], [2.], [3.]], dtype='float32')
  >>> layer = tf.keras.layers.Normalization(mean=3., variance=2.)
  >>> layer(input_data)
  <tf.Tensor: shape=(3, 1), dtype=float32, numpy=
  array([[-1.4142135 ],
         [-0.70710677],
         [ 0.        ]], dtype=float32)>
  """

  def __init__(self, axis=-1, mean=None, variance=None, **kwargs):
    super().__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True)

    # Standardize `axis` to a tuple.
    if axis is None:
      axis = ()
    elif isinstance(axis, int):
      axis = (axis,)
    else:
      axis = tuple(axis)
    self.axis = axis

    # Set `mean` and `variance` if passed. Variables are rejected because the
    # values are baked into constants at build time.
    if isinstance(mean, tf.Variable):
      raise ValueError('Normalization does not support passing a Variable '
                       'for the `mean` init arg.')
    if isinstance(variance, tf.Variable):
      raise ValueError('Normalization does not support passing a Variable '
                       'for the `variance` init arg.')
    # `mean` and `variance` must be supplied together or not at all.
    if (mean is not None) != (variance is not None):
      raise ValueError(
          'When setting values directly, both `mean` and `variance` '
          'must be set. Got mean: {} and variance: {}'.format(mean, variance))
    self.input_mean = mean
    self.input_variance = variance

  def build(self, input_shape):
    """Resolves `axis` against the input rank and creates the layer's state.

    If `mean`/`variance` were passed at init, constant broadcast-shaped
    tensors are created; otherwise trainable=False weights are created for
    `adapt()` to fill in.
    """
    super().build(input_shape)

    if (isinstance(input_shape, (list, tuple)) and
        all(isinstance(shape, tf.TensorShape) for shape in input_shape)):
      raise ValueError('Normalization only accepts a single input. If you are '
                       'passing a python list or tuple as a single input, '
                       'please convert to a numpy array or `tf.Tensor`.')

    input_shape = tf.TensorShape(input_shape).as_list()
    ndim = len(input_shape)

    if any(a < -ndim or a >= ndim for a in self.axis):
      raise ValueError('All `axis` values must be in the range [-ndim, ndim). '
                       'Found ndim: `{}`, axis: {}'.format(ndim, self.axis))

    # Axes to be kept, replacing negative values with positive equivalents.
    # Sorted to avoid transposing axes.
    self._keep_axis = sorted([d if d >= 0 else d + ndim for d in self.axis])
    # All axes to be kept should have known shape.
    for d in self._keep_axis:
      if input_shape[d] is None:
        raise ValueError(
            'All `axis` values to be kept must have known shape. Got axis: {}, '
            'input shape: {}, with unknown axis at index: {}'.format(
                self.axis, input_shape, d))
    # Axes to be reduced.
    self._reduce_axis = [d for d in range(ndim) if d not in self._keep_axis]
    # 1 if an axis should be reduced, 0 otherwise.
    self._reduce_axis_mask = [
        0 if d in self._keep_axis else 1 for d in range(ndim)
    ]
    # Broadcast any reduced axes.
    self._broadcast_shape = [
        input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)
    ]
    mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis)

    if self.input_mean is None:
      # Adapt path: create running-statistics weights and a sample counter.
      self.adapt_mean = self.add_weight(
          name='mean',
          shape=mean_and_var_shape,
          dtype=self.dtype,
          initializer='zeros',
          trainable=False)
      self.adapt_variance = self.add_weight(
          name='variance',
          shape=mean_and_var_shape,
          dtype=self.dtype,
          initializer='ones',
          trainable=False)
      self.count = self.add_weight(
          name='count',
          shape=(),
          dtype=tf.int64,
          initializer='zeros',
          trainable=False)
      self.finalize_state()
    else:
      # In the no adapt case, make constant tensors for mean and variance with
      # proper broadcast shape for use during call.
      mean = self.input_mean * np.ones(mean_and_var_shape)
      variance = self.input_variance * np.ones(mean_and_var_shape)
      mean = tf.reshape(mean, self._broadcast_shape)
      variance = tf.reshape(variance, self._broadcast_shape)
      self.mean = tf.cast(mean, self.compute_dtype)
      self.variance = tf.cast(variance, self.compute_dtype)

  def update_state(self, data):
    """Incrementally folds one batch's moments into the running statistics."""
    if self.input_mean is not None:
      raise ValueError(
          'Cannot `adapt` a Normalization layer that is initialized with '
          'static `mean` and `variance`, you passed mean {} and variance {}.'
          .format(self.input_mean, self.input_variance))

    if not self.built:
      raise RuntimeError('`build` must be called before `update_state`.')

    data = self._standardize_inputs(data)
    data = tf.cast(data, self.adapt_mean.dtype)
    batch_mean, batch_variance = tf.nn.moments(data, axes=self._reduce_axis)
    # Number of elements reduced into each statistic for this batch.
    batch_shape = tf.shape(data, out_type=self.count.dtype)
    if self._reduce_axis:
      batch_reduce_shape = tf.gather(batch_shape, self._reduce_axis)
      batch_count = tf.reduce_prod(batch_reduce_shape)
    else:
      batch_count = 1
    total_count = batch_count + self.count
    # Weighted merge of old statistics and this batch's statistics.
    batch_weight = (
        tf.cast(batch_count, dtype=self.dtype) /
        tf.cast(total_count, dtype=self.dtype))
    existing_weight = 1. - batch_weight
    total_mean = self.adapt_mean * existing_weight + batch_mean * batch_weight
    # The variance is computed using the lack-of-fit sum of squares
    # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares).
    total_variance = ((self.adapt_variance +
                       (self.adapt_mean - total_mean)**2) * existing_weight +
                      (batch_variance +
                       (batch_mean - total_mean)**2) * batch_weight)
    self.adapt_mean.assign(total_mean)
    self.adapt_variance.assign(total_variance)
    self.count.assign(total_count)

  def reset_state(self):  # pylint: disable=method-hidden
    """Restores mean=0, variance=1, count=0; no-op for static mean/variance."""
    if self.input_mean is not None or not self.built:
      return
    self.adapt_mean.assign(tf.zeros_like(self.adapt_mean))
    self.adapt_variance.assign(tf.ones_like(self.adapt_variance))
    self.count.assign(tf.zeros_like(self.count))

  def finalize_state(self):
    """Snapshots the adapted weights into broadcast-shaped constants for call."""
    if self.input_mean is not None or not self.built:
      return
    # In the adapt case, we make constant tensors for mean and variance with
    # proper broadcast shape and dtype each time `finalize_state` is called.
    self.mean = tf.reshape(self.adapt_mean, self._broadcast_shape)
    self.mean = tf.cast(self.mean, self.compute_dtype)
    self.variance = tf.reshape(self.adapt_variance, self._broadcast_shape)
    self.variance = tf.cast(self.variance, self.compute_dtype)

  def call(self, inputs):
    """Applies `(inputs - mean) / sqrt(variance)` elementwise."""
    inputs = self._standardize_inputs(inputs)
    # The base layer automatically casts floating-point inputs, but we
    # explicitly cast here to also allow integer inputs to be passed
    inputs = tf.cast(inputs, self.compute_dtype)
    # `maximum` with epsilon guards against division by a zero std-dev.
    return ((inputs - self.mean) /
            tf.maximum(tf.sqrt(self.variance), backend.epsilon()))

  def compute_output_shape(self, input_shape):
    # Normalization is shape-preserving.
    return input_shape

  def compute_output_signature(self, input_spec):
    # Normalization is spec-preserving.
    return input_spec

  def get_config(self):
    config = super().get_config()
    config.update({
        'axis': self.axis,
        'mean': self._convert_to_list(self.input_mean),
        'variance': self._convert_to_list(self.input_variance),
    })
    return config

  def _standardize_inputs(self, inputs):
    # Convert to a tensor of the layer's dtype.
    inputs = tf.convert_to_tensor(inputs)
    if inputs.dtype != self.dtype:
      inputs = tf.cast(inputs, self.dtype)
    return inputs

  def _convert_to_list(self, inputs):
    # Coerce tensors/ndarrays to plain (JSON-serializable) python lists.
    if tf.is_tensor(inputs):
      inputs = inputs.numpy()
    if isinstance(inputs, (np.ndarray)):
      inputs = inputs.tolist()
      inputs = list(inputs)
    return inputs
| 11,613 | 40.038869 | 80 | py |
keras | keras-master/keras/layers/preprocessing/text_vectorization.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import numpy as np
from keras import backend
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import index_lookup
from keras.layers.preprocessing import string_lookup
from keras.utils import layer_utils
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
# Sentinel strings accepted for the `standardize` and `split` constructor args.
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"

# Output modes, shared with (and re-exported from) the index_lookup layer.
TF_IDF = index_lookup.TF_IDF
INT = index_lookup.INT
MULTI_HOT = index_lookup.MULTI_HOT
COUNT = index_lookup.COUNT

# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'

# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"

# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.TextVectorization",
"keras.layers.experimental.preprocessing.TextVectorization",
v1=[])
class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one example = one string) into either a list of
token indices (one example = 1D tensor of integer token indices) or a dense
representation (one example = 1D tensor of float values representing data
about the example's tokens).
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. When this layer is adapted, it will analyze the
dataset, determine the frequency of individual string values, and create a
vocabulary from them. This vocabulary can have unlimited size or be capped,
depending on the configuration options for this layer; if there are more
unique values in the input than the maximum vocabulary size, the most frequent
terms will be used to create the vocabulary.
The processing of each example contains the following steps:
1. Standardize each example (usually lowercasing + punctuation stripping)
2. Split each example into substrings (usually words)
3. Recombine substrings into tokens (usually ngrams)
4. Index tokens (associate a unique int value with each token)
5. Transform each example using this index, either into a vector of ints or
a dense float vector.
Some notes on passing callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to",
"split"], ["another", "string", "to", "split"]]`. This makes the callable
site natively compatible with `tf.strings.split()`.
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should only
be specified when adapting a vocabulary or when setting
`pad_to_max_tokens=True`. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is `(max_tokens -
1 - (1 if output_mode == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
`"lower_and_strip_punctuation"` (lowercase and remove punctuation) or a
Callable. Default is `"lower_and_strip_punctuation"`.
split: Optional specification for splitting the input text. Values can be
None (no splitting), `"whitespace"` (split on ASCII whitespace), or a
Callable. The default is `"whitespace"`.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`, configuring the layer
as follows:
- `"int"`: Outputs integer indices, one integer index per split string
token. When `output_mode == "int"`, 0 is reserved for masked
locations; this reduces the vocab size to
`max_tokens - 2` instead of `max_tokens - 1`.
- `"multi_hot"`: Outputs a single int array per batch, of either
vocab_size or max_tokens size, containing 1s in all elements where the
token mapped to that index exists at least once in the batch item.
- `"count"`: Like `"multi_hot"`, but the int array contains a count of
the number of times the token at that index appeared in the
batch item.
- `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
For `"int"` output, any shape of input and output is supported. For all
other output modes, currently only rank 1 inputs (and rank 2 outputs after
splitting) are supported.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape
`(batch_size, output_sequence_length)` regardless of how many tokens
resulted from the splitting step. Defaults to None.
pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`, and `"tf_idf"`
modes. If True, the output will have its feature axis padded to
`max_tokens` even if the number of unique tokens in the vocabulary is less
than max_tokens, resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to False.
vocabulary: Optional. Either an array of strings or a string path to a text
file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D
tensor containing the string vocabulary terms. If passing a file path, the
file should contain one line per term in the vocabulary. If this argument
is set, there is no need to `adapt` the layer.
Example:
This example instantiates a `TextVectorization` layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"])
>>> max_features = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer.
>>> vectorize_layer = tf.keras.layers.TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len)
>>>
>>> # Now that the vocab layer has been created, call `adapt` on the text-only
>>> # dataset to create the vocabulary. You don't have to batch, but for large
>>> # datasets this means we're not keeping spare copies of the dataset.
>>> vectorize_layer.adapt(text_dataset.batch(64))
>>>
>>> # Create the model that uses the vectorize text layer
>>> model = tf.keras.models.Sequential()
>>>
>>> # Start by creating an explicit input layer. It needs to have a shape of
>>> # (1,) (because we need to guarantee that there is exactly one string
>>> # input per batch), and the dtype needs to be 'string'.
>>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
>>>
>>> # The first layer in our model is the vectorization layer. After this
>>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
>>> # indices.
>>> model.add(vectorize_layer)
>>>
>>> # Now, the model can map strings to integers, and you can add an embedding
>>> # layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> model.predict(input_data)
array([[2, 1, 4, 0],
[1, 3, 0, 0]])
Example:
This example instantiates a `TextVectorization` layer by passing a list
of vocabulary terms to the layer's `__init__()` method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = tf.keras.layers.TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>>
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
# TODO(momernick): Add an examples section to the docstring.
  def __init__(self,
               max_tokens=None,
               standardize="lower_and_strip_punctuation",
               split="whitespace",
               ngrams=None,
               output_mode="int",
               output_sequence_length=None,
               pad_to_max_tokens=False,
               vocabulary=None,
               **kwargs):
    # This layer only applies to string processing, and so should only have
    # a dtype of 'string'.
    if "dtype" in kwargs and kwargs["dtype"] != tf.string:
      raise ValueError(
          f"`TextVectorization` may only have a dtype of string. "
          f"Received dtype: {kwargs['dtype']}.")
    elif "dtype" not in kwargs:
      kwargs["dtype"] = tf.string

    # 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
    layer_utils.validate_string_arg(
        standardize,
        allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
        layer_name="TextVectorization",
        arg_name="standardize",
        allow_none=True,
        allow_callables=True)

    # 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
    layer_utils.validate_string_arg(
        split,
        allowable_strings=(SPLIT_ON_WHITESPACE),
        layer_name="TextVectorization",
        arg_name="split",
        allow_none=True,
        allow_callables=True)

    # Support deprecated names for output_modes ("binary" and "tf-idf") by
    # canonicalizing them before validation.
    if output_mode == "binary":
      output_mode = MULTI_HOT
    if output_mode == "tf-idf":
      output_mode = TF_IDF

    # 'output_mode' must be one of (None, INT, COUNT, MULTI_HOT, TF_IDF)
    layer_utils.validate_string_arg(
        output_mode,
        allowable_strings=(INT, COUNT, MULTI_HOT, TF_IDF),
        layer_name="TextVectorization",
        arg_name="output_mode",
        allow_none=True)

    # 'ngrams' must be one of (None, int, tuple(int))
    if not (ngrams is None or
            isinstance(ngrams, int) or
            isinstance(ngrams, tuple) and
            all(isinstance(item, int) for item in ngrams)):
      raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
                        "integers. Got %s") % (ngrams,))

    # 'output_sequence_length' must be one of (None, int) and is only
    # set if output_mode is INT.
    if (output_mode == INT and not (isinstance(output_sequence_length, int) or
                                    (output_sequence_length is None))):
      raise ValueError("`output_sequence_length` must be either None or an "
                       "integer when `output_mode` is 'int'. "
                       "Got %s" % output_sequence_length)

    if output_mode != INT and output_sequence_length is not None:
      raise ValueError(
          f"`output_sequence_length` must not be set if `output_mode` is not "
          f"'int'. Received output_sequence_length={output_sequence_length}.")

    self._max_tokens = max_tokens
    self._standardize = standardize
    self._split = split
    self._ngrams_arg = ngrams
    # An integer N means "all ngram orders 1..N"; a tuple is used verbatim.
    if isinstance(ngrams, int):
      self._ngrams = tuple(range(1, ngrams + 1))
    else:
      self._ngrams = ngrams

    self._output_mode = output_mode
    self._output_sequence_length = output_sequence_length

    # Drop deprecated config options.
    kwargs.pop("vocabulary_size", None)

    super().__init__(**kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell("TextVectorization").set(
        True)

    # Token -> index mapping (and multi-hot/count/tf-idf binning) is
    # delegated to an inner StringLookup layer.
    self._index_lookup_layer = string_lookup.StringLookup(
        max_tokens=max_tokens,
        vocabulary=vocabulary,
        pad_to_max_tokens=pad_to_max_tokens,
        mask_token="",
        output_mode=output_mode if output_mode is not None else INT)
def compute_output_shape(self, input_shape):
if self._output_mode == INT:
return tf.TensorShape([input_shape[0], self._output_sequence_length])
if self._split is None:
if len(input_shape) <= 1:
input_shape = tuple(input_shape) + (1,)
else:
input_shape = tuple(input_shape) + (None,)
return self._index_lookup_layer.compute_output_shape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = (tf.int64 if self._output_mode == INT
else backend.floatx())
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
  def update_state(self, data):
    """Accumulates vocabulary state from `data` by preprocessing it and
    delegating to the underlying StringLookup layer."""
    self._index_lookup_layer.update_state(self._preprocess(data))
  def finalize_state(self):
    """Finalizes accumulated state by delegating to the underlying
    StringLookup layer."""
    self._index_lookup_layer.finalize_state()
  def reset_state(self):  # pylint: disable=method-hidden
    """Resets accumulated state by delegating to the underlying
    StringLookup layer."""
    self._index_lookup_layer.reset_state()
  def get_vocabulary(self, include_special_tokens=True):
    """Returns the current vocabulary of the layer.

    Args:
      include_special_tokens: If True, the returned vocabulary will include
        the padding and OOV tokens, and a term's index in the vocabulary will
        equal the term's index when calling the layer. If False, the returned
        vocabulary will not include any padding or OOV tokens.

    Returns:
      The vocabulary terms, as reported by the underlying StringLookup layer.
    """
    return self._index_lookup_layer.get_vocabulary(include_special_tokens)
  def vocabulary_size(self):
    """Gets the current size of the layer's vocabulary.

    Returns:
      The integer size of the vocabulary, including optional mask and oov
      indices.
    """
    return self._index_lookup_layer.vocabulary_size()
def get_config(self):
# This does not include the 'vocabulary' arg, since if the vocab was passed
# at init time it's now stored in variable state - we don't need to
# pull it off disk again.
config = {
"max_tokens": self._index_lookup_layer.max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._index_lookup_layer.pad_to_max_tokens,
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
  def set_vocabulary(self, vocabulary, idf_weights=None):
    """Sets vocabulary (and optionally document frequency) data for this layer.

    This method sets the vocabulary and idf weights for this layer directly,
    instead of analyzing a dataset through 'adapt'. It should be used whenever
    the vocab (and optionally document frequency) information is already known.
    If vocabulary data is already present in the layer, this method will
    replace it.

    Args:
      vocabulary: Either an array or a string path to a text file. If passing
        an array, can pass a tuple, list, 1D numpy array, or 1D tensor
        containing the vocabulary terms. If passing a file path, the file
        should contain one line per term in the vocabulary.
      idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
        document frequency weights with equal length to vocabulary. Must be
        set if `output_mode` is `"tf_idf"`. Should not be set otherwise.

    Raises:
      ValueError: If there are too many inputs, the inputs do not match, or
        input data is missing.
      RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens in `"multi_hot"`, `"count"`, and `"tf_idf"`
        modes, if `pad_to_max_tokens` is False and the layer itself has
        already been called.
    """
    self._index_lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
def build(self, input_shape):
# We have to use 'and not ==' here, because input_shape[1] !/== 1 can result
# in None for undefined shape axes. If using 'and !=', this causes the
# expression to evaluate to False instead of True if the shape is undefined;
# the expression needs to evaluate to True in that case.
if self._split is not None:
if input_shape.ndims > 1 and not input_shape[-1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the innermost "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if tf_utils.is_ragged(inputs):
lowercase_inputs = tf.ragged.map_flat_values(
tf.strings.lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = tf.identity(lowercase_inputs)
else:
lowercase_inputs = tf.strings.lower(inputs)
inputs = tf.strings.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the 1st axis is of dimension 1 and
# so can be squeezed out. We do this here instead of after splitting for
# performance reasons - it's more expensive to squeeze a ragged tensor.
if inputs.shape.ndims > 1:
inputs = tf.squeeze(inputs, axis=-1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = tf.strings.split(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
raise ValueError(
("%s is not a supported splitting."
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = tf.strings.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = tf.convert_to_tensor(inputs)
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
lookup_data = self._index_lookup_layer(inputs)
# For any non-int output, we can return directly from the underlying layer.
if self._output_mode is not INT:
return lookup_data
# If we have a ragged tensor, we can pad during the conversion to dense.
if tf_utils.is_ragged(lookup_data):
shape = lookup_data.shape.as_list()
# If output sequence length is None, to_tensor will pad the last dimension
# to the bounding shape of the ragged dimension.
shape[-1] = self._output_sequence_length
return lookup_data.to_tensor(default_value=0, shape=shape)
# If we have a dense tensor, we need to pad/trim directly.
if self._output_sequence_length is not None:
# Maybe trim the output.
lookup_data = lookup_data[..., :self._output_sequence_length]
# Maybe pad the output. We need to be careful to use dynamic shape here as
# required_space_to_batch_paddings requires a fully known shape.
shape = tf.shape(lookup_data)
padded_shape = tf.concat((shape[:-1], [self._output_sequence_length]), 0)
padding, _ = tf.required_space_to_batch_paddings(shape, padded_shape)
return tf.pad(lookup_data, padding)
return lookup_data
| 23,109 | 43.961089 | 101 | py |
keras | keras-master/keras/layers/preprocessing/normalization_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.normalization."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute import strategy_combinations
from keras.layers.preprocessing import normalization
from keras.layers.preprocessing import preprocessing_test_utils
def _get_layer_computation_test_cases():
test_cases = ({
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": -1,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element"
}, {
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis"
}, {
"adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis_flat_data"
}, {
"adapt_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"axis":
1,
"test_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"expected":
np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
[[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
np.float32),
"testcase_name":
"3d_internal_axis"
}, {
"adapt_data":
np.array(
[[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
np.float32),
"axis": (1, 2),
"test_data":
np.array(
[[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
np.float32),
"expected":
np.array(
[[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
np.float32),
"testcase_name":
"3d_multiple_axis"
})
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.times(
        tf.__internal__.test.combinations.combine(
            strategy=strategy_combinations.all_strategies +
            strategy_combinations.multi_worker_mirrored_strategies,
            mode=["eager"]), _get_layer_computation_test_cases()))
class NormalizationTest(keras_parameterized.TestCase,
                        preprocessing_test_utils.PreprocessingLayerTest):
  """Runs the Normalization computation cases under every strategy above."""
  def test_layer_computation(self, strategy, adapt_data, axis, test_data,
                             use_dataset, expected):
    """Adapts on `adapt_data` inside the strategy scope and checks output."""
    input_shape = tuple([None for _ in range(test_data.ndim - 1)])
    if use_dataset:
      # Keras APIs expect batched datasets
      adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
          test_data.shape[0] // 2)
      test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
          test_data.shape[0] // 2)
    with strategy.scope():
      input_data = keras.Input(shape=input_shape)
      layer = normalization.Normalization(axis=axis)
      layer.adapt(adapt_data)
      output = layer(input_data)
      model = keras.Model(input_data, output)
      output_data = model.predict(test_data)
    self.assertAllClose(expected, output_data)
if __name__ == "__main__":
  # Uses the multi-process runner's test main (rather than tf.test.main) —
  # presumably required by the multi-worker strategies parameterized above.
  tf.compat.v1.enable_v2_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()
| 4,811 | 37.496 | 80 | py |
keras | keras-master/keras/layers/preprocessing/hashing_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hashing layer."""
import os
from absl.testing import parameterized
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import input_layer
from keras.engine import training
from keras.layers.preprocessing import hashing
import numpy as np
import tensorflow.compat.v2 as tf
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class HashingTest(keras_parameterized.TestCase):
  """Tests for the `Hashing` preprocessing layer.

  Expected bucket ids below are hard-coded; per the inline comments they are
  stable across platforms for both the default (FarmHash, no salt) and the
  salted (SipHash) configurations, over dense, sparse, and ragged inputs.
  """
  def test_hash_single_bin(self):
    # With a single bin, every input must land in bucket 0.
    layer = hashing.Hashing(num_bins=1)
    inp = np.asarray([['A'], ['B'], ['C'], ['D'], ['E']])
    output = layer(inp)
    self.assertAllClose([[0], [0], [0], [0], [0]], output)
  def test_hash_dense_input_farmhash(self):
    layer = hashing.Hashing(num_bins=2)
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[0], [0], [1], [0], [0]], output)
  def test_hash_dense_input_mask_value_farmhash(self):
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    empty_mask_output = empty_mask_layer(inp)
    omar_mask_output = omar_mask_layer(inp)
    # Outputs should be one more than test_hash_dense_input_farmhash (the
    # zeroth bin is now reserved for masks).
    self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
    # 'omar' should map to 0.
    self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
  def test_hash_dense_list_input_farmhash(self):
    layer = hashing.Hashing(num_bins=2)
    inp = [['omar'], ['stringer'], ['marlo'], ['wire'], ['skywalker']]
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[0], [0], [1], [0], [0]], output)
    inp = ['omar', 'stringer', 'marlo', 'wire', 'skywalker']
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([0, 0, 1, 0, 0], output)
  def test_hash_dense_int_input_farmhash(self):
    layer = hashing.Hashing(num_bins=3)
    inp = np.asarray([[0], [1], [2], [3], [4]])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[1], [0], [1], [0], [2]], output)
  def test_hash_dense_input_siphash(self):
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    # Note the result is different from FarmHash.
    self.assertAllClose([[0], [1], [0], [1], [0]], output)
    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    output_2 = layer_2(inp)
    # Note the result is different from (133, 137).
    self.assertAllClose([[1], [0], [1], [0], [1]], output_2)
  def test_hash_dense_int_input_siphash(self):
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    inp = np.asarray([[0], [1], [2], [3], [4]])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[1], [1], [2], [0], [1]], output)
  def test_hash_sparse_input_farmhash(self):
    layer = hashing.Hashing(num_bins=2)
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    output = layer(inp)
    # Sparse structure (indices) must be preserved; only values are hashed.
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([0, 0, 1, 0, 0], output.values)
  def test_hash_sparse_input_mask_value_farmhash(self):
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    empty_mask_output = empty_mask_layer(inp)
    omar_mask_output = omar_mask_layer(inp)
    self.assertAllClose(indices, omar_mask_output.indices)
    self.assertAllClose(indices, empty_mask_output.indices)
    # Outputs should be one more than test_hash_sparse_input_farmhash (the
    # zeroth bin is now reserved for masks).
    self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
    # 'omar' should map to 0.
    self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)
  def test_hash_sparse_int_input_farmhash(self):
    layer = hashing.Hashing(num_bins=3)
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([1, 0, 1, 0, 2], output.values)
  def test_hash_sparse_input_siphash(self):
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(output.indices, indices)
    # The result should be same with test_hash_dense_input_siphash.
    self.assertAllClose([0, 1, 0, 1, 0], output.values)
    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    output = layer_2(inp)
    # The result should be same with test_hash_dense_input_siphash.
    self.assertAllClose([1, 0, 1, 0, 1], output.values)
  def test_hash_sparse_int_input_siphash(self):
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = tf.SparseTensor(
        indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([1, 1, 2, 0, 1], output.values)
  def test_hash_ragged_string_input_farmhash(self):
    layer = hashing.Hashing(num_bins=2)
    inp_data = tf.ragged.constant(
        [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
        dtype=tf.string)
    out_data = layer(inp_data)
    # Same hashed output as test_hash_sparse_input_farmhash
    expected_output = [[0, 0, 1, 0], [1, 0, 0]]
    self.assertAllEqual(expected_output, out_data)
    # Also verify the layer inside a functional model with a ragged Input.
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_hash_ragged_input_mask_value(self):
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    inp_data = tf.ragged.constant(
        [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
        dtype=tf.string)
    empty_mask_output = empty_mask_layer(inp_data)
    omar_mask_output = omar_mask_layer(inp_data)
    # Outputs should be one more than test_hash_ragged_string_input_farmhash
    # (the zeroth bin is now reserved for masks).
    expected_output = [[1, 1, 2, 1], [2, 1, 1]]
    self.assertAllClose(expected_output, empty_mask_output)
    # 'omar' should map to 0.
    expected_output = [[0, 1, 2, 1], [2, 1, 1]]
    self.assertAllClose(expected_output, omar_mask_output)
  def test_hash_ragged_int_input_farmhash(self):
    layer = hashing.Hashing(num_bins=3)
    inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
    out_data = layer(inp_data)
    # Same hashed output as test_hash_sparse_input_farmhash
    expected_output = [[1, 0, 0, 2], [1, 0, 1]]
    self.assertAllEqual(expected_output, out_data)
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_hash_ragged_string_input_siphash(self):
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    inp_data = tf.ragged.constant(
        [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
        dtype=tf.string)
    out_data = layer(inp_data)
    # Same hashed output as test_hash_dense_input_siphash
    expected_output = [[0, 1, 0, 1], [0, 0, 1]]
    self.assertAllEqual(expected_output, out_data)
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    out_data = layer_2(inp_data)
    expected_output = [[1, 0, 1, 0], [1, 1, 0]]
    self.assertAllEqual(expected_output, out_data)
    out_t = layer_2(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_hash_ragged_int_input_siphash(self):
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
    out_data = layer(inp_data)
    # Same hashed output as test_hash_sparse_input_farmhash
    expected_output = [[1, 1, 0, 1], [2, 1, 1]]
    self.assertAllEqual(expected_output, out_data)
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_invalid_inputs(self):
    # Constructor argument validation: bad num_bins and bad salt shapes.
    with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
      _ = hashing.Hashing(num_bins=None)
    with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
      _ = hashing.Hashing(num_bins=-1)
    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
      _ = hashing.Hashing(num_bins=2, salt='string')
    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
      _ = hashing.Hashing(num_bins=2, salt=[1])
    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
      _ = hashing.Hashing(num_bins=1, salt=tf.constant([133, 137]))
  def test_hash_compute_output_signature(self):
    input_shape = tf.TensorShape([2, 3])
    input_spec = tf.TensorSpec(input_shape, tf.string)
    layer = hashing.Hashing(num_bins=2)
    output_spec = layer.compute_output_signature(input_spec)
    self.assertEqual(output_spec.shape.dims, input_shape.dims)
    self.assertEqual(output_spec.dtype, tf.int64)
  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    layer = hashing.Hashing(num_bins=2, name='hashing')
    config = layer.get_config()
    layer_1 = hashing.Hashing.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
  def test_saved_model(self):
    input_data = np.array(['omar', 'stringer', 'marlo', 'wire', 'skywalker'])
    inputs = keras.Input(shape=(None,), dtype=tf.string)
    outputs = hashing.Hashing(num_bins=100)(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    original_output_data = model(input_data)
    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
    model.save(output_path, save_format='tf')
    loaded_model = keras.models.load_model(output_path)
    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)
    # Validate correctness of the new model.
    new_output_data = loaded_model(input_data)
    self.assertAllClose(new_output_data, original_output_data)
  @parameterized.named_parameters(
      (
          'list_input',
          [1, 2, 3],
          [1, 1, 1],
      ),
      (
          'list_input_2d',
          [[1], [2], [3]],
          [[1], [1], [1]],
      ),
      (
          'list_input_2d_multiple',
          [[1, 2], [2, 3], [3, 4]],
          [[1, 1], [1, 1], [1, 1]],
      ),
      (
          'list_input_3d',
          [[[1], [2]], [[2], [3]], [[3], [4]]],
          [[[1], [1]], [[1], [1]], [[1], [1]]],
      ),
  )
  def test_hash_list_input(self, input_data, expected):
    """Plain Python lists of ints of various ranks are accepted as input."""
    layer = hashing.Hashing(num_bins=2)
    out_data = layer(input_data)
    self.assertAllEqual(expected, out_data.numpy().tolist())
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  tf.test.main()
| 13,272 | 40.092879 | 80 | py |
keras | keras-master/keras/layers/preprocessing/preprocessing_test_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras' base preprocessing layer."""
import tensorflow.compat.v2 as tf
import collections
import numpy as np
class PreprocessingLayerTest(tf.test.TestCase):
  """Base test class for preprocessing layer API validation.

  Provides recursive close-or-equal assertions plus a family of
  `validate_accumulator_*` helpers that check a combiner's
  compute/merge/extract/restore/serialize contract.
  """
  # TODO(b/137303934): Consider incorporating something like this Close vs All
  # behavior into core tf.test.TestCase.
  def assertAllCloseOrEqual(self, a, b, msg=None):
    """Asserts that elements are close (if numeric) or equal (if string).

    Recurses through lists/tuples and mappings; `None` compares by equality.
    """
    if a is None or b is None:
      self.assertAllEqual(a, b, msg=msg)
    elif isinstance(a, (list, tuple)):
      self.assertEqual(len(a), len(b))
      for a_value, b_value in zip(a, b):
        self.assertAllCloseOrEqual(a_value, b_value, msg=msg)
    elif isinstance(a, collections.abc.Mapping):
      self.assertEqual(len(a), len(b))
      for key, a_value in a.items():
        b_value = b[key]
        error_message = "{} ({})".format(msg, key) if msg else None
        self.assertAllCloseOrEqual(a_value, b_value, error_message)
    elif (isinstance(a, float) or
          hasattr(a, "dtype") and np.issubdtype(a.dtype, np.number)):
      # Numeric leaves compare with tolerance; everything else exactly.
      self.assertAllClose(a, b, msg=msg)
    else:
      self.assertAllEqual(a, b, msg=msg)
  def assert_extracted_output_equal(self, combiner, acc1, acc2, msg=None):
    """Asserts that two accumulators extract to equivalent data."""
    data_1 = combiner.extract(acc1)
    data_2 = combiner.extract(acc2)
    self.assertAllCloseOrEqual(data_1, data_2, msg=msg)
  # This is an injection seam so that tests like TextVectorizationTest can
  # define their own methods for asserting that accumulators are equal.
  compare_accumulators = assertAllCloseOrEqual
  def validate_accumulator_computation(self, combiner, data, expected):
    """Validate that various combinations of compute and merge are identical."""
    if len(data) < 4:
      raise AssertionError(
          f"Data must have at least 4 elements. Received "
          f"len(data)={len(data)}.")
    # Shard the data three ways so compute/merge can be exercised in every
    # order and nesting combination below.
    data_0 = np.array([data[0]])
    data_1 = np.array([data[1]])
    data_2 = np.array(data[2:])
    single_compute = combiner.compute(data)
    all_merge = combiner.merge([
        combiner.compute(data_0),
        combiner.compute(data_1),
        combiner.compute(data_2)
    ])
    self.compare_accumulators(
        single_compute,
        all_merge,
        msg="Sharding data should not change the data output.")
    unordered_all_merge = combiner.merge([
        combiner.compute(data_1),
        combiner.compute(data_2),
        combiner.compute(data_0)
    ])
    self.compare_accumulators(
        all_merge,
        unordered_all_merge,
        msg="The order of merge arguments should not change the data "
        "output.")
    hierarchical_merge = combiner.merge([
        combiner.compute(data_1),
        combiner.merge([combiner.compute(data_2),
                        combiner.compute(data_0)])
    ])
    self.compare_accumulators(
        all_merge,
        hierarchical_merge,
        msg="Nesting merge arguments should not change the data output.")
    nested_compute = combiner.compute(
        data_0, combiner.compute(data_1, combiner.compute(data_2)))
    self.compare_accumulators(
        all_merge,
        nested_compute,
        msg="Nesting compute arguments should not change the data output.")
    mixed_compute = combiner.merge([
        combiner.compute(data_0),
        combiner.compute(data_1, combiner.compute(data_2))
    ])
    self.compare_accumulators(
        all_merge,
        mixed_compute,
        msg="Mixing merge and compute calls should not change the data "
        "output.")
    single_merge = combiner.merge([
        combiner.merge([combiner.compute(data_0)]),
        combiner.compute(data_1, combiner.compute(data_2))
    ])
    self.compare_accumulators(
        all_merge,
        single_merge,
        msg="Calling merge with a data length of 1 should not change the data "
        "output.")
    self.compare_accumulators(
        expected,
        all_merge,
        msg="Calculated accumulators "
        "did not match expected accumulator.")
  def validate_accumulator_extract(self, combiner, data, expected):
    """Validates the expected result of computing and then extracting."""
    acc = combiner.compute(data)
    extracted_data = combiner.extract(acc)
    self.assertAllCloseOrEqual(expected, extracted_data)
  def validate_accumulator_extract_and_restore(self, combiner, data, expected):
    """Validate that the extract<->restore loop loses no data."""
    acc = combiner.compute(data)
    extracted_data = combiner.extract(acc)
    restored_acc = combiner.restore(extracted_data)
    self.assert_extracted_output_equal(combiner, acc, restored_acc)
    self.assertAllCloseOrEqual(expected, combiner.extract(restored_acc))
  def validate_accumulator_serialize_and_deserialize(self, combiner, data,
                                                     expected):
    """Validate that the serialize<->deserialize loop loses no data."""
    acc = combiner.compute(data)
    serialized_data = combiner.serialize(acc)
    deserialized_data = combiner.deserialize(serialized_data)
    self.compare_accumulators(acc, deserialized_data)
    self.compare_accumulators(expected, deserialized_data)
  def validate_accumulator_uniqueness(self, combiner, data):
    """Validate that every call to compute creates a unique accumulator."""
    acc = combiner.compute(data)
    acc2 = combiner.compute(data)
    self.assertIsNot(acc, acc2)
    self.compare_accumulators(acc, acc2)
| 6,169 | 36.852761 | 80 | py |
keras | keras-master/keras/layers/preprocessing/category_crossing_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.category_crossing."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
from keras.distribute.strategy_combinations import all_strategies
from keras.layers.preprocessing import category_crossing
from keras.layers.preprocessing import preprocessing_test_utils
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
  """Optionally repeats, then batches `dataset` for `distribution`.

  Args:
    dataset: A `tf.data.Dataset` to batch.
    batch_size: Batch size to apply.
    distribution: The `tf.distribute` strategy the data will be fed to.
    repeat: If truthy, repeat the dataset this many times before batching.

  Returns:
    The batched dataset. TPU strategies get `drop_remainder=True`, since
    TPUs currently require fully defined input shapes.
  """
  if repeat:
    dataset = dataset.repeat(repeat)
  tpu_strategies = (tf.distribute.experimental.TPUStrategy,
                    tf.compat.v1.distribute.experimental.TPUStrategy)
  drop_remainder = isinstance(distribution, tpu_strategies)
  return dataset.batch(batch_size, drop_remainder=drop_remainder)
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        # Investigate why crossing is not supported with TPU.
        distribution=all_strategies,
        mode=['eager', 'graph']))
class CategoryCrossingDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Runs CategoryCrossing under each distribution strategy above."""
  def test_distribution(self, distribution):
    """Crosses two string features and checks the crossed output values."""
    input_array_1 = np.array([['a', 'b'], ['c', 'd']])
    input_array_2 = np.array([['e', 'f'], ['g', 'h']])
    inp_dataset = tf.data.Dataset.from_tensor_slices(
        {'input_1': input_array_1, 'input_2': input_array_2})
    inp_dataset = batch_wrapper(inp_dataset, 2, distribution)
    # pyformat: disable
    # Expected: cartesian product of the two features per row, joined by _X_.
    expected_output = [[b'a_X_e', b'a_X_f', b'b_X_e', b'b_X_f'],
                       [b'c_X_g', b'c_X_h', b'd_X_g', b'd_X_h']]
    tf.config.set_soft_device_placement(True)
    with distribution.scope():
      input_data_1 = keras.Input(shape=(2,), dtype=tf.string,
                                 name='input_1')
      input_data_2 = keras.Input(shape=(2,), dtype=tf.string,
                                 name='input_2')
      input_data = [input_data_1, input_data_2]
      layer = category_crossing.CategoryCrossing()
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(inp_dataset)
    self.assertAllEqual(expected_output, output_dataset)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 3,041 | 39.026316 | 108 | py |
keras | keras-master/keras/layers/preprocessing/text_vectorization_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
import tensorflow.compat.v2 as tf
import gc
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import backend
from keras import keras_parameterized
from keras import testing_utils
from keras.layers import convolutional
from keras.layers import core
from keras.layers import embeddings
from keras.layers.preprocessing import preprocessing_test_utils
from keras.layers.preprocessing import text_vectorization
from keras.utils import generic_utils
def _get_end_to_end_test_cases():
  """Builds the parameterized cases for the end-to-end adapt tests.

  Each base case supplies `vocab_data` (data to `adapt` on), `input_data`,
  `kwargs` (constructor arguments for `TextVectorization`) and the
  `expected_output`. Every case is then crossed with `use_dataset` in
  (True, False) so each runs once with numpy inputs and once (suffixed
  `_with_dataset`) with `tf.data.Dataset` inputs.

  Returns:
    A list of dicts suitable for `parameterized.named_parameters`.
  """
  test_cases = (
      {
          "testcase_name":
              "test_simple_tokens_int_mode",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
              "standardize": None,
              "split": None,
              "output_mode": text_vectorization.INT
          },
          "expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
      },
      {
          "testcase_name":
              "test_simple_tokens_int_mode_hard_cap",
          # Create an array where 'earth' is the most frequent term, followed by
          # 'wind', then 'and', then 'fire'. This ensures that the vocab
          # is sorting by frequency.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": 6,
              "standardize": None,
              "split": None,
              "output_mode": text_vectorization.INT
          },
          "expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
      },
      {
          "testcase_name":
              "test_special_tokens_int_mode",
          # Mask tokens in the vocab data should be ignored, and mapped to 0 in
          # from the input data.
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        [""], [""], [""], ["[UNK]"], ["[UNK]"], ["[UNK]"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], [""], ["wind"], ["[UNK]"], ["and"], [""],
                        ["fire"], ["and"], ["[UNK]"], ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
              "standardize": None,
              "split": None,
              "output_mode": text_vectorization.INT
          },
          "expected_output": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],
      },
      {
          "testcase_name":
              "test_documents_int_mode",
          "vocab_data":
              np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
                        ["and wind and"]]),
          "input_data":
              np.array([["earth wind and"], ["fire fire"], ["and earth"],
                        ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
              "standardize": None,
              "split": text_vectorization.SPLIT_ON_WHITESPACE,
              "output_mode": text_vectorization.INT
          },
          "expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
      },
      {
          "testcase_name":
              "test_documents_1d_input_int_mode",
          "vocab_data":
              np.array([
                  "fire earth earth", "earth earth", "wind wind", "and wind and"
              ]),
          "input_data":
              np.array([["earth wind and"], ["fire fire"], ["and earth"],
                        ["michigan"]]),
          "kwargs": {
              "max_tokens": None,
              "standardize": None,
              "split": text_vectorization.SPLIT_ON_WHITESPACE,
              "output_mode": text_vectorization.INT
          },
          "expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
      },
      {
          "testcase_name":
              "test_simple_tokens_binary_mode",
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "standardize": None,
              "split": None,
              "output_mode": text_vectorization.MULTI_HOT
          },
          "expected_output": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
                              [0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],
      },
      {
          "testcase_name":
              "test_documents_binary_mode",
          "vocab_data":
              np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
                        ["and wind and"]]),
          "input_data":
              np.array([["earth wind"], ["and"], ["fire fire"],
                        ["earth michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "standardize": None,
              "split": text_vectorization.SPLIT_ON_WHITESPACE,
              "output_mode": text_vectorization.MULTI_HOT
          },
          "expected_output": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1],
                              [1, 1, 0, 0, 0]],
      },
      {
          "testcase_name":
              "test_simple_tokens_count_mode",
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "standardize": None,
              "split": None,
              "output_mode": text_vectorization.COUNT
          },
          "expected_output": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
                              [0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],
      },
      {
          "testcase_name":
              "test_documents_count_mode",
          "vocab_data":
              np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
                        ["and wind and"]]),
          "input_data":
              np.array([["earth wind"], ["and"], ["fire fire"],
                        ["earth michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "standardize": None,
              "split": text_vectorization.SPLIT_ON_WHITESPACE,
              "output_mode": text_vectorization.COUNT
          },
          "expected_output": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 2],
                              [1, 1, 0, 0, 0]],
      },
      {
          "testcase_name":
              "test_tokens_idf_mode",
          "vocab_data":
              np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
                        ["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
          "input_data":
              np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
                        ["and"], ["earth"], ["michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "standardize": None,
              "split": None,
              "output_mode": text_vectorization.TF_IDF
          },
          "expected_output": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],
                              [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],
                              [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],
                              [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],
      },
      {
          "testcase_name":
              "test_documents_idf_mode",
          "vocab_data":
              np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
                        ["and wind and"]]),
          "input_data":
              np.array([["earth wind"], ["and"], ["fire fire"],
                        ["earth michigan"]]),
          "kwargs": {
              "max_tokens": 5,
              "pad_to_max_tokens": True,
              "standardize": None,
              "split": text_vectorization.SPLIT_ON_WHITESPACE,
              "output_mode": text_vectorization.TF_IDF
          },
          "expected_output": [[0., 0.847298, 0.847298, 0., 0.],
                              [0., 0., 0., 1.098612, 0.],
                              [0., 0., 0., 0., 2.197225],
                              [0.972955, 0.847298, 0., 0., 0.]],
      },
  )
  crossed_test_cases = []
  # Cross above test cases with use_dataset in (True, False)
  for use_dataset in (True, False):
    for case in test_cases:
      case = case.copy()
      if use_dataset:
        case["testcase_name"] = case["testcase_name"] + "_with_dataset"
      case["use_dataset"] = use_dataset
      crossed_test_cases.append(case)
  return crossed_test_cases
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationLayerTest(keras_parameterized.TestCase,
                                 preprocessing_test_utils.PreprocessingLayerTest
                                ):
  """End-to-end behavior of TextVectorization across input kinds and modes."""

  @parameterized.named_parameters(*_get_end_to_end_test_cases())
  def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                       use_dataset, expected_output):
    """Adapts on `vocab_data` and checks output for each generated case."""
    cls = text_vectorization.TextVectorization
    # INT mode emits token indices (int64); all other modes emit floats.
    if kwargs.get("output_mode") == text_vectorization.INT:
      expected_output_dtype = tf.int64
    else:
      expected_output_dtype = tf.float32
    input_shape = input_data.shape
    if use_dataset:
      # Keras APIs expect batched datasets.
      # TODO(rachelim): `model.predict` predicts the result on each
      # dataset batch separately, then tries to concatenate the results
      # together. When the results have different shapes on the non-concat
      # axis (which can happen in the output_mode = INT case for
      # TextVectorization), the concatenation fails. In real use cases, this may
      # not be an issue because users are likely to pipe the preprocessing layer
      # into other keras layers instead of predicting it directly. A workaround
      # for these unit tests is to have the dataset only contain one batch, so
      # no concatenation needs to happen with the result. For consistency with
      # numpy input, we should make `predict` join differently shaped results
      # together sensibly, with 0 padding.
      input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
          input_shape[0])
      vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
          input_shape[0])
    output_data = testing_utils.layer_test(
        cls,
        kwargs=kwargs,
        input_shape=input_shape,
        input_data=input_data,
        input_dtype=tf.string,
        expected_output_dtype=expected_output_dtype,
        validate_training=False,
        adapt_data=vocab_data)
    self.assertAllClose(expected_output, output_data)

  def test_scalar_input_int_mode_no_len_limit(self):
    """A scalar string tokenizes to a 1-D index vector of its full length."""
    vocab_data = [
        "fire earth earth", "earth earth", "wind wind", "and wind and"
    ]
    input_data = "earth wind and fire fire and earth michigan"
    layer = text_vectorization.TextVectorization()
    layer.adapt(vocab_data)
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1])
    # Resetting the vocabulary explicitly should give the same mapping.
    layer.set_vocabulary(["earth", "wind", "and", "fire"])
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1])

  def test_scalar_input_int_mode_trim_to_len_limit(self):
    """`output_sequence_length` truncates scalar-input output."""
    vocab_data = [
        "fire earth earth", "earth earth", "wind wind", "and wind and"
    ]
    input_data = "earth wind and fire fire and earth michigan"
    layer = text_vectorization.TextVectorization(output_sequence_length=3)
    layer.adapt(vocab_data)
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [2, 3, 4])
    layer.set_vocabulary(["earth", "wind", "and", "fire"])
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [2, 3, 4])

  def test_scalar_input_int_pad_to_len_limit(self):
    """`output_sequence_length` zero-pads scalar-input output."""
    vocab_data = [
        "fire earth earth", "earth earth", "wind wind", "and wind and"
    ]
    input_data = "earth wind and fire fire and earth michigan"
    layer = text_vectorization.TextVectorization(output_sequence_length=10)
    layer.adapt(vocab_data)
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1, 0, 0])
    layer.set_vocabulary(["earth", "wind", "and", "fire"])
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1, 0, 0])

  def test_list_inputs_1d(self):
    """A 1-D Python list of strings is tokenized per element."""
    vocab_data = ["two two two", "two three three", "three four four five"]
    input_data = ["two three", "four five"]
    layer = text_vectorization.TextVectorization()
    layer.adapt(vocab_data)
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
    layer.set_vocabulary(["two", "three", "four", "five"])
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])

  def test_tensor_inputs(self):
    """`tf.constant` string inputs behave like list inputs."""
    vocab_data = tf.constant(
        ["two two two", "two three three", "three four four five"])
    input_data = tf.constant(["two three", "four five"])
    layer = text_vectorization.TextVectorization()
    layer.adapt(vocab_data)
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
    layer.set_vocabulary(["two", "three", "four", "five"])
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])

  def test_list_inputs_2d(self):
    """A 2-D list (one string per row) is tokenized per row."""
    vocab_data = [
        ["two two two"], ["two three three"], ["three four four five"]]
    input_data = [["two three"], ["four five"]]
    layer = text_vectorization.TextVectorization()
    layer.adapt(vocab_data)
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
    layer.set_vocabulary(["two", "three", "four", "five"])
    out = layer(input_data)
    self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])

  def test_dataset_of_single_strings(self):
    """The layer can adapt on and map over an unbatched string dataset."""
    vocab_data = ["two two two", "two three three", "three four four five"]
    input_data = ["two three", "four five"]
    vocab_ds = tf.data.Dataset.from_tensor_slices(vocab_data)  # unbatched
    input_ds = tf.data.Dataset.from_tensor_slices(input_data)  # unbatched
    layer = text_vectorization.TextVectorization()
    layer.adapt(vocab_ds)
    out = input_ds.map(layer)
    self.assertAllClose(list(out.as_numpy_iterator()), [[2, 3], [4, 5]])

  def test_dataset_of_single_strings_with_output_sequence(self):
    """Dataset mapping honors `output_sequence_length` padding."""
    vocab_data = ["two two two", "two three three", "three four four five"]
    input_data = ["two three", "four five"]
    vocab_ds = tf.data.Dataset.from_tensor_slices(vocab_data)  # unbatched
    input_ds = tf.data.Dataset.from_tensor_slices(input_data)  # unbatched
    layer = text_vectorization.TextVectorization(output_sequence_length=3)
    layer.adapt(vocab_ds)
    out = input_ds.map(layer)
    self.assertAllClose(list(out.as_numpy_iterator()), [[2, 3, 0], [4, 5, 0]])

  @parameterized.named_parameters(
      {
          "testcase_name": "1d",
          "data": ["0", "a", "b", "c", "d", "e", "a", "b", "c", "d", "f"],
          "expected": [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]
      },
      {
          "testcase_name": "2d",
          "data": [["0", "a", "b", "c", "d"], ["e", "a", "b", "c", "d"], ["f"]],
          "expected": [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 0, 0, 0, 0]]
      },
      {
          "testcase_name":
              "3d",
          "data": [[["0", "a", "b"], ["c", "d"]], [["e", "a"], ["b", "c", "d"]],
                   [["f"]]],
          "expected": [[[1, 2, 3], [4, 5, 0]], [[1, 2, 0], [3, 4, 5]],
                       [[1, 0, 0], [0, 0, 0]]]
      },
  )
  def test_layer_dimensionality_handling(self, data, expected):
    """Pre-tokenized ragged inputs of rank 1-3 are looked up and densified."""
    vocab = ["a", "b", "c", "d"]
    vectorization = text_vectorization.TextVectorization(
        max_tokens=None, standardize=None, split=None, pad_to_max_tokens=False)
    vectorization.set_vocabulary(vocab)
    output = vectorization(tf.ragged.constant(data))
    self.assertAllEqual(expected, output)

  @parameterized.named_parameters(
      {
          "testcase_name": "1d",
          "data": ["0 a b c d e a b c d f"],
          "expected": [[1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]]
      },
      {
          "testcase_name":
              "3d",
          "data": [[["0 a b"], ["c d"]], [["e a"], ["b c d"]], [["f"]]],
          "expected": [[[1, 2, 3], [4, 5, 0]], [[1, 2, 0], [3, 4, 5]],
                       [[1, 0, 0], [0, 0, 0]]]
      },
  )
  def test_layer_dimensionality_handling_with_split(self, data, expected):
    """Whitespace splitting works for ragged inputs of varying rank."""
    vocab = ["a", "b", "c", "d"]
    vectorization = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        pad_to_max_tokens=False)
    vectorization.set_vocabulary(vocab)
    output = vectorization(tf.ragged.constant(data, inner_shape=(1,)))
    self.assertAllEqual(expected, output)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationPreprocessingTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests standardization, splitting, n-grams, and vocabulary setting."""

  def _write_to_temp_file(self, file_name, vocab_list):
    """Writes `vocab_list` one term per line to a temp file; returns its path."""
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(vocab + "\n")
      # NOTE(review): flush/close are redundant inside the `with` block,
      # which already closes the file on exit — harmless, but removable.
      writer.flush()
      writer.close()
    return vocab_path

  def test_summary_before_adapt(self):
    """model.summary() should not raise before the layer is adapted."""
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        pad_to_max_tokens=True,
        standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
        split=None,
        ngrams=None,
        output_mode=text_vectorization.TF_IDF)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    # We are testing that model.summary() can be called without erroring out.
    # (b/145726907)
    model.summary()

  def test_normalization(self):
    """LOWER_AND_STRIP_PUNCTUATION lowercases and strips punctuation."""
    input_array = np.array([["Earth", "wInD", "aNd", "firE"],
                            ["fire|", "an<>d", "{earth}", "michigan@%$"]])
    expected_output = np.array([[b"earth", b"wind", b"and", b"fire"],
                                [b"fire", b"and", b"earth", b"michigan"]])
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
        split=None,
        ngrams=None,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_normalization_ragged_inputs(self):
    """Standardization also applies to ragged string inputs."""
    input_array = tf.ragged.constant([["Earth", "wInD", "aNd", "firE"],
                                      ["fire|", "an<>d", "{earth}"]])
    expected_output = [[b"earth", b"wind", b"and", b"fire"],
                       [b"fire", b"and", b"earth"]]
    input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
        split=None,
        ngrams=None,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_custom_normalization(self):
    """A user-supplied callable can replace the built-in standardization."""
    input_array = np.array([["Earth", "wInD", "aNd", "firE"],
                            ["fire|", "an<>d", "{earth}", "michigan@%$"]])
    # Only lowercasing is applied; punctuation is preserved.
    expected_output = np.array(
        [[b"earth", b"wind", b"and", b"fire"],
         [b"fire|", b"an<>d", b"{earth}", b"michigan@%$"]])
    custom_standardization = tf.strings.lower
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=custom_standardization,
        split=None,
        ngrams=None,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_string_splitting(self):
    """SPLIT_ON_WHITESPACE splits on any run of whitespace characters."""
    input_array = np.array([["earth wind and fire"],
                            ["\tfire\tand\nearth michigan  "]])
    expected_output = [[b"earth", b"wind", b"and", b"fire"],
                       [b"fire", b"and", b"earth", b"michigan"]]
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        ngrams=None,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_custom_string_splitting(self):
    """A user-supplied callable can replace the built-in splitter."""
    input_array = np.array([["earth>wind>and fire"],
                            ["\tfire>and\nearth>michigan"]])
    expected_output = [[b"earth", b"wind", b"and fire"],
                       [b"\tfire", b"and\nearth", b"michigan"]]
    custom_split = lambda x: tf.strings.split(x, sep=">")
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=custom_split,
        ngrams=None,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_single_ngram_value_ragged_inputs(self):
    """ngrams=3 emits uni-, bi-, and tri-grams for ragged inputs."""
    input_array = tf.ragged.constant([["earth", "wind", "and", "fire"],
                                      ["fire", "and", "earth"]])
    # pyformat: disable
    expected_output = [[b"earth", b"wind", b"and", b"fire",
                        b"earth wind", b"wind and", b"and fire",
                        b"earth wind and", b"wind and fire"],
                       [b"fire", b"and", b"earth",
                        b"fire and", b"and earth",
                        b"fire and earth"]]
    # pyformat: enable
    input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        ngrams=3,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_single_ngram_value(self):
    """ngrams=3 emits uni-, bi-, and tri-grams for dense inputs."""
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[b"earth", b"wind", b"and", b"fire",
                        b"earth wind", b"wind and", b"and fire",
                        b"earth wind and", b"wind and fire"],
                       [b"fire", b"and", b"earth", b"michigan",
                        b"fire and", b"and earth", b"earth michigan",
                        b"fire and earth", b"and earth michigan"]]
    # pyformat: enable
    input_data = keras.Input(shape=(4,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        ngrams=3,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_multiple_ngram_values(self):
    """ngrams=(2, 3) emits only bi- and tri-grams, no unigrams."""
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[b"earth wind", b"wind and", b"and fire",
                        b"earth wind and", b"wind and fire"],
                       [b"fire and", b"and earth", b"earth michigan",
                        b"fire and earth", b"and earth michigan"]]
    # pyformat: enable
    input_data = keras.Input(shape=(4,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        ngrams=(2, 3),
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_string_multiple_preprocessing_steps(self):
    """Standardize, split, and ngrams compose in sequence."""
    input_array = np.array([["earth wInD and firE"],
                            ["\tfire\tand\nearth!!    michig@n  "]])
    expected_output = [[
        b"earth",
        b"wind",
        b"and",
        b"fire",
        b"earth wind",
        b"wind and",
        b"and fire",
    ],
                       [
                           b"fire",
                           b"and",
                           b"earth",
                           b"michign",
                           b"fire and",
                           b"and earth",
                           b"earth michign",
                       ]]
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        ngrams=2,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_string_splitting_with_non_1d_array_fails(self):
    """Splitting requires innermost dimension 1; shape (None,) should raise."""
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        output_mode=None)
    with self.assertRaisesRegex(RuntimeError,
                                ".*tokenize strings, the innermost dime.*"):
      _ = layer(input_data)

  def test_string_splitting_with_non_1d_raggedarray_fails(self):
    """Same innermost-dimension restriction applies to ragged inputs."""
    input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        vocabulary=["a"],
        max_tokens=None,
        standardize=None,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        output_mode=None)
    with self.assertRaisesRegex(RuntimeError,
                                ".*tokenize strings, the innermost dime.*"):
      _ = layer(input_data)

  def test_standardization_with_invalid_standardize_arg(self):
    """An unrecognized standardize value raises a ValueError at call time."""
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(vocabulary=["a"])
    layer._standardize = "unsupported"
    with self.assertRaisesRegex(ValueError,
                                ".*is not a supported standardization.*"):
      _ = layer(input_data)

  def test_splitting_with_invalid_split_arg(self):
    """An unrecognized split value raises a ValueError at call time."""
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(vocabulary=["a"])
    layer._split = "unsupported"
    with self.assertRaisesRegex(ValueError, ".*is not a supported splitting.*"):
      _ = layer(input_data)

  def test_vocab_setting_via_init(self):
    """A vocabulary list passed to the constructor is used directly."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT,
        vocabulary=vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_vocab_setting_via_init_file(self):
    """A vocabulary file path passed to the constructor is loaded."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT,
        vocabulary=vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_vocab_setting_via_setter(self):
    """set_vocabulary() accepts a vocabulary file path."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT)
    layer.set_vocabulary(vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_vocab_setting_with_oov_via_setter(self):
    """NOTE(review): this test is byte-identical to
    test_vocab_setting_via_setter above — the name suggests it was meant to
    exercise an explicit OOV token in the vocab file (as in
    test_special_tokens_int_mode); confirm intent or deduplicate."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT)
    layer.set_vocabulary(vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Checks TextVectorization built under a distribution strategy scope."""

  def test_distribution_strategy_output(self):
    """INT-mode output is unchanged when the model is built under a strategy."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    # OneDeviceStrategy on CPU is enough to exercise the strategy code path.
    strategy = tf.distribute.OneDeviceStrategy("/cpu:0")
    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=tf.string)
      layer = text_vectorization.TextVectorization(
          max_tokens=None,
          standardize=None,
          split=None,
          output_mode=text_vectorization.INT)
      layer.set_vocabulary(vocab_data)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationOutputTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_int_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4 in
# the second. This should output a 2x5 tensor with a padding value in the
# second example.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4, 1, 5], [5, 4, 2, 1, 0]]
# This test doesn't explicitly set an output shape, so the 2nd dimension
# should stay 'None'.
expected_output_shape = [None, None]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4 in
# the second. This should output a 2x6 tensor with a padding value in the
# second example, since output_sequence_length is set to 6.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4, 1, 5, 0], [5, 4, 2, 1, 0, 0]]
output_sequence_length = 6
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_strips(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4 in
# the second. This should output a 2x3 tensor with a padding value in the
# second example, since output_sequence_length is set to 3.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
  def test_int_output_dynamically_strips_and_pads(self):
    """The same compiled model both truncates and pads across separate batches."""
    vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x3 tensor with a padding value in the
    # second example, since output_sequence_length is set to 3.
    input_array = np.array([["earth wind and also fire"],
                            ["fire and earth michigan"]])
    expected_output = [[2, 3, 4], [5, 4, 2]]
    output_sequence_length = 3
    expected_output_shape = [None, output_sequence_length]
    # The input shape here is explicitly 1 because we're tokenizing.
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        output_mode=text_vectorization.INT,
        output_sequence_length=output_sequence_length)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
    # Create an input array that has 1 element in the first example and 2 in
    # the second. This should output a 2x3 tensor with a padding value in the
    # second example, since output_sequence_length is set to 3.
    input_array_2 = np.array([["wind"], ["fire and"]])
    expected_output_2 = [[3, 0, 0], [5, 4, 0]]
    output_dataset = model.predict(input_array_2)
    self.assertAllEqual(expected_output_2, output_dataset)
  def test_binary_output_hard_maximum(self):
    """MULTI_HOT with pad_to_max_tokens=True emits a fixed-width (max_tokens) row."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [1, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=max_tokens,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=True)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_binary_output_soft_maximum(self):
    """MULTI_HOT with pad_to_max_tokens=False sizes the output to the vocab, not max_tokens."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    # 4 vocab terms + 1 OOV bucket = 5 output columns, even though
    # max_tokens is 10 below.
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=False)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_bag_output_hard_maximum_set_vocabulary_after_build(self):
    """set_vocabulary() may be called after the layer has already been called."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=max_tokens,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=True)
    # Call the layer first, then set the vocabulary.
    int_data = layer(input_data)
    layer.set_vocabulary(vocab_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_bag_output_hard_maximum_adapt_after_build(self):
    """adapt() may be called after the layer has already been called."""
    vocab_data = np.array([
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ])
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=max_tokens,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=True)
    # Call the layer first, then adapt the vocabulary from data.
    int_data = layer(input_data)
    layer.adapt(vocab_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_bag_output_hard_maximum_multiple_adapts(self):
    """A second adapt() replaces the vocabulary learned by the first."""
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    adapt_data = ["earth", "earth", "earth", "earth", "wind", "wind", "wind"]
    first_expected_output = [
        [1, 1, 1, 0, 0],
        [1, 1, 0, 0, 0],
    ]
    second_adapt_data = [
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ]
    second_expected_output = [
        [0, 1, 1, 1, 0],
        [1, 1, 0, 1, 0],
    ]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=5,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Test the first adapt
    layer.adapt(adapt_data)
    first_output = model.predict(input_array)
    # Test the second adapt
    layer.adapt(second_adapt_data)
    # We need to recompile the model to retrace our call graph.
    model.compile()
    second_output = model.predict(input_array)
    self.assertAllEqual(first_expected_output, first_output)
    self.assertAllEqual(second_expected_output, second_output)
  def test_bag_output_soft_maximum_set_state_after_build(self):
    """set_vocabulary() may be called after build() but before the layer call."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=False)
    # Explicitly build before setting the vocabulary.
    layer.build(input_data.shape)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_vocab_size_changed_pad_to_max_false_fails(self):
    """Shrinking the vocab after the layer is called raises when output is not padded."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT,
        pad_to_max_tokens=False)
    layer.adapt(vocab_data)
    _ = layer(input_data)
    # Changing the vocab size after the output shape is fixed must fail.
    layer.set_vocabulary(vocab_data[:2])
    with self.assertRaisesRegex(RuntimeError,
                                "vocabulary size cannot be changed"):
      _ = layer(input_data)
  def test_count_output_hard_maximum(self):
    """COUNT mode with pad_to_max_tokens=True outputs per-token counts, fixed width."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 2, 1, 1, 0, 0],
                       [2, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=6,
        standardize=None,
        split=None,
        output_mode=text_vectorization.COUNT,
        pad_to_max_tokens=True)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_count_output_soft_maximum(self):
    """COUNT mode with pad_to_max_tokens=False sizes the output to the vocab."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[0, 2, 1, 1, 0],
                       [2, 1, 0, 1, 0]]
    # pyformat: enable
    # 4 vocab terms + 1 OOV bucket = 5 output columns.
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        standardize=None,
        split=None,
        output_mode=text_vectorization.COUNT,
        pad_to_max_tokens=False)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
  def test_tfidf_output_hard_maximum(self):
    """TF_IDF mode with pad_to_max_tokens=True weights counts by idf, fixed width."""
    vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.
    idf_weights = [.4, .25, .75, .6]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "fire", "earth", "michigan"]])
    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0, .8, .25, .75, 0, 0],
                       [ 1, .4, 0, 0, .6, 0]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=6,
        standardize=None,
        split=None,
        output_mode=text_vectorization.TF_IDF,
        pad_to_max_tokens=True)
    layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    # Floating-point output: use assertAllClose rather than exact equality.
    self.assertAllClose(expected_output, output_dataset)
  def test_tfidf_output_soft_maximum(self):
    """TF_IDF mode with pad_to_max_tokens=False sizes the output to the vocab."""
    vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.
    idf_weights = [.4, .25, .75, .6]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "fire", "earth", "michigan"]])
    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0, .8, .25, .75, 0],
                       [ 1, .4, 0, 0, .6]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        standardize=None,
        split=None,
        output_mode=text_vectorization.TF_IDF,
        pad_to_max_tokens=False)
    layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllClose(expected_output, output_dataset)
  def test_tfidf_output_set_oov_weight(self):
    """An explicit [UNK] entry in the vocab lets callers set the OOV idf weight."""
    vocab_data = ["[UNK]", "earth", "wind", "and", "fire"]
    # First weight (.1) belongs to the explicit [UNK] token.
    idf_weights = [.1, .4, .25, .75, .6]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "fire", "earth", "michigan"]])
    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0, .8, .25, .75, 0],
                       [ .2, .4, 0, 0, .6]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        standardize=None,
        split=None,
        output_mode=text_vectorization.TF_IDF,
        pad_to_max_tokens=False)
    layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllClose(expected_output, output_dataset)
  def test_accept_1D_input(self):
    """The layer accepts a rank-1 (unbatched-style) string input without error."""
    input_array = np.array(["earth wind and fire",
                            "fire and earth michigan"])
    layer = text_vectorization.TextVectorization(
        standardize=None, split=None, output_mode="int")
    layer.adapt(input_array)
    # Only checking that the call does not raise; output is not asserted.
    _ = layer(input_array)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationModelBuildingTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end tests that a TextVectorization layer composes with other layers."""

  @parameterized.named_parameters(
      {
          "testcase_name": "count_hard_max",
          "pad_to_max_tokens": True,
          "output_mode": text_vectorization.COUNT
      }, {
          "testcase_name": "count_soft_max",
          "pad_to_max_tokens": False,
          "output_mode": text_vectorization.COUNT
      }, {
          "testcase_name": "binary_hard_max",
          "pad_to_max_tokens": True,
          "output_mode": text_vectorization.MULTI_HOT
      }, {
          "testcase_name": "binary_soft_max",
          "pad_to_max_tokens": False,
          "output_mode": text_vectorization.MULTI_HOT
      }, {
          "testcase_name": "tfidf_hard_max",
          "pad_to_max_tokens": True,
          "output_mode": text_vectorization.TF_IDF
      }, {
          "testcase_name": "tfidf_soft_max",
          "pad_to_max_tokens": False,
          "output_mode": text_vectorization.TF_IDF
      })
  def test_end_to_end_bagged_modeling(self, output_mode, pad_to_max_tokens):
    """Builds vectorize -> cast -> Dense and runs predict for each bag mode."""
    vocab_data = ["earth", "wind", "and", "fire"]
    idf_weights = [.5, .25, .2, .125]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=10,
        standardize=None,
        split=None,
        output_mode=output_mode,
        pad_to_max_tokens=pad_to_max_tokens)
    # TF_IDF additionally requires idf weights when setting the vocabulary.
    if output_mode == text_vectorization.TF_IDF:
      layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
    else:
      layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    float_data = backend.cast(int_data, dtype="float32")
    output_data = core.Dense(64)(float_data)
    model = keras.Model(inputs=input_data, outputs=output_data)
    # Only checking that predict runs; no output assertion.
    _ = model.predict(input_array)

  def test_end_to_end_vocab_modeling(self):
    """Builds vectorize -> Embedding -> Conv1D and runs predict in INT mode."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth wind and also fire"],
                            ["fire and earth michigan"]])
    output_sequence_length = 6
    max_tokens = 5
    # The input shape here is explicitly 1 because we're tokenizing.
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=text_vectorization.SPLIT_ON_WHITESPACE,
        output_mode=text_vectorization.INT,
        output_sequence_length=output_sequence_length)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    embedded_data = embeddings.Embedding(
        input_dim=max_tokens + 1, output_dim=32)(
            int_data)
    output_data = convolutional.Conv1D(
        250, 3, padding="valid", activation="relu", strides=1)(
            embedded_data)
    model = keras.Model(inputs=input_data, outputs=output_data)
    _ = model.predict(input_array)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
# NOTE(review): class name has a typo ("Vocbulary" -> "Vocabulary"); left
# unchanged to avoid altering the public test identifier.
class TextVectorizationVocbularyTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest,
):
  """Tests for get_vocabulary() with constructor-set and adapted vocabularies."""

  def test_get_vocabulary(self):
    """Vocabulary includes the mask ("") and OOV ("[UNK]") special tokens first."""
    vocab = ["earth", "wind", "and", "fire"]
    layer = text_vectorization.TextVectorization(vocabulary=vocab)
    self.assertAllEqual(layer.get_vocabulary(),
                        ["", "[UNK]", "earth", "wind", "and", "fire"])

  def test_get_vocabulary_adapt(self):
    """Adapted vocabulary is ordered by descending token frequency."""
    vocab = np.array([["earth earth earth earth wind wind wind and and fire"]])
    layer = text_vectorization.TextVectorization()
    layer.adapt(vocab)
    self.assertAllEqual(layer.get_vocabulary(),
                        ["", "[UNK]", "earth", "wind", "and", "fire"])

  def test_get_vocabulary_no_special_tokens(self):
    """include_special_tokens=False omits the mask and OOV entries."""
    vocab = ["earth", "wind", "and", "fire"]
    layer = text_vectorization.TextVectorization(vocabulary=vocab)
    self.assertAllEqual(
        layer.get_vocabulary(include_special_tokens=False),
        ["earth", "wind", "and", "fire"])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationErrorTest(keras_parameterized.TestCase,
                                 preprocessing_test_utils.PreprocessingLayerTest
                                ):
  """Tests that invalid constructor args and vocab settings raise clear errors."""

  def test_too_long_vocab_fails_in_single_setting(self):
    """A vocab larger than max_tokens (minus special tokens) is rejected."""
    vocab_data = ["earth", "wind", "and", "fire"]
    layer = text_vectorization.TextVectorization(
        max_tokens=4,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT)
    with self.assertRaisesRegex(ValueError,
                                "vocabulary larger than the maximum vocab.*"):
      layer.set_vocabulary(vocab_data)

  def test_setting_vocab_without_idf_weights_fails_in_tfidf_mode(self):
    """TF_IDF mode requires idf_weights alongside the vocabulary."""
    vocab_data = ["earth", "wind", "and", "fire"]
    layer = text_vectorization.TextVectorization(
        max_tokens=5,
        standardize=None,
        split=None,
        output_mode=text_vectorization.TF_IDF)
    with self.assertRaisesRegex(
        ValueError, "`idf_weights` must be set if output_mode is TF_IDF"):
      layer.set_vocabulary(vocab_data)

  def test_idf_weights_length_mismatch_fails(self):
    """idf_weights must be the same length as the vocabulary."""
    vocab_data = ["earth", "wind", "and", "fire"]
    idf_weights = [1, 2, 3]
    layer = text_vectorization.TextVectorization(
        max_tokens=5,
        standardize=None,
        split=None,
        output_mode=text_vectorization.TF_IDF)
    with self.assertRaisesRegex(
        ValueError, "`idf_weights` must be the same length as vocab"):
      layer.set_vocabulary(vocab_data, idf_weights)

  def test_set_tfidf_in_non_tfidf_fails(self):
    """Passing idf_weights is an error outside TF_IDF mode."""
    vocab_data = ["earth", "wind", "and", "fire"]
    idf_weights = [1, 2, 3, 4]
    layer = text_vectorization.TextVectorization(
        max_tokens=5,
        standardize=None,
        split=None,
        output_mode=text_vectorization.MULTI_HOT)
    with self.assertRaisesRegex(ValueError,
                                "`idf_weights` should only be set if"):
      layer.set_vocabulary(vocab_data, idf_weights)

  def test_zero_max_tokens_fails(self):
    """max_tokens=0 is rejected at construction time."""
    with self.assertRaisesRegex(ValueError, "max_tokens.*"):
      _ = text_vectorization.TextVectorization(max_tokens=0)

  def test_non_string_dtype_fails(self):
    """The layer only supports string dtypes."""
    with self.assertRaisesRegex(ValueError, "dtype of string.*"):
      _ = text_vectorization.TextVectorization(dtype=tf.int64)

  def test_unknown_standardize_arg_fails(self):
    """Unsupported `standardize` values are rejected."""
    with self.assertRaisesRegex(ValueError,
                                "`standardize` arg.*unsupported_value"):
      _ = text_vectorization.TextVectorization(standardize="unsupported_value")

  def test_unknown_split_arg_fails(self):
    """Unsupported `split` values are rejected."""
    with self.assertRaisesRegex(ValueError, "`split` arg.*unsupported_value"):
      _ = text_vectorization.TextVectorization(split="unsupported_value")

  def test_unknown_output_mode_arg_fails(self):
    """Unsupported `output_mode` values are rejected."""
    with self.assertRaisesRegex(ValueError,
                                "`output_mode` arg.*unsupported_value"):
      _ = text_vectorization.TextVectorization(output_mode="unsupported_value")

  def test_unknown_ngrams_arg_fails(self):
    """Non-numeric `ngrams` values are rejected."""
    with self.assertRaisesRegex(ValueError, "ngrams.*unsupported_value"):
      _ = text_vectorization.TextVectorization(ngrams="unsupported_value")

  def test_float_ngrams_arg_fails(self):
    """Float `ngrams` values are rejected (must be int or tuple of ints)."""
    with self.assertRaisesRegex(ValueError, "ngrams.*2.9"):
      _ = text_vectorization.TextVectorization(ngrams=2.9)

  def test_float_tuple_ngrams_arg_fails(self):
    """Tuples of floats for `ngrams` are rejected."""
    with self.assertRaisesRegex(ValueError, "ngrams.*(1.3, 2.9)"):
      _ = text_vectorization.TextVectorization(ngrams=(1.3, 2.9))

  def test_non_int_output_sequence_length_dtype_fails(self):
    """output_sequence_length must be an integer."""
    with self.assertRaisesRegex(ValueError, "output_sequence_length.*2.0"):
      _ = text_vectorization.TextVectorization(
          output_mode="int", output_sequence_length=2.0)

  def test_non_none_output_sequence_length_fails_if_output_type_not_int(self):
    """output_sequence_length is only valid in INT output mode."""
    with self.assertRaisesRegex(ValueError,
                                "`output_sequence_length` must not be set"):
      _ = text_vectorization.TextVectorization(
          output_mode="count", output_sequence_length=2)
# Custom functions for the custom callable serialization test. Declared here
# to avoid multiple registrations from run_all_keras_modes().
@generic_utils.register_keras_serializable(package="Test")
def custom_standardize_fn(x):
  """Custom `standardize` callable: lowercases the input strings."""
  return tf.strings.lower(x)
@generic_utils.register_keras_serializable(package="Test")
def custom_split_fn(x):
  """Custom `split` callable: splits strings on the ">" separator."""
  return tf.strings.split(x, sep=">")
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationSavingTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Tests SavedModel round-trips and config serialization of the layer."""

  def tearDown(self):
    # Clear the session and force a GC pass so that per-test graph state
    # (e.g. lookup tables) cannot leak into the next test.
    keras.backend.clear_session()
    gc.collect()
    super(TextVectorizationSavingTest, self).tearDown()

  def test_saving(self):
    """A model with a vocab-set layer survives a SavedModel round-trip."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    # Build and validate a golden model.
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")
    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    # TODO(b/149526183): Can't clear session when TF2 is disabled.
    if tf.__internal__.tf2.enabled():
      keras.backend.clear_session()
    loaded_model = keras.models.load_model(output_path)
    self.assertAllEqual(loaded_model.predict(input_array), expected_output)

  def test_saving_when_nested(self):
    """A model nested inside another model also survives a round-trip."""
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
    # Build and validate a golden model.
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=None,
        split=None,
        output_mode=text_vectorization.INT)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Wrap the vectorizing model inside an outer model before saving.
    outer_input = keras.Input(shape=(None,), dtype=tf.string)
    outer_output = model(outer_input)
    outer_model = keras.Model(inputs=outer_input, outputs=outer_output)
    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    outer_model.save(output_path, save_format="tf")
    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    # TODO(b/149526183): Can't clear session when TF2 is disabled.
    if tf.__internal__.tf2.enabled():
      keras.backend.clear_session()
    loaded_model = keras.models.load_model(output_path)
    self.assertAllEqual(loaded_model.predict(input_array), expected_output)

  def test_saving_with_tfidf(self):
    """TF_IDF state (vocab + idf weights) survives a SavedModel round-trip."""
    vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.
    idf_weights = [.4, .25, .75, .6]
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "fire", "earth", "michigan"]])
    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0, .8, .25, .75, 0],
                       [ 1, .4, 0, 0, .6]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    # Build and validate a golden model.
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=5,
        standardize=None,
        split=None,
        output_mode=text_vectorization.TF_IDF)
    layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllClose(output_dataset, expected_output)
    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")
    loaded_model = keras.models.load_model(output_path)
    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)
    # Validate correctness of the new model.
    new_output_dataset = loaded_model.predict(input_array)
    self.assertAllClose(new_output_dataset, expected_output)

  def test_serialization_with_custom_callables(self):
    """Registered custom standardize/split callables survive get_config()."""
    input_array = np.array([["earth>wind>and Fire"],
                            ["\tfire>And\nearth>michigan"]])
    expected_output = [[b"earth", b"wind", b"and fire"],
                       [b"\tfire", b"and\nearth", b"michigan"]]
    input_data = keras.Input(shape=(1,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=None,
        standardize=custom_standardize_fn,
        split=custom_split_fn,
        ngrams=None,
        output_mode=None)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
    # Rebuild the model from its serialized config and verify identical output.
    serialized_model_data = model.get_config()
    new_model = keras.Model.from_config(serialized_model_data)
    new_output_dataset = new_model.predict(input_array)
    self.assertAllEqual(expected_output, new_output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationE2ETest(keras_parameterized.TestCase,
                               preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end test mirroring the vocab-trimming example from the docs."""

  def test_keras_vocab_trimming_example(self):
    """adapt() with a small max_tokens keeps only the most frequent terms."""
    vocab_data = np.array([
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ])
    input_array = np.array([["earth", "wind", "and", "earth"],
                            ["ohio", "and", "earth", "michigan"]])
    # pyformat: disable
    expected_output = [[1, 2, 1],
                       [3, 1, 0]]
    # pyformat: enable
    # With max_tokens=3 only OOV, "earth" and "wind" survive trimming.
    max_tokens = 3
    expected_output_shape = [None, max_tokens]
    input_data = keras.Input(shape=(None,), dtype=tf.string)
    layer = text_vectorization.TextVectorization(
        max_tokens=max_tokens,
        standardize=None,
        split=None,
        output_mode=text_vectorization.COUNT,
        pad_to_max_tokens=True)
    int_data = layer(input_data)
    layer.adapt(vocab_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(input_data, int_data)
    output = model.predict(input_array)
    self.assertAllEqual(expected_output, output)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 68,742 | 38.897272 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_encoding_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras category_encoding preprocessing layer."""
import tensorflow as tf
import time
import numpy as np
import keras
from keras.layers.preprocessing import category_encoding
tf.compat.v1.enable_v2_behavior()
class BenchmarkLayer(tf.test.Benchmark):
  """Benchmark the layer forward pass."""

  def run_dataset_implementation(self, output_mode, batch_size, sequence_length,
                                 max_tokens):
    """Times CategoryEncoding over a shuffled, batched random-int dataset.

    Reports the mean per-batch wall time over `num_repeats` runs via
    report_benchmark().
    """
    input_t = keras.Input(shape=(sequence_length,), dtype=tf.int32)
    layer = category_encoding.CategoryEncoding(
        max_tokens=max_tokens, output_mode=output_mode)
    # Prime the layer once so tracing cost is excluded from the timing loop.
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = tf.data.Dataset.from_tensor_slices(
          tf.random.uniform([batch_size * 10, sequence_length],
                            minval=0,
                            maxval=max_tokens - 1,
                            dtype=tf.int32))
      ds = ds.shuffle(batch_size * 100)
      ds = ds.batch(batch_size)
      num_batches = 5
      ds = ds.take(num_batches)
      ds = ds.prefetch(num_batches)
      starts.append(time.time())
      # Benchmarked code begins here.
      for i in ds:
        _ = layer(i)
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
    name = "category_encoding|batch_%s|seq_length_%s|%s_max_tokens" % (
        batch_size, sequence_length, max_tokens)
    self.report_benchmark(iters=num_repeats, wall_time=avg_time, name=name)

  def benchmark_vocab_size_by_batch(self):
    """Sweeps batch size x sequence length x vocab size combinations."""
    for batch in [32, 256, 2048]:
      for sequence_length in [10, 1000]:
        for num_tokens in [100, 1000, 20000]:
          self.run_dataset_implementation(
              output_mode="count",
              batch_size=batch,
              sequence_length=sequence_length,
              max_tokens=num_tokens)
# Run the benchmarks when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 2,694 | 33.551282 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_hash_dense_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of categorical hash columns with dense inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Compares the Keras Hashing layer against the equivalent
  tf.feature_column hashed categorical column on identical string data.

  Returns:
    A (keras_avg_time, fc_avg_time) tuple of mean wall times per batch.
  """
  # Data and constants.
  num_buckets = 10000
  vocab = fc_bm.create_vocabulary(32768)
  # pct_oov=0.0: every generated token is drawn from the vocabulary.
  data = fc_bm.create_string_data(
      max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.0)
  # Keras implementation
  model = keras.Sequential()
  model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.string))
  model.add(hashing.Hashing(num_buckets))
  # FC implementation
  fc = tf.feature_column.sequence_categorical_column_with_hash_bucket("data", num_buckets)
  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)
  # Benchmark runs
  # Both paths receive the same densified (padded) tensor.
  keras_data = {
      "data": data.to_tensor(default_value="", shape=(batch_size, max_length))
  }
  k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
  fc_data = {
      "data": data.to_tensor(default_value="", shape=(batch_size, max_length))
  }
  fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    """Runs the Keras-vs-feature-column comparison for each batch size."""
    for batch in BATCH_SIZES:
      name = "hash|dense|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)
# Run the benchmarks when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 2,709 | 33.303797 | 98 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_vocab_file_dense_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns from files with dense inputs."""
import tensorflow as tf
import os
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
class BenchmarkLayer(tf.test.TestCase, fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def _write_to_temp_file(self, file_name, vocab_list):
    # Write one vocabulary term per line into a temp .txt file and return its
    # path; StringLookup below loads the vocabulary from this file.
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(vocab + "\n")
      writer.flush()
      writer.close()
    return vocab_path
  def embedding_varlen(self, batch_size, max_length):
    """Benchmark a variable-length embedding."""
    # Data and constants.
    vocab = fc_bm.create_vocabulary(32768)
    path = self._write_to_temp_file("tmp", vocab)
    data = fc_bm.create_string_data(
        max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
    # Keras implementation
    model = keras.Sequential()
    model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.string))
    model.add(string_lookup.StringLookup(vocabulary=path, mask_token=None))
    # FC implementation
    # NOTE(review): the KPL path reads the vocabulary from a file while the
    # feature column is built from the in-memory list, and the reported name
    # below says "vocab_list" - confirm this is intended for a "file" benchmark.
    fc = tf.feature_column.categorical_column_with_vocabulary_list(
        key="data", vocabulary_list=vocab, num_oov_buckets=1)
    # Wrap the FC implementation in a tf.function for a fair comparison
    @tf_function()
    def fc_fn(tensors):
      fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)
    # Benchmark runs
    # Both implementations consume the same densified string tensor.
    keras_data = {
        "data": data.to_tensor(
            default_value="", shape=(batch_size, max_length))
    }
    k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
    fc_data = {
        "data": data.to_tensor(
            default_value="", shape=(batch_size, max_length))
    }
    fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
    return k_avg_time, fc_avg_time
  def benchmark_layer(self):
    # One comparison per configured batch size.
    for batch in BATCH_SIZES:
      name = "vocab_list|dense|batch_%s" % batch
      k_time, f_time = self.embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 3,255 | 34.010753 | 100 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/hashing_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras hashing preprocessing layer."""
import tensorflow as tf
import itertools
import random
import string
import time
import numpy as np
import keras
from keras.layers.preprocessing import hashing
tf.compat.v1.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and upper).
# The number of unique strings is ~2,700.
def word_gen():
  """Yield an endless stream of random two-letter ASCII strings.

  With 52 letters per position there are ~2.7K distinct values.
  """
  while True:
    first = random.choice(string.ascii_letters)
    second = random.choice(string.ascii_letters)
    yield first + second
class BenchmarkLayer(tf.test.Benchmark):
  """Benchmark the layer forward pass."""
  def run_dataset_implementation(self, batch_size):
    # Baseline: hash the same stream with the raw TF op instead of the layer.
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = tf.data.Dataset.from_generator(word_gen, tf.string,
                                          tf.TensorShape([]))
      ds = ds.shuffle(batch_size * 100)
      ds = ds.batch(batch_size)
      num_batches = 5
      ds = ds.take(num_batches)
      ds = ds.prefetch(num_batches)
      starts.append(time.time())
      # Benchmarked code begins here.
      for i in ds:
        _ = tf.strings.to_hash_bucket(i, num_buckets=2)
      # Benchmarked code ends here.
      ends.append(time.time())
    # Per-batch wall time averaged over the repeats.
    avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
    return avg_time
  def bm_layer_implementation(self, batch_size):
    # Build the Hashing layer once, outside the timed region.
    input_1 = keras.Input(shape=(None,), dtype=tf.string, name="word")
    layer = hashing.Hashing(num_bins=2)
    _ = layer(input_1)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = tf.data.Dataset.from_generator(word_gen, tf.string,
                                          tf.TensorShape([]))
      ds = ds.shuffle(batch_size * 100)
      ds = ds.batch(batch_size)
      num_batches = 5
      ds = ds.take(num_batches)
      ds = ds.prefetch(num_batches)
      starts.append(time.time())
      # Benchmarked code begins here.
      for i in ds:
        _ = layer(i)
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
    name = "hashing|batch_%s" % batch_size
    baseline = self.run_dataset_implementation(batch_size)
    extras = {
        "dataset implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    # One layer-vs-raw-op comparison per batch size.
    for batch in [32, 64, 256]:
      self.bm_layer_implementation(batch_size=batch)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 3,381 | 31.209524 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_varlen_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns + indicator from lists with varying-length inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding."""
  # Inputs: ragged string batches with ~15% out-of-vocabulary tokens.
  num_buckets = 32768
  vocabulary = fc_bm.create_vocabulary(num_buckets)
  ragged_data = fc_bm.create_string_data(
      max_length, batch_size * NUM_REPEATS, vocabulary, pct_oov=0.15)
  # Keras implementation: string lookup followed by a count-mode encoding.
  model = keras.Sequential()
  model.add(
      keras.Input(
          shape=(max_length,), name="data", ragged=True, dtype=tf.string))
  model.add(
      string_lookup.StringLookup(vocabulary=vocabulary, mask_token=None))
  model.add(
      category_encoding.CategoryEncoding(
          num_tokens=num_buckets + 1, output_mode="count"))
  # Feature-column equivalent: indicator over a sequence vocabulary column.
  fc = tf.feature_column.indicator_column(
      tf.feature_column.sequence_categorical_column_with_vocabulary_list(
          key="data", vocabulary_list=vocabulary, num_oov_buckets=1))
  # Wrap the FC implementation in a tf.function for a fair comparison.
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)
  # Time both implementations over the same data.
  k_avg_time = fc_bm.run_keras(
      {"data": ragged_data}, model, batch_size, NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc(
      {"data": ragged_data.to_sparse()}, fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def benchmark_layer(self):
    # Compare the KPL and feature-column paths at each batch size.
    for batch_size in BATCH_SIZES:
      keras_time, fc_time = embedding_varlen(
          batch_size=batch_size, max_length=256)
      bench_name = "vocab_list_indicator|varlen|batch_%s" % batch_size
      self.report(bench_name, keras_time, fc_time, NUM_REPEATS)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 2,960 | 35.109756 | 111 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/index_lookup_forward_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras text vectorization preprocessing layer's adapt method."""
import tensorflow as tf
import os
import random
import string
import time
import numpy as np
import keras
from keras.layers.preprocessing import index_lookup
tf.compat.v1.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and upper).
# The number of unique strings is ~2,700.
def tensor_gen(batch, num_elements):
  """Build a (batch, num_elements) string tensor of random two-letter tokens.

  The last element of every row is the empty string, so lookup of the mask
  token is always exercised.
  """
  rows = []
  for _ in range(batch):
    row = ["".join(random.choice(string.ascii_letters) for _ in range(2))
           for _ in range(num_elements - 1)]
    row.append("")  # Explicitly test the empty string.
    rows.append(row)
  return tf.constant(rows)
def get_vocab():
  """Return the sorted list of all two-letter ASCII-letter tokens."""
  pairs = {first + second
           for first in string.ascii_letters
           for second in string.ascii_letters}
  return sorted(pairs)
# This class uses TestCase for get_temp_dir().
class BenchmarkLookup(tf.test.Benchmark):
  """Benchmark the index lookup layer's forward pass."""
  def _write_to_temp_file(self, file_name, vocab_list):
    # Write one vocabulary term per line to a temp .txt file and return its
    # path; the TextFileInitializer below reads the vocabulary from it.
    # NOTE(review): get_temp_dir() is normally a TestCase method - confirm it
    # is available on tf.test.Benchmark in this TF version.
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(vocab + "\n")
      writer.flush()
      writer.close()
    return vocab_path
  def run_numpy_implementation(self, data, vocab):
    """Test the python implementation."""
    # Baseline: same IndexLookup layer, but with an in-memory vocabulary.
    input_t = keras.Input(shape=(), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        vocabulary=vocab,
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="OOV",
        dtype=tf.string)
    out_t = layer(input_t)
    model = keras.Model(input_t, out_t)
    num_repeats = 5
    starts = []
    ends = []
    # Warm-up call so tracing is not included in the timings.
    _ = model(data)
    for _ in range(num_repeats):
      starts.append(time.time())
      out = model(data)
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    return avg_time, out
  def bm_adapt_implementation(self, num_elements, batch_size):
    """Test the KPL adapt implementation."""
    vocab = get_vocab()
    vocab_file = self._write_to_temp_file("vocab", vocab)
    # Offset of 2 leaves room for the mask and OOV indices ahead of the
    # file-derived vocabulary ids.
    vocabulary_initializer = tf.lookup.TextFileInitializer(
        filename=vocab_file,
        key_dtype=tf.string,
        key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
        value_dtype=tf.int64,
        value_index=tf.lookup.TextFileIndex.LINE_NUMBER,
        value_index_offset=2)
    input_t = keras.Input(shape=(), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        vocabulary=vocabulary_initializer,
        max_tokens=None,
        num_oov_indices=1,
        mask_token="",
        oov_token="OOV",
        dtype=tf.string)
    out_t = layer(input_t)
    model = keras.Model(input_t, out_t)
    num_repeats = 5
    starts = []
    ends = []
    data = tensor_gen(batch_size, num_elements)
    # Warm-up call so tracing is not included in the timings.
    _ = model(data)
    for _ in range(num_repeats):
      starts.append(time.time())
      _ = model(data)
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    baseline, _ = self.run_numpy_implementation(data, vocab)
    extras = {
        "numpy implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    name = "index_lookup_forward|%s_elements|batch_%s" % (num_elements,
                                                          batch_size)
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    # Sweep tensor size x batch size.
    for tensor_size in [100, 1000, 10000]:
      for batch in [1, 16, 2048]:
        self.bm_adapt_implementation(tensor_size, batch)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 4,597 | 32.079137 | 121 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/embedding_dense_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of embedding column with dense inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding."""
  # Inputs: ragged int ids, densified below since this is the dense variant.
  num_ids = 32768
  ragged_ids = fc_bm.create_data(
      max_length, batch_size * NUM_REPEATS, num_ids - 1, dtype=int)
  # Keras implementation: embedding lookup followed by mean pooling.
  model = keras.Sequential()
  model.add(keras.Input(shape=(None,), name="data", dtype=tf.int64))
  model.add(keras.layers.Embedding(num_ids, 256))
  model.add(keras.layers.Lambda(lambda x: tf.reduce_mean(x, axis=-1)))
  # Feature-column equivalent.
  fc = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_identity(
          "data", num_buckets=num_ids - 1),
      dimension=256)
  # Wrap the FC implementation in a tf.function for a fair comparison.
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)
  # Time both implementations over the same densified tensor.
  dense_ids = ragged_ids.to_tensor(default_value=0)
  k_avg_time = fc_bm.run_keras({"data": dense_ids}, model, batch_size,
                               NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc({"data": dense_ids}, fc_fn, batch_size,
                             NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def benchmark_layer(self):
    # Compare the KPL and feature-column paths at each batch size.
    for batch_size in BATCH_SIZES:
      keras_time, fc_time = embedding_varlen(
          batch_size=batch_size, max_length=256)
      bench_name = "embedding|dense|batch_%s" % batch_size
      self.report(bench_name, keras_time, fc_time, NUM_REPEATS)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 2,744 | 34.649351 | 98 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/weighted_embedding_varlen_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of weighted embedding column with varying-length inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding."""
  # Inputs: ragged int ids plus a matching all-ones per-id weight tensor.
  num_ids = 32768
  ids = fc_bm.create_data(
      max_length, batch_size * NUM_REPEATS, num_ids - 1, dtype=int)
  weights = tf.ones_like(ids, dtype=tf.float32)
  # Keras implementation: weighted sum of embedded ids (functional API).
  ids_in = keras.Input(
      shape=(None,), ragged=True, name="data", dtype=tf.int64)
  weights_in = keras.Input(
      shape=(None,), ragged=True, name="weight", dtype=tf.float32)
  embedded = keras.layers.Embedding(num_ids, 256)(ids_in)
  weighted = tf.multiply(embedded, tf.expand_dims(weights_in, -1))
  pooled = tf.reduce_sum(weighted, axis=1)
  model = keras.Model([ids_in, weights_in], pooled)
  # Feature-column equivalent.
  fc = tf.feature_column.embedding_column(
      tf.feature_column.weighted_categorical_column(
          tf.feature_column.categorical_column_with_identity(
              "data", num_buckets=num_ids - 1),
          weight_feature_key="weight"),
      dimension=256)
  # Wrap the FC implementation in a tf.function for a fair comparison.
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)
  # Time both implementations over the same data.
  k_avg_time = fc_bm.run_keras(
      {"data": ids, "weight": weights}, model, batch_size, NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc(
      {"data": ids.to_sparse(), "weight": weights.to_sparse()},
      fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def benchmark_layer(self):
    # Compare the KPL and feature-column paths at each batch size.
    for batch_size in BATCH_SIZES:
      keras_time, fc_time = embedding_varlen(
          batch_size=batch_size, max_length=256)
      bench_name = "weighted_embedding|varlen|batch_%s" % batch_size
      self.report(bench_name, keras_time, fc_time, NUM_REPEATS)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 3,187 | 36.505882 | 98 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/normalization_adapt_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras text vectorization preprocessing layer's adapt method."""
import tensorflow as tf
import time
import numpy as np
import keras
from keras.layers.preprocessing import normalization
tf.compat.v1.enable_v2_behavior()
def reduce_fn(state, values):
  """tf.data.Dataset-friendly implementation of mean and variance."""
  # State is (shift k, element count n, shifted sum ex, shifted sum-of-squares
  # ex2); the caller recovers mean = k + ex/n and
  # var = (ex2 - ex^2/n) / (n - 1).
  k, n, ex, ex2 = state
  # If this is the first iteration, we pick the first value to be 'k',
  # which helps with precision - we assume that k is close to an average
  # value and calculate mean and variance with respect to that.
  k = tf.cond(tf.equal(n, 0), lambda: values[0], lambda: k)
  sum_v = tf.reduce_sum(values, axis=0)
  sum_v2 = tf.reduce_sum(tf.square(values), axis=0)
  # Count the rows in this batch by summing a tensor of ones.
  ones = tf.ones_like(values, dtype=tf.int32)
  batch_size = tf.reduce_sum(ones, axis=0)
  batch_size_f = tf.cast(batch_size, tf.float32)
  # NOTE(review): these assignments start from 0 rather than the incoming
  # ex/ex2, so accumulated state from earlier batches appears to be discarded,
  # and the 2*k*sum_v term is scaled by batch_size unlike the standard
  # shifted-data identity sum((x-k)^2) = sum_v2 - 2*k*sum_v + n*k^2.
  # Confirm whether this baseline is intentionally approximate.
  ex = 0 + sum_v - tf.multiply(batch_size_f, k)
  ex2 = 0 + sum_v2 + tf.multiply(
      batch_size_f, (tf.square(k) -
                     tf.multiply(tf.multiply(2.0, k), sum_v)))
  return (k, n + batch_size, ex, ex2)
class BenchmarkAdapt(tf.test.Benchmark):
  """Benchmark adapt."""
  def run_dataset_implementation(self, num_elements, batch_size):
    # Baseline: compute mean/variance with a tf.data reduce and install them
    # on the layer via set_weights, instead of calling adapt().
    input_t = keras.Input(shape=(1,))
    layer = normalization.Normalization()
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = tf.data.Dataset.range(num_elements)
      ds = ds.map(
          lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1))
      ds = ds.batch(batch_size)
      starts.append(time.time())
      # Benchmarked code begins here.
      k, n, ex, ex2 = ds.reduce((0.0, 0, 0.0, 0.0), reduce_fn)
      # Recover mean and (sample) variance from the shifted sums.
      mean = k.numpy() + ex.numpy() / n.numpy()
      var = (ex2.numpy() - (ex.numpy() * ex.numpy()) / n.numpy()) / (
          n.numpy() - 1)
      layer.set_weights([mean, var])
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    return avg_time
  def bm_adapt_implementation(self, num_elements, batch_size):
    """Test the KPL adapt implementation."""
    input_t = keras.Input(shape=(1,), dtype=tf.float32)
    layer = normalization.Normalization()
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      ds = tf.data.Dataset.range(num_elements)
      ds = ds.map(
          lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1))
      ds = ds.batch(batch_size)
      starts.append(time.time())
      # Benchmarked code begins here.
      layer.adapt(ds)
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    name = "normalization_adapt|%s_elements|batch_%s" % (num_elements,
                                                         batch_size)
    baseline = self.run_dataset_implementation(num_elements, batch_size)
    extras = {
        "tf.data implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    # Sweep element count x batch size.
    for vocab_size in [100, 1000, 10000, 100000, 1000000]:
      for batch in [1, 16, 2048]:
        self.bm_adapt_implementation(vocab_size, batch)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 4,157 | 33.081967 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_vocab_file_varlen_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns from files with varying-length inputs."""
import tensorflow as tf
import os
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
class BenchmarkLayer(tf.test.TestCase, fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def _write_to_temp_file(self, file_name, vocab_list):
    # Write one vocabulary term per line into a temp .txt file and return its
    # path; StringLookup below loads the vocabulary from this file.
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with tf.io.gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(vocab + "\n")
      writer.flush()
      writer.close()
    return vocab_path
  def embedding_varlen(self, batch_size, max_length):
    """Benchmark a variable-length embedding."""
    # Data and constants.
    vocab = fc_bm.create_vocabulary(32768)
    path = self._write_to_temp_file("tmp", vocab)
    data = fc_bm.create_string_data(
        max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
    # Keras implementation
    model = keras.Sequential()
    model.add(
        keras.Input(
            shape=(max_length,), name="data", ragged=True, dtype=tf.string))
    model.add(string_lookup.StringLookup(vocabulary=path, mask_token=None))
    # FC implementation
    # NOTE(review): the KPL path reads the vocabulary from a file while the
    # feature column is built from the in-memory list - confirm this asymmetry
    # is intended for this "file" benchmark.
    fc = tf.feature_column.sequence_categorical_column_with_vocabulary_list(
        key="data", vocabulary_list=vocab, num_oov_buckets=1)
    # Wrap the FC implementation in a tf.function for a fair comparison
    @tf_function()
    def fc_fn(tensors):
      fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)
    # Benchmark runs
    # Keras consumes the ragged tensor directly; the FC path needs sparse.
    keras_data = {"data": data}
    k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
    fc_data = {"data": data.to_sparse()}
    fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
    return k_avg_time, fc_avg_time
  def benchmark_layer(self):
    # One comparison per configured batch size.
    for batch in BATCH_SIZES:
      name = "vocab_list|varlen|batch_%s" % batch
      k_time, f_time = self.embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 3,146 | 34.761364 | 100 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras text vectorization preprocessing layer's adapt method."""
import tensorflow as tf
import collections
import itertools
import random
import string
import time
import numpy as np
import keras
from keras.layers.preprocessing import index_lookup
tf.compat.v1.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and upper).
# The number of unique strings is ~2,700.
def word_gen():
  """Endlessly yield random two-letter strings (~2.7K distinct values)."""
  while True:
    yield random.choice(string.ascii_letters) + random.choice(
        string.ascii_letters)
def get_top_k(dataset, k):
  """Python implementation of vocabulary building using a Counter.

  Args:
    dataset: iterable of batched tensors, each exposing a `.numpy()` method.
    k: maximum number of vocabulary terms to keep.

  Returns:
    Up to `k` terms, most frequent first. Ties keep first-seen order, matching
    the previous defaultdict-based implementation (sorted() is stable).
  """
  counts = collections.Counter()
  for tensor in dataset:
    # Each element of the batch contributes one count.
    counts.update(tensor.numpy())
  sorted_vocab = [
      term for term, _ in sorted(
          counts.items(), key=lambda item: item[1], reverse=True)
  ]
  # Slicing past the end is a no-op, so no explicit length check is needed.
  return sorted_vocab[:k]
class BenchmarkAdapt(tf.test.Benchmark):
  """Benchmark adapt."""
  def run_numpy_implementation(self, num_elements, batch_size, k):
    """Test the python implementation."""
    # Baseline: build the vocabulary in pure Python (get_top_k) and install it
    # with set_vocabulary, instead of calling adapt().
    ds = tf.data.Dataset.from_generator(word_gen, tf.string,
                                        tf.TensorShape([]))
    batched_ds = ds.take(num_elements).batch(batch_size)
    input_t = keras.Input(shape=(), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=k,
        num_oov_indices=0,
        mask_token=None,
        oov_token="OOV",
        dtype=tf.string)
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      starts.append(time.time())
      vocab = get_top_k(batched_ds, k)
      layer.set_vocabulary(vocab)
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    return avg_time
  def bm_adapt_implementation(self, num_elements, batch_size, k):
    """Test the KPL adapt implementation."""
    ds = tf.data.Dataset.from_generator(word_gen, tf.string,
                                        tf.TensorShape([]))
    batched_ds = ds.take(num_elements).batch(batch_size)
    input_t = keras.Input(shape=(), dtype=tf.string)
    layer = index_lookup.IndexLookup(
        max_tokens=k,
        num_oov_indices=0,
        mask_token=None,
        oov_token="OOV",
        dtype=tf.string)
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      starts.append(time.time())
      layer.adapt(batched_ds)
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    name = "index_lookup_adapt|%s_elements|vocab_size_%s|batch_%s" % (
        num_elements, k, batch_size)
    baseline = self.run_numpy_implementation(num_elements, batch_size, k)
    extras = {
        "numpy implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    # Sweep element count x batch size; keep the vocab at 10% of the elements.
    for vocab_size in [100, 1000, 10000, 100000, 1000000]:
      for batch in [1, 16, 2048]:
        self.bm_adapt_implementation(vocab_size, batch, int(vocab_size / 10))
# Entry point: run the benchmark methods under the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
| 4,079 | 32.170732 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/discretization_adapt_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras discretization preprocessing layer's adapt method."""
import tensorflow as tf
import time
import numpy as np
import keras
from keras.layers.preprocessing import discretization
EPSILON = 0.1
tf.compat.v1.enable_v2_behavior()
def reduce_fn(state, values, epsilon=EPSILON):
  """tf.data.Dataset reducer that accumulates quantile summaries of values."""
  (prev_summary,) = state
  batch_summary = discretization.summarize(values, epsilon)
  # An all-zero weight column marks the initial accumulator: adopt the first
  # batch summary outright instead of merging with the empty state.
  if np.sum(prev_summary[:, 0]) == 0:
    return (batch_summary,)
  return (discretization.merge_summaries(prev_summary, batch_summary,
                                         epsilon),)
class BenchmarkAdapt(tf.test.Benchmark):
  """Benchmark adapt."""
  def run_dataset_implementation(self, num_elements, batch_size):
    """Time a hand-rolled tf.data reduce that mimics Discretization.adapt.

    Args:
      num_elements: number of scalar elements in the synthetic dataset.
      batch_size: batch size used when reducing the dataset.

    Returns:
      Average wall time in seconds over the timed repeats.
    """
    input_t = keras.Input(shape=(1,))
    layer = discretization.Discretization()
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      # Fresh pipeline per repeat: floats of shape (1,) per element.
      ds = tf.data.Dataset.range(num_elements)
      ds = ds.map(
          lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1))
      ds = ds.batch(batch_size)
      starts.append(time.time())
      # Benchmarked code begins here.
      state = ds.reduce((np.zeros((1, 2)),), reduce_fn)
      bins = discretization.get_bucket_boundaries(state, 100)
      layer.set_weights([bins])
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    return avg_time
  def bm_adapt_implementation(self, num_elements, batch_size):
    """Test the KPL adapt implementation."""
    input_t = keras.Input(shape=(1,), dtype=tf.float32)
    layer = discretization.Discretization()
    _ = layer(input_t)
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      # Same synthetic pipeline as run_dataset_implementation, but timed
      # around the layer's own adapt() call.
      ds = tf.data.Dataset.range(num_elements)
      ds = ds.map(
          lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1))
      ds = ds.batch(batch_size)
      starts.append(time.time())
      # Benchmarked code begins here.
      layer.adapt(ds)
      # Benchmarked code ends here.
      ends.append(time.time())
    avg_time = np.mean(np.array(ends) - np.array(starts))
    name = "discretization_adapt|%s_elements|batch_%s" % (num_elements,
                                                          batch_size)
    # Report adapt() time with the tf.data reduce as the comparison baseline.
    baseline = self.run_dataset_implementation(num_elements, batch_size)
    extras = {
        "tf.data implementation baseline": baseline,
        "delta seconds": (baseline - avg_time),
        "delta percent": ((baseline - avg_time) / baseline) * 100
    }
    self.report_benchmark(
        iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    # Sweep dataset sizes at a single large batch size.
    for vocab_size in [100, 1000, 10000, 100000, 1000000]:
      for batch in [64 * 2048]:
        self.bm_adapt_implementation(vocab_size, batch)
# Run the benchmarks via the TF test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 3,550 | 30.990991 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/embedding_varlen_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of embedding column with varying-length inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding."""
  # Constants and synthetic ragged integer data.
  num_ids = 32768
  ragged_data = fc_bm.create_data(
      max_length, batch_size * NUM_REPEATS, num_ids - 1, dtype=int)
  # Keras implementation: embed ids, then mean-pool over the last axis.
  keras_model = keras.Sequential()
  keras_model.add(
      keras.Input(shape=(None,), ragged=True, name="data", dtype=tf.int64))
  keras_model.add(keras.layers.Embedding(num_ids, 256))
  keras_model.add(keras.layers.Lambda(lambda x: tf.reduce_mean(x, axis=-1)))
  # Equivalent feature-column implementation.
  id_column = tf.feature_column.categorical_column_with_identity(
      "data", num_buckets=num_ids - 1)
  embedding_fc = tf.feature_column.embedding_column(id_column, dimension=256)
  # Wrap the FC implementation in a tf.function for a fair comparison.
  @tf_function()
  def fc_fn(tensors):
    cache = tf.__internal__.feature_column.FeatureTransformationCache(tensors)
    embedding_fc.transform_feature(cache, None)
  # Time both implementations over the same underlying data.
  k_avg_time = fc_bm.run_keras({"data": ragged_data}, keras_model, batch_size,
                               NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc({"data": ragged_data.to_sparse()}, fc_fn,
                             batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    """Run the variable-length embedding benchmark per batch size."""
    for bs in BATCH_SIZES:
      keras_time, fc_time = embedding_varlen(batch_size=bs, max_length=256)
      self.report("embedding|varlen|batch_%s" % bs, keras_time, fc_time,
                  NUM_REPEATS)
# Run the benchmarks via the TF test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 2,732 | 34.038462 | 98 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras image preprocessing layer."""
import tensorflow as tf
import functools
import time
import numpy as np
import keras
from keras.layers.preprocessing import image_preprocessing
tf.compat.v1.enable_v2_behavior()
LOWER = .2
UPPER = .4
BATCH_SIZE = 32
def rotate(inputs):
  """Rotate a batch of images by random angles in [LOWER, UPPER] turns."""
  shape = tf.shape(inputs)
  num_images = shape[0]
  height = tf.cast(shape[1], tf.float32)
  width = tf.cast(shape[2], tf.float32)
  # Sample one rotation angle (in radians) per image in the batch.
  angles = tf.random.uniform(
      shape=[num_images],
      minval=LOWER * 2. * np.pi,
      maxval=UPPER * 2. * np.pi)
  rotation = image_preprocessing.get_rotation_matrix(angles, height, width)
  return image_preprocessing.transform(inputs, rotation)
def zoom(inputs):
  """Zoom a batch of images by random factors in [1 + LOWER, 1 + UPPER]."""
  shape = tf.shape(inputs)
  num_images = shape[0]
  height = tf.cast(shape[1], tf.float32)
  width = tf.cast(shape[2], tf.float32)
  # Independent per-image zoom factors for each spatial dimension.
  height_zoom = tf.random.uniform(
      shape=[num_images, 1], minval=1. + LOWER, maxval=1. + UPPER)
  width_zoom = tf.random.uniform(
      shape=[num_images, 1], minval=1. + LOWER, maxval=1. + UPPER)
  zooms = tf.cast(
      tf.concat([width_zoom, height_zoom], axis=1), dtype=tf.float32)
  zoom_matrix = image_preprocessing.get_zoom_matrix(zooms, height, width)
  return image_preprocessing.transform(inputs, zoom_matrix)
def image_augmentation(inputs, batch_size):
  """Resize, randomly crop, rotate, and zoom a batch of images."""
  resized = tf.image.resize(inputs, size=[224, 224])
  cropped = tf.image.random_crop(resized, size=[batch_size, 224, 224, 3])
  return zoom(rotate(cropped))
class BenchmarkLayer(tf.test.Benchmark):
  """Benchmark the layer forward pass."""
  def run_dataset_implementation(self, batch_size):
    """Time the ad-hoc tf.data augmentation pipeline (the baseline).

    Args:
      batch_size: batch size of the synthetic image dataset.

    Returns:
      Average wall time in seconds per consumed batch.
    """
    num_repeats = 5
    starts = []
    ends = []
    for _ in range(num_repeats):
      # Synthetic 256x256x3 float images, augmented inside the pipeline.
      ds = tf.data.Dataset.from_tensor_slices(
          np.random.random((batch_size, 256, 256, 3)))
      ds = ds.shuffle(batch_size * 100)
      ds = ds.batch(batch_size)
      ds = ds.prefetch(batch_size)
      img_augmentation = functools.partial(
          image_augmentation, batch_size=batch_size)
      ds = ds.map(img_augmentation)
      starts.append(time.time())
      count = 0
      # Benchmarked code begins here.
      for i in ds:
        _ = i
        count += 1
      # Benchmarked code ends here.
      ends.append(time.time())
    # Per-batch time: `count` holds the batch count of the last repeat.
    avg_time = np.mean(np.array(ends) - np.array(starts)) / count
    return avg_time
  def bm_layer_implementation(self, batch_size):
    """Time the Keras preprocessing-layer pipeline and report vs baseline.

    Args:
      batch_size: batch size of the synthetic image dataset.
    """
    with tf.device("/gpu:0"):
      # Build and trace the preprocessing model once before timing.
      img = keras.Input(shape=(256, 256, 3), dtype=tf.float32)
      preprocessor = keras.Sequential([
          image_preprocessing.Resizing(224, 224),
          image_preprocessing.RandomCrop(height=224, width=224),
          image_preprocessing.RandomRotation(factor=(.2, .4)),
          image_preprocessing.RandomFlip(mode="horizontal"),
          image_preprocessing.RandomZoom(.2, .2)
      ])
      _ = preprocessor(img)
      num_repeats = 5
      starts = []
      ends = []
      for _ in range(num_repeats):
        ds = tf.data.Dataset.from_tensor_slices(
            np.random.random((batch_size, 256, 256, 3)))
        ds = ds.shuffle(batch_size * 100)
        ds = ds.batch(batch_size)
        ds = ds.prefetch(batch_size)
        starts.append(time.time())
        count = 0
        # Benchmarked code begins here.
        for i in ds:
          _ = preprocessor(i)
          count += 1
        # Benchmarked code ends here.
        ends.append(time.time())
      avg_time = np.mean(np.array(ends) - np.array(starts)) / count
      name = "image_preprocessing|batch_%s" % batch_size
      baseline = self.run_dataset_implementation(batch_size)
      extras = {
          "dataset implementation baseline": baseline,
          "delta seconds": (baseline - avg_time),
          "delta percent": ((baseline - avg_time) / baseline) * 100
      }
      self.report_benchmark(
          iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
  def benchmark_vocab_size_by_batch(self):
    # Sweep a few batch sizes.
    for batch in [32, 64, 256]:
      self.bm_layer_implementation(batch_size=batch)
# Run the benchmarks via the TF test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 4,860 | 31.406667 | 80 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_hash_varlen_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of categorical hash columns with varying-length inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Compares the Keras Hashing layer against the equivalent
  categorical hash-bucket feature column on the same ragged string data.

  Args:
    batch_size: number of examples per benchmarked batch.
    max_length: maximum row length of the generated ragged data.

  Returns:
    Tuple of (keras_avg_time, fc_avg_time) in seconds per step.
  """
  # Data and constants.
  num_buckets = 10000
  vocab = fc_bm.create_vocabulary(32768)
  data = fc_bm.create_string_data(
      max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.0)
  # Keras implementation
  model = keras.Sequential()
  model.add(
      keras.Input(
          shape=(max_length,), name="data", ragged=True, dtype=tf.string))
  model.add(hashing.Hashing(num_buckets))
  # FC implementation
  fc = tf.feature_column.categorical_column_with_hash_bucket("data", num_buckets)
  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)
  # Benchmark runs
  keras_data = {"data": data}
  k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
  fc_data = {"data": data.to_sparse()}
  fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def benchmark_layer(self):
    # Run the hashing benchmark once per configured batch size.
    for batch in BATCH_SIZES:
      name = "hash|varlen|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)
# Run the benchmarks via the TF test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 2,613 | 32.948052 | 98 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_vocab_list_dense_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns from lists with dense inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Compares the Keras StringLookup layer against the equivalent
  vocabulary-list feature column on the same dense string data.

  Args:
    batch_size: number of examples per benchmarked batch.
    max_length: fixed sequence length of the densified input.

  Returns:
    Tuple of (keras_avg_time, fc_avg_time) in seconds per step.
  """
  # Data and constants.
  vocab = fc_bm.create_vocabulary(32768)
  data = fc_bm.create_string_data(
      max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
  # Keras implementation
  model = keras.Sequential()
  model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.string))
  model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))
  # FC implementation
  fc = tf.feature_column.categorical_column_with_vocabulary_list(
      key="data", vocabulary_list=vocab, num_oov_buckets=1)
  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(
        tf.__internal__.feature_column.FeatureTransformationCache(tensors),
        None)
  # Benchmark runs. Densify the ragged data once and feed the same tensor to
  # both implementations (the original built the identical tensor twice).
  dense_data = data.to_tensor(default_value="", shape=(batch_size, max_length))
  k_avg_time = fc_bm.run_keras({"data": dense_data}, model, batch_size,
                               NUM_REPEATS)
  fc_avg_time = fc_bm.run_fc({"data": dense_data}, fc_fn, batch_size,
                             NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""
  def benchmark_layer(self):
    # Run the dense vocabulary-lookup benchmark per configured batch size.
    for batch in BATCH_SIZES:
      name = "vocab_list|dense|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)
# Run the benchmarks via the TF test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 2,772 | 34.551282 | 98 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/category_vocab_list_varlen_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns from lists with varying-length inputs."""
import tensorflow as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Compares the Keras StringLookup layer against the equivalent sequence
  vocabulary-list feature column on the same ragged string data.

  Args:
    batch_size: number of examples per benchmarked batch.
    max_length: maximum row length of the generated ragged data.

  Returns:
    Tuple of (keras_avg_time, fc_avg_time) in seconds per step.
  """
  # Data and constants.
  vocab = fc_bm.create_vocabulary(32768)
  data = fc_bm.create_string_data(
      max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
  # Keras implementation
  model = keras.Sequential()
  model.add(
      keras.Input(
          shape=(max_length,), name="data", ragged=True, dtype=tf.string))
  model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))
  # FC implementation
  fc = tf.feature_column.sequence_categorical_column_with_vocabulary_list(
      key="data", vocabulary_list=vocab, num_oov_buckets=1)
  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)
  # Benchmark runs
  keras_data = {"data": data}
  k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
  fc_data = {"data": data.to_sparse()}
  fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    """Time the KPL and FC lookups for each configured batch size."""
    for bs in BATCH_SIZES:
      keras_time, fc_time = embedding_varlen(batch_size=bs, max_length=256)
      self.report("vocab_list|varlen|batch_%s" % bs, keras_time, fc_time,
                  NUM_REPEATS)
# Run the benchmarks via the TF test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 2,694 | 34.460526 | 99 | py |
keras | keras-master/keras/layers/preprocessing/benchmarks/feature_column_benchmark.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark suite for KPL and feature column implementations."""
import tensorflow as tf
import itertools
import math
import random
import string
import time
import numpy as np
import keras
# This is required as of 3/2021 because otherwise we drop into graph mode.
tf.compat.v1.enable_v2_behavior()
class LayerBenchmark(tf.test.Benchmark):
  """Benchmark the layer forward pass."""
  def report(self, name, keras_time, fc_time, iters):
    """Calculate and report benchmark statistics.

    Args:
      name: benchmark name to report under.
      keras_time: average seconds per step for the Keras implementation.
      fc_time: average seconds per step for the feature-column baseline.
      iters: number of iterations the averages were computed over.
    """
    extras = {
        "fc_avg_time": fc_time,
        "fc_vs_keras_sec": fc_time - keras_time,
        "fc_vs_keras_pct": ((fc_time - keras_time) / fc_time) * 100,
        "keras_faster_ratio": fc_time / keras_time
    }
    self.report_benchmark(
        iters=iters, wall_time=keras_time, extras=extras, name=name)
class StepTimingCallback(keras.callbacks.Callback):
  """A callback that times non-warmup steps of a Keras predict call.

  The first two batches are treated as warmup: timing starts at batch 2
  and every later batch counts as one timed step. After predict ends,
  `t_avg` holds the average seconds per non-warmup step (or None if the
  predict call was too short to get past warmup).
  """
  def __init__(self):
    self.t0 = None
    self.tn = None
    self.steps = 0
    self.t_avg = None
  def on_predict_batch_begin(self, batch_index, _):
    if batch_index == 2:
      self.t0 = time.time()
    elif batch_index > 2:
      self.steps += 1
  def on_predict_end(self, _):
    self.tn = time.time()
    # Guard against predict runs of fewer than four batches; the original
    # raised TypeError (t0 is None) or ZeroDivisionError (steps == 0) here.
    if self.t0 is not None and self.steps > 0:
      self.t_avg = (self.tn - self.t0) / self.steps
def create_data(length, num_entries, max_value, dtype):
  """Create a ragged tensor with random data entries.

  Args:
    length: exclusive upper bound on each row's length.
    num_entries: number of rows in the result.
    max_value: exclusive upper bound on generated values.
    dtype: numpy dtype the values are cast to.

  Returns:
    A tf.RaggedTensor with `num_entries` rows of random values.
  """
  # Row lengths drawn uniformly from [0, length).
  lengths = (np.random.random(size=num_entries) * length).astype(int)
  total_length = np.sum(lengths)
  values = (np.random.random(size=total_length) * max_value).astype(dtype)
  return tf.RaggedTensor.from_row_lengths(values, lengths)
def create_string_data(length,
                       num_entries,
                       vocabulary,
                       pct_oov,
                       oov_string="__OOV__"):
  """Create a ragged tensor with random string entries.

  Args:
    length: exclusive upper bound on each row's length.
    num_entries: number of rows in the result.
    vocabulary: sequence of strings to sample values from.
    pct_oov: fraction in [0, 1] of values to overwrite with `oov_string`.
    oov_string: marker used for out-of-vocabulary entries.

  Returns:
    A tf.RaggedTensor with `num_entries` rows of vocabulary strings, with
    roughly `pct_oov` of all values replaced by `oov_string`.
  """
  lengths = (np.random.random(size=num_entries) * length).astype(int)
  total_length = np.sum(lengths)
  values = [random.choice(vocabulary) for _ in range(total_length)]
  # Overwrite evenly spaced entries with the OOV marker. Guarding on
  # num_oovs (not pct_oov) avoids a ZeroDivisionError when the requested
  # fraction rounds down to zero values for small datasets.
  num_oovs = int(pct_oov * total_length)
  if num_oovs > 0:
    oov_cadence = total_length // num_oovs
    idx = 0
    for _ in range(num_oovs):
      if idx < total_length:
        values[idx] = oov_string
      idx += oov_cadence
  return tf.RaggedTensor.from_row_lengths(values, lengths)
def create_vocabulary(vocab_size):
  """Create a list of `vocab_size` distinct ascii-letter strings.

  Strings are generated in order of increasing length: all single letters
  (a..z, A..Z) first, then pairs ("aa", "ab", ...), and so on until
  `vocab_size` entries exist.

  Args:
    vocab_size: number of vocabulary entries to generate.

  Returns:
    A list of `vocab_size` unique strings (empty if vocab_size <= 0).
  """
  if vocab_size <= 0:
    return []
  base = len(string.ascii_letters)
  # max(..., 1) keeps vocab_size == 1 working: log(1) == 0 would otherwise
  # produce an empty range and an empty vocabulary.
  max_word_len = max(math.ceil(math.log(vocab_size, base)), 1)
  vocab = []
  for word_len in range(1, max_word_len + 1):
    for letters in itertools.product(string.ascii_letters, repeat=word_len):
      if len(vocab) >= vocab_size:
        return vocab
      vocab.append("".join(letters))
  return vocab
def run_keras(data, model, batch_size, num_runs, steps_per_repeat=100):
  """Benchmark a Keras model.

  Args:
    data: dict of tensors, sliced into an infinitely repeating dataset.
    model: the Keras model to time via `predict`.
    batch_size: batch size for the dataset.
    num_runs: number of timed predict calls.
    steps_per_repeat: base number of predict steps per run.

  Returns:
    Mean per-step wall time in seconds, excluding warmup steps.
  """
  ds = tf.data.Dataset.from_tensor_slices(data).repeat().prefetch(
      tf.data.AUTOTUNE).batch(batch_size).cache()
  steps = 0
  times = []
  for _ in range(num_runs):
    # NOTE(review): `steps` accumulates across runs, so each successive
    # predict call runs more steps than the last — presumably deliberate
    # (longer runs amortize per-call overhead), but worth confirming.
    steps += steps_per_repeat
    timer = StepTimingCallback()
    # Benchmarked code begins here.
    model.predict(ds, steps=steps, callbacks=[timer])
    # Benchmarked code ends here.
    times.append(timer.t_avg)
  avg_time = np.mean(times)
  return avg_time
def run_fc(data, fc_fn, batch_size, num_runs, steps_per_repeat=100):
  """Benchmark a Feature Column.

  Args:
    data: dict of tensors, sliced into an infinitely repeating dataset.
    fc_fn: tf.function wrapping the feature-column transformation.
    batch_size: batch size for the dataset.
    num_runs: number of timed repeats.
    steps_per_repeat: number of fc_fn invocations per repeat.

  Returns:
    Mean per-step wall time in seconds.
  """
  ds = tf.data.Dataset.from_tensor_slices(data).repeat().prefetch(
      tf.data.AUTOTUNE).batch(batch_size).cache()
  # Trace the fc_fn
  ds_iter = ds.__iter__()
  fc_fn(next(ds_iter))
  fc_starts = []
  fc_ends = []
  for _ in range(num_runs):
    fc_starts.append(time.time())
    # Benchmarked code begins here.
    for _ in range(steps_per_repeat):
      _ = fc_fn(next(ds_iter))
    # Benchmarked code ends here.
    fc_ends.append(time.time())
  avg_per_step_time = (np.array(fc_ends) -
                       np.array(fc_starts)) / steps_per_repeat
  avg_time = np.mean(avg_per_step_time)
  return avg_time
| 4,620 | 30.222973 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.