| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ChooseFastestBranchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ChooseFastestBranchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_ds(size):
dataset = dataset_ops.Dataset.range(size)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
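      # Both branches batch 10 input elements into one output element, which is
      # why ratio_numerator is set to 10 below.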
return optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
dataset, [branch_0, branch_1],
ratio_numerator=10)
for size in [100, 1000]:
self.run_core_tests(lambda: build_ds(size), size // 10) # pylint: disable=cell-var-from-loop
def testWithCapture(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
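      # The branches below capture constants of different dtypes, exercising
      # serialization of captured inputs.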
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, 10)
def testWithPrefetch(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, 10)
def testWithMoreOutputThanInput(self):
def build_ds():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
return optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=10,
num_elements_per_branch=100)
self.run_core_tests(build_ds, 1000)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_branch_dataset_serialization_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TextLineDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TextLineDatasetSerializationTest(
reader_dataset_ops_test_base.TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return core_readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
num_outputs)
# pylint: enable=cell-var-from-loop
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MatchingFilesDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.platform import test
class MatchingFilesDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_patterns):
return matching_files.MatchingFilesDataset(test_patterns)
def testMatchingFilesCore(self):
tmp_dir = tempfile.mkdtemp()
width = 16
depth = 8
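    # Build a directory tree of the given width and depth. Only the deepest
    # directory under each of the `width` top-level directories contains the
    # .txt and .log files, so each glob pattern below matches exactly `width`
    # files.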
for i in range(width):
for j in range(depth):
new_base = os.path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
if not os.path.exists(new_base):
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
open(filename, 'w').close()
patterns = [
os.path.join(tmp_dir, os.path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
num_outputs = width * len(patterns)
self.run_core_tests(lambda: self._build_iterator_graph(patterns),
num_outputs)
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/matching_files_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapAndBatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MapAndBatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testNumParallelBatches(self):
range_size = 11
num_repeats = 2
batch_size = 5
total_outputs = range_size * num_repeats
num_outputs_drop_remainder = total_outputs // batch_size
num_outputs_keep_remainder = int(math.ceil(total_outputs / batch_size))
num_parallel_batches = 2
def build_ds(range_start, drop_remainder=False):
def _map_fn(x):
return math_ops.square(x)
return dataset_ops.Dataset.range(
range_start, range_start + range_size).repeat(num_repeats).apply(
batching.map_and_batch(
map_func=_map_fn,
batch_size=batch_size,
num_parallel_batches=num_parallel_batches,
drop_remainder=drop_remainder))
self.run_core_tests(lambda: build_ds(10), num_outputs_keep_remainder)
self.run_core_tests(lambda: build_ds(10, True), num_outputs_drop_remainder)
def testNumParallelCalls(self):
range_size = 11
num_repeats = 2
batch_size = 5
total_outputs = range_size * num_repeats
num_outputs_drop_remainder = total_outputs // batch_size
num_outputs_keep_remainder = int(math.ceil(total_outputs / batch_size))
num_parallel_calls = 7
def build_ds(range_start, drop_remainder=False):
def _map_fn(x):
return math_ops.square(x)
return dataset_ops.Dataset.range(
range_start, range_start + range_size).repeat(num_repeats).apply(
batching.map_and_batch(
map_func=_map_fn,
batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
drop_remainder=drop_remainder))
self.run_core_tests(lambda: build_ds(10), num_outputs_keep_remainder)
self.run_core_tests(lambda: build_ds(10, True), num_outputs_drop_remainder)
def testSparse(self):
def build_dataset():
def map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
return dataset_ops.Dataset.range(10).apply(
batching.map_and_batch(map_fn, 5))
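    # Ten elements batched in groups of five yield two batches.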
self.run_core_tests(build_dataset, 2)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/map_and_batch_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SampleFromDatasets serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class SampleFromDatasetsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, probs, num_samples):
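    # A fixed seed keeps the sampling sequence deterministic, so save/restore
    # sees a reproducible element order.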
dataset = interleave_ops.sample_from_datasets(
[
dataset_ops.Dataset.from_tensors(i).repeat(None)
for i in range(len(probs))
],
probs,
seed=1813)
return dataset.take(num_samples)
def testSerializationCore(self):
self.run_core_tests(lambda: self._build_dataset([0.5, 0.5], 100), 100)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/sample_from_datasets_serialization_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TFRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TFRecordDatasetSerializationTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
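    # The base files are written uncompressed; compressed copies are produced
    # on the fly when a compression type is requested.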
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return core_readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0),
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
num_outputs)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/tf_record_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the CacheDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class CacheDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def setUp(self):
self.range_size = 10
self.num_repeats = 3
self.num_outputs = self.range_size * self.num_repeats
self.cache_file_prefix = 'test'
def make_dataset_fn(self, is_memory):
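    # An empty filename makes cache() keep elements in memory; otherwise the
    # cache is written to files under the temp directory.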
if is_memory:
filename = ''
else:
filename = os.path.join(self.get_temp_dir(), self.cache_file_prefix)
def ds_fn():
return dataset_ops.Dataset.range(self.range_size).cache(filename).repeat(
self.num_repeats)
return ds_fn
def expected_outputs(self):
return list(range(self.range_size)) * self.num_repeats
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointBeforeOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 5 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointBeforeOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 8 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 8, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(8))
outputs = outputs[:5]
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointAfterOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 15 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointAfterOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 18 entries from iterator but save checkpoint after producing 15.
outputs = self.gen_outputs(
ds_fn, [15], 18, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(8)))
outputs = list(range(10)) + list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointBeforeOneEpochButRunCompleteEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 13 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 13, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(3)))
# Since we ran for more than one epoch, the cache was completely written.
# The ckpt was saved when the iterator was in cache-write mode. Test that
# the iterator falls back to read mode after restoring if the cache has
# been completely written.
outputs = list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointUnusedWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Checkpoint before get_next is called even once.
outputs = self.gen_outputs(ds_fn, [], 0, verify_exhausted=False)
self.assertSequenceEqual(outputs, [])
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, ckpt_saved=True, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointUnusedMidwayWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint, then produce no elements and checkpoint.
outputs.extend(
self.gen_outputs(ds_fn, [], 0, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce rest of the elements.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testUnusedCheckpointError(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and save ckpt.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
if is_memory:
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, self.expected_outputs())
else:
# Since the complete cache has not been written, a new iterator which does
# not restore the checkpoint will throw an error since there is a partial
# cache shard.
with self.assertRaises(errors.AlreadyExistsError):
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testIgnoreCheckpointIfCacheWritten(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 15 elements and save ckpt. This will write the complete cache.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Build the iterator again but do not restore from ckpt. Since the cache
# has already been written we should be able to use it.
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/cache_dataset_serialization_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the FlatMapDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class FlatMapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
# Complicated way of saying range(start, start+25).
def build_ds(start):
def map_fn(x):
return dataset_ops.Dataset.range(x, x + 5)
return dataset_ops.Dataset.range(start, start + 5 * 5, 5).flat_map(map_fn)
self.run_core_tests(lambda: build_ds(0), 25)
def testMapThenFlatMap(self):
def build_ds():
def flat_map_fn(_):
def map_fn(y):
return 10 * math_ops.cast(y, dtypes.int32)
return dataset_ops.Dataset.range(100).map(map_fn)
return dataset_ops.Dataset.range(5).flat_map(flat_map_fn)
self.run_core_tests(build_ds, 500)
def testCaptureDefunInMapFn(self):
def build_ds():
def map_fn(x):
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.from_tensor_slices([defun_fn(x)])
return dataset_ops.Dataset.range(100).flat_map(map_fn)
self.run_core_tests(build_ds, 100)
def testDisallowVariableCapture(self):
def build_ds():
test_var = variable_scope.get_variable(
name="test_var", shape=(), use_resource=True)
return dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensor_slices([test_var]))
self.verify_error_on_save(build_ds, 5, errors.FailedPreconditionError)
def testDisallowCapturingStatefulOps(self):
def build_ds():
def flat_map_fn(_):
def map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(map_fn)
return dataset_ops.Dataset.range(5).flat_map(flat_map_fn)
self.verify_error_on_save(build_ds, 500, errors.FailedPreconditionError)
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_ds():
return dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
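    # Each 2x2 dense tensor is sliced into two elements, so ten inputs produce
    # twenty outputs.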
self.run_core_tests(_build_ds, 20)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/flat_map_dataset_serialization_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the _RebatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class RebatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_dataset(num_elements, batch_size):
return distribute._RebatchDataset(
dataset_ops.Dataset.range(num_elements).batch(
4 * batch_size, drop_remainder=True),
num_replicas=4)
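    # 200 elements batched by 40 give 5 batches; splitting each across 4
    # replicas yields 20 batches of 10.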
self.run_core_tests(lambda: build_dataset(200, 10), 20)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParallelInterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class ParallelInterleaveDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self.input_values = np.array([4, 5, 6], dtype=np.int64)
self.num_repeats = 2
self.num_outputs = np.sum(self.input_values) * 2
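    # Each value x expands to x elements via range(10 * x, 11 * x), so two
    # repeats of [4, 5, 6] produce 30 outputs.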
def _build_ds(self, cycle_length, block_length, sloppy=False):
return (dataset_ops.Dataset.from_tensor_slices(
self.input_values).repeat(self.num_repeats).apply(
interleave_ops.parallel_interleave(
lambda x: dataset_ops.Dataset.range(10 * x, 11 * x),
cycle_length, block_length, sloppy)))
def testSerializationCore(self):
# cycle_length > 1, block_length > 1
cycle_length = 2
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
# cycle_length = 1
cycle_length = 1
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
# block_length = 1
cycle_length = 2
block_length = 1
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
def testSerializationWithSloppy(self):
break_points = self.gen_break_points(self.num_outputs, 10)
expected_outputs = np.repeat(
np.concatenate([np.arange(10 * x, 11 * x) for x in self.input_values]),
self.num_repeats).tolist()
def run_test(cycle_length, block_length):
actual = self.gen_outputs(
lambda: self._build_ds(cycle_length, block_length, True),
break_points, self.num_outputs)
self.assertSequenceEqual(sorted(actual), expected_outputs)
# cycle_length > 1, block_length > 1
run_test(2, 3)
# cycle_length = 1
run_test(1, 3)
# block_length = 1
run_test(2, 1)
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, 1))
self.run_core_tests(_build_dataset, 20)
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/parallel_interleave_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experimental iterator_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import model_fn
@test_util.run_v1_only('b/123904664')
class CheckpointInputPipelineHookTest(test.TestCase):
@staticmethod
def _model_fn(features, labels, mode, config):
del labels
del mode
del config
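    # Track the global step and the most recently seen input element so the
    # tests can check how far the input pipeline advanced across training runs.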
global_step = training_util.get_or_create_global_step()
update_global_step_op = global_step.assign_add(1)
latest_feature = variables.VariableV1(
0, name='latest_feature', dtype=dtypes.int64)
store_latest_feature_op = latest_feature.assign(features)
ops.add_to_collection('my_vars', global_step)
ops.add_to_collection('my_vars', latest_feature)
return model_fn.EstimatorSpec(
mode='train',
train_op=control_flow_ops.group(
[update_global_step_op, store_latest_feature_op]),
loss=constant_op.constant(2.0))
def _read_vars(self, model_dir):
"""Returns (global_step, latest_feature)."""
with ops.Graph().as_default() as g:
ckpt_path = checkpoint_management.latest_checkpoint(model_dir)
meta_filename = ckpt_path + '.meta'
saver_lib.import_meta_graph(meta_filename)
saver = saver_lib.Saver()
with self.session(graph=g) as sess:
saver.restore(sess, ckpt_path)
return sess.run(ops.get_collection('my_vars'))
def _build_iterator_saver_hook(self, est):
return iterator_ops.CheckpointInputPipelineHook(est)
def testReturnDatasetFromInputFn(self):
def _input_fn():
return dataset_ops.Dataset.range(10)
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
def testBuildIteratorInInputFn(self):
def _input_fn():
ds = dataset_ops.Dataset.range(10)
iterator = ds.make_one_shot_iterator()
return iterator.get_next()
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
def testDoNotRestore(self):
def _input_fn():
return dataset_ops.Dataset.range(10)
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
# Hook not provided, input pipeline was not restored.
est.train(_input_fn, steps=2)
self.assertSequenceEqual(self._read_vars(est.model_dir), (6, 1))
def testRaiseErrorIfNoIterator(self):
def _input_fn():
return constant_op.constant(1, dtype=dtypes.int64)
est = estimator.Estimator(model_fn=self._model_fn)
with self.assertRaises(ValueError):
est.train(
_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/checkpoint_input_pipeline_hook_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the FixedLengthRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class FixedLengthRecordDatasetSerializationTest(
reader_dataset_ops_test_base.FixedLengthRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return core_readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
num_outputs)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/fixed_length_record_dataset_serialization_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the InterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class InterleaveDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_iterator_graph(self, input_values, cycle_length, block_length,
num_parallel_calls):
repeat_count = 2
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
repeat_count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
@parameterized.named_parameters(
("1", 2, 3, None),
("2", 2, 3, 1),
("3", 2, 3, 2),
("4", 1, 3, None),
("5", 1, 3, 1),
("6", 2, 1, None),
("7", 2, 1, 1),
("8", 2, 1, 2),
)
def testSerializationCore(self, cycle_length, block_length,
num_parallel_calls):
input_values = np.array([4, 5, 6], dtype=np.int64)
num_outputs = np.sum(input_values) * 2
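    # Each value x is repeated x times by the interleaved datasets, so two
    # passes over [4, 5, 6] yield 30 outputs.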
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(
input_values, cycle_length, block_length, num_parallel_calls),
num_outputs)
# pylint: enable=g-long-lambda
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1)
self.run_core_tests(_build_dataset, 20)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/kernel_tests/serialization/interleave_dataset_serialization_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.CsvDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import tempfile
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class CsvDatasetBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.CsvDataset`."""
FLOAT_VAL = '1.23456E12'
STR_VAL = string.ascii_letters * 10
def _set_up(self, str_val):
# Since this isn't test.TestCase, have to manually create a test dir
gfile.MakeDirs(googletest.GetTempDir())
self._temp_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
self._num_cols = [4, 64, 256]
self._num_per_iter = 5000
self._filenames = []
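    # Write one CSV file per column count; each holds 100 identical rows, and
    # the benchmarks repeat() the dataset so the iterator never runs dry while
    # timing.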
for n in self._num_cols:
fn = os.path.join(self._temp_dir, 'file%d.csv' % n)
with open(fn, 'w') as f:  # Text mode so the joined string rows can be written under Python 3.
# Just write 100 rows and use `repeat`... Assumes the cost
# of creating an iterator is not significant
row = ','.join([str_val for _ in range(n)])
f.write('\n'.join([row for _ in range(100)]))
self._filenames.append(fn)
def _tear_down(self):
gfile.DeleteRecursively(self._temp_dir)
def _run_benchmark(self, dataset, num_cols, prefix):
dataset = dataset.skip(self._num_per_iter - 1)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
deltas = []
for _ in range(10):
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
start = time.time()
# NOTE: This depends on the underlying implementation of skip, to have
# the net effect of calling `GetNext` num_per_iter times on the
# input dataset. We do it this way (instead of a python for loop, or
# batching N inputs in one iter) so that the overhead from session.run
# or batch doesn't dominate. If we eventually optimize skip, this has
# to change.
sess.run(next_element)
end = time.time()
deltas.append(end - start)
# Median wall time per CSV record read and decoded
median_wall_time = np.median(deltas) / self._num_per_iter
self.report_benchmark(
iters=self._num_per_iter,
wall_time=median_wall_time,
name='%s_with_cols_%d' % (prefix, num_cols))
def benchmark_map_with_floats(self):
self._set_up(self.FLOAT_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [[0.0]] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_float_map_decode_csv')
self._tear_down()
def benchmark_map_with_strings(self):
self._set_up(self.STR_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [['']] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_strings_map_decode_csv')
self._tear_down()
def benchmark_csv_dataset_with_floats(self):
self._set_up(self.FLOAT_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [[0.0]] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat() # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_float_fused_dataset')
self._tear_down()
def benchmark_csv_dataset_with_strings(self):
self._set_up(self.STR_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [['']] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat() # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_strings_fused_dataset')
self._tear_down()
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/benchmarks/csv_dataset_benchmark.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.rejection_resample()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
def _time_resampling(data_np, target_dist, init_dist, num_to_sample): # pylint: disable=missing-docstring
dataset = dataset_ops.Dataset.from_tensor_slices(data_np).repeat()
# Reshape distribution via rejection sampling.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist,
seed=142))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
start_time = time.time()
for _ in xrange(num_to_sample):
sess.run(get_next)
end_time = time.time()
return end_time - start_time
class RejectionResampleBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.rejection_resample()`."""
def benchmark_resample_performance(self):
init_dist = [0.25, 0.25, 0.25, 0.25]
target_dist = [0.0, 0.0, 0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test a dirac-delta target distribution
num_samples = 1000
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
resample_time = _time_resampling(
data_np, target_dist, init_dist, num_to_sample=1000)
self.report_benchmark(iters=1000, wall_time=resample_time, name="resample")
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/python/data/experimental/benchmarks/rejection_resample_benchmark.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for autotuning performance knobs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class AutotuneBenchmark(test.Benchmark):
"""Benchmarks for autotuning performance knobs."""
def benchmark_map(self):
a = self._benchmark_map(autotune=False)
b = self._benchmark_map(autotune=True)
c = self._benchmark_map(
autotune=True, algorithm=dataset_ops.AutotuneAlgorithm.GRADIENT_DESCENT)
print("HillClimb vs Default speedup: %f" % (a / b))
print("GradientDescent vs Default speedup: %f" % (a / c))
def _benchmark_map(self,
autotune,
algorithm=dataset_ops.AutotuneAlgorithm.HILL_CLIMB):
k = 1024 * 1024
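    # Each element is a (1, 4M) by (4M, 1) matrix pair, so every map call
    # performs a large matmul.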
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = autotune
if autotune:
options.experimental_optimization.autotune_algorithm = algorithm.value
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
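      # Warm up for a few iterations before timing.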
for _ in range(5):
sess.run(get_next.op)
for _ in range(10000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=10000,
wall_time=np.median(deltas),
name="map" + (("_autotune_%s" % algorithm.name) if autotune else ""))
return np.median(deltas)
def benchmark_map_and_batch(self):
a = self._benchmark_map_and_batch(autotune=False)
b = self._benchmark_map_and_batch(autotune=True)
c = self._benchmark_map_and_batch(
autotune=True, algorithm=dataset_ops.AutotuneAlgorithm.GRADIENT_DESCENT)
print("HillClimb vs Default speedup: %f" % (a / b))
print("GradientDescent vs Default speedup: %f" % (a / c))
def _benchmark_map_and_batch(
self, autotune, algorithm=dataset_ops.AutotuneAlgorithm.HILL_CLIMB):
batch_size = 16
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.batch(batch_size=batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
options.experimental_optimization.autotune = autotune
if autotune:
options.experimental_optimization.autotune_algorithm = algorithm.value
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(1000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=1000,
wall_time=np.median(deltas),
name="map_and_batch" +
(("_autotune_%s" % algorithm.name) if autotune else ""))
return np.median(deltas)
def benchmark_interleave(self):
a = self._benchmark_interleave(autotune=False)
b = self._benchmark_interleave(autotune=True)
c = self._benchmark_interleave(
autotune=True, algorithm=dataset_ops.AutotuneAlgorithm.GRADIENT_DESCENT)
print("HillClimb vs Default speedup: %f" % (a / b))
print("GradientDescent vs Default speedup: %f" % (a / c))
def _benchmark_interleave(self,
autotune,
algorithm=dataset_ops.AutotuneAlgorithm.HILL_CLIMB):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
cycle_length=10,
num_parallel_calls=dataset_ops.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = autotune
if autotune:
options.experimental_optimization.autotune_algorithm = algorithm.value
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(10000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=10000,
wall_time=np.median(deltas),
name="interleave" +
(("_autotune_%s" % algorithm.name) if autotune else ""))
return np.median(deltas)
def benchmark_map_and_interleave(self):
a = self._benchmark_map_and_interleave(autotune=False)
b = self._benchmark_map_and_interleave(autotune=True)
c = self._benchmark_map_and_interleave(
autotune=True, algorithm=dataset_ops.AutotuneAlgorithm.GRADIENT_DESCENT)
print("HillClimb vs Default speedup: %f" % (a / b))
print("GradientDescent vs Default speedup: %f" % (a / c))
def _benchmark_map_and_interleave(
self, autotune, algorithm=dataset_ops.AutotuneAlgorithm.HILL_CLIMB):
k = 1024 * 1024
a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
dataset_a = dataset_ops.Dataset.from_tensors(a).repeat()
dataset_b = dataset_ops.Dataset.from_tensors(b).repeat()
dataset_c = dataset_ops.Dataset.from_tensors(c).repeat()
def f1(x, y):
return math_ops.matmul(x, y)
def f2(a, b):
x, y = b
return a, math_ops.matmul(x, y)
dataset = dataset_a
dataset = dataset.map(f1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=dataset_ops.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_b))
dataset = dataset.map(f2, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=dataset_ops.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_c))
dataset = dataset.map(f2, num_parallel_calls=dataset_ops.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = autotune
if autotune:
options.experimental_optimization.autotune_algorithm = algorithm.value
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next)
for _ in range(10000):
start = time.time()
sess.run(get_next)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=10000,
wall_time=np.median(deltas),
name="map_and_interleave" +
(("_autotune_%s" % algorithm.name) if autotune else ""))
return np.median(deltas)
def benchmark_map_batch_and_interleave(self):
a = self._benchmark_map_batch_and_interleave(autotune=False)
b = self._benchmark_map_batch_and_interleave(autotune=True)
c = self._benchmark_map_batch_and_interleave(
autotune=True, algorithm=dataset_ops.AutotuneAlgorithm.GRADIENT_DESCENT)
print("HillClimb vs Default speedup: %f" % (a / b))
print("GradientDescent vs Default speedup: %f" % (a / c))
def _benchmark_map_batch_and_interleave(
self, autotune, algorithm=dataset_ops.AutotuneAlgorithm.HILL_CLIMB):
batch_size = 16
k = 1024 * 1024
a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
dataset_a = dataset_ops.Dataset.from_tensors(a).repeat()
dataset_b = dataset_ops.Dataset.from_tensors(b).repeat()
dataset_c = dataset_ops.Dataset.from_tensors(c).repeat()
dataset = dataset_a
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=dataset_ops.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_b))
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=dataset_ops.AUTOTUNE,
cycle_length=2)
dataset_c = dataset_c.map(
math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset_c = dataset_c.batch(batch_size=batch_size)
dataset = dataset_ops.Dataset.zip((dataset, dataset_c))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
options.experimental_optimization.autotune = autotune
if autotune:
options.experimental_optimization.autotune_algorithm = algorithm.value
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next)
for _ in range(1000):
start = time.time()
sess.run(get_next)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=1000,
wall_time=np.median(deltas),
name="map_batch_and_interleave" +
(("_autotune_%s" % algorithm.name) if autotune else ""))
return np.median(deltas)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/autotune_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.snapshot()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.client import session
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors_impl as errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SnapshotDatasetBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.experimental.snapshot()`."""
def _makeSnapshotDirectory(self):
tmp_dir = test.get_temp_dir()
tmp_dir = os.path.join(tmp_dir, "snapshot")
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
return tmp_dir
def _createSimpleDataset(self, num_elems, tmp_dir=None,
compression=snapshot.COMPRESSION_NONE):
if not tmp_dir:
tmp_dir = self._makeSnapshotDirectory()
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(
lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))
dataset = dataset.repeat(num_elems)
dataset = dataset.apply(snapshot.snapshot(tmp_dir, compression=compression))
return dataset
def _consumeDataset(self, dataset, num_elems):
dataset = dataset.skip(num_elems)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
try:
sess.run(next_element)
except errors.OutOfRangeError:
pass
def benchmarkWriteSnapshotGzipCompression(self):
num_elems = 500000
dataset = self._createSimpleDataset(
num_elems, compression=snapshot.COMPRESSION_GZIP)
self.run_and_report_benchmark(dataset, num_elems, "write_gzip",
warmup=False, iters=1)
def benchmarkWriteSnapshotSimple(self):
num_elems = 500000
dataset = self._createSimpleDataset(num_elems)
# We only run one iteration here because running multiple iterations will
# cause the later iterations to simply read from the already written
# snapshot rather than write a new one.
self.run_and_report_benchmark(dataset, num_elems, "write_simple",
warmup=False, iters=1)
def benchmarkPassthroughSnapshotSimple(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(num_elems, tmp_dir)
# Consume only 1 element, thus making sure we don't finalize.
self._consumeDataset(dataset, 1)
self.run_and_report_benchmark(dataset, num_elems, "passthrough_simple")
def benchmarkReadSnapshotSimple(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(num_elems, tmp_dir)
    # Consume all the elements to let snapshot write things to disk.
self._consumeDataset(dataset, num_elems)
self.run_and_report_benchmark(dataset, num_elems, "read_simple")
def benchmarkReadSnapshotGzipCompression(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elems, tmp_dir, compression=snapshot.COMPRESSION_GZIP)
self._consumeDataset(dataset, num_elems)
self.run_and_report_benchmark(dataset, num_elems, "read_gzip")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/snapshot_dataset_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for static optimizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class OptimizationBenchmark(test.Benchmark):
"""Benchmarks for static optimizations."""
def benchmark_map_fusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_map_fusion(chain_length, False)
self._benchmark_map_fusion(chain_length, True)
def _benchmark_map_fusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "noopt"
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="map_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
def benchmark_map_and_filter_fusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_map_and_filter_fusion(chain_length, False)
self._benchmark_map_and_filter_fusion(chain_length, True)
def _benchmark_map_and_filter_fusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x + 5).filter(
lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "noopt"
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="map_and_filter_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
# This benchmark compares the performance of pipeline with multiple chained
# filter with and without filter fusion.
def benchmark_filter_fusion(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_filter_fusion(chain_length, False)
self._benchmark_filter_fusion(chain_length, True)
def _benchmark_filter_fusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(5).repeat(None)
for _ in range(chain_length):
dataset = dataset.filter(lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "no-opt"
self.report_benchmark(
iters=1000,
wall_time=median_wall_time,
name="chain_length_{}_{}".format(opt_mark, chain_length))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/optimize_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
def _generate_csv_test_case():
"""Generates a `decode_csv()` test case."""
def csv_factory():
return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
"2.4:5:c"]).repeat(5)
def decode_csv_fn(x):
return parsing_ops.decode_csv(
x,
record_defaults=[
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.string)
],
field_delim=":")
return decode_csv_fn, csv_factory
def _generate_parse_single_example_test_case():
"""Generates a `parse_single_example()` test case."""
def parse_example_factory():
"""Parse example factory."""
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
return dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
]))
def parse_single_example_fn(x):
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
return parsing_ops.parse_single_example(x, features)
return parse_single_example_fn, parse_example_factory
# TODO(rachelim): Add a benchmark for more expensive transformations, such as
# vgg_preprocessing.
class MapVectorizationBenchmark(test.Benchmark):
"""Benchmarks for the `MapVectorization` optimization."""
def _run(self, x, num_iters=100, name=None):
deltas = []
with session.Session() as sess:
for _ in range(5):
# Warm up session...
sess.run(x)
for _ in range(num_iters):
start = time.time()
sess.run(x)
end = time.time()
deltas.append(end - start)
median_time = np.median(deltas)
self.report_benchmark(iters=num_iters, wall_time=median_time, name=name)
return median_time
def _compare(self, input_dataset, map_fn, batch_size, input_size, str_id):
num_elems = int(np.sum([np.prod(x) for x in input_size]))
name_template = "{}_batch_size_{}_input_element_size_{}_{}"
unoptimized_dataset = input_dataset.map(map_fn).batch(batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized_dataset = unoptimized_dataset.with_options(options)
unoptimized_next = dataset_ops.make_one_shot_iterator(
unoptimized_dataset).get_next()
options = dataset_ops.Options()
options.experimental_optimization.map_vectorization.enabled = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_next = dataset_ops.make_one_shot_iterator(
optimized_dataset).get_next()
unoptimized_time = self._run(
unoptimized_next,
name=name_template.format(str_id, batch_size, num_elems, "unoptimized"))
optimized_time = self._run(
optimized_next,
name=name_template.format(str_id, batch_size, num_elems, "optimized"))
print("Batch size: {}\n"
"Input element size: {}\n"
"Transformation: {}\n"
"Speedup: {}\n".format(batch_size, input_size, str_id,
(unoptimized_time / optimized_time)))
# Known cheap functions
def benchmark_identity(self):
self._benchmark_helper(lambda *args: [array_ops.identity(x) for x in args],
"identity")
def benchmark_add_const(self):
self._benchmark_helper(lambda *args: [x + 1 for x in args], "add_const")
def benchmark_return_const(self):
self._benchmark_helper(lambda *args: [constant_op.constant(2)], "ret_const")
def benchmark_select(self):
self._benchmark_helper(lambda *args: args[0], "select")
def benchmark_cast(self):
self._benchmark_helper(
lambda *args: [math_ops.cast(x, dtypes.float32) for x in args], "cast")
def benchmark_reshape(self):
self._benchmark_helper(
lambda *args: [array_ops.reshape(x, (-1, 30)) for x in args], "reshape")
def benchmark_decode_csv(self):
csv_fn, csv_factory = _generate_csv_test_case()
self._benchmark_helper(csv_fn, "decode_csv", lambda: [csv_factory()])
def benchmark_parse_single_example(self):
# NOTE: Since we haven't implemented a vectorizer for "SerializeSparse",
# this function is only naively vectorized.
parse_fn, parse_factory = _generate_parse_single_example_test_case()
self._benchmark_helper(parse_fn, "parse_single_example",
lambda: [parse_factory()])
def _default_dataset_factory(self):
input_sizes = [(10, 10, 3), (10, 100, 300)]
for sz in input_sizes:
yield dataset_ops.Dataset.from_tensor_slices(np.random.rand(*sz))
def _benchmark_helper(self, map_fn, str_id, base_dataset_factory=None):
if base_dataset_factory is None:
base_dataset_factory = self._default_dataset_factory
batch_size = 1000
for base_dataset in base_dataset_factory():
base_dataset = base_dataset.repeat()
input_size = [
tuple(shape.as_list())
for shape in nest.flatten(
dataset_ops.get_legacy_output_shapes(base_dataset))
]
self._compare(base_dataset, map_fn, batch_size, input_size, str_id)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/map_vectorization_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for static optimizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class ChooseFastestBenchmark(test.Benchmark):
"""Benchmarks for static optimizations."""
def benchmark_choose_fastest(self):
dataset = dataset_ops.Dataset.range(1000**2).repeat()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
map_batch_dataset = dataset.map(lambda x: x + 1).batch(100)
batch_map_dataset = dataset.batch(100).map(lambda x: x + 1)
merge_dataset = optimization._ChooseFastestDataset( # pylint: disable=protected-access
[batch_map_dataset, map_batch_dataset])
self._benchmark(map_batch_dataset, "map_batch_dataset")
self._benchmark(batch_map_dataset, "batch_map_dataset")
self._benchmark(merge_dataset, "merge_dataset")
def benchmark_choose_fastest_first_n_iterations(self):
dataset = dataset_ops.Dataset.range(1000**2).repeat()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
map_batch_dataset = dataset.map(lambda x: x + 1).batch(100)
batch_map_dataset = dataset.batch(100).map(lambda x: x + 1)
merge_dataset = optimization._ChooseFastestDataset( # pylint: disable=protected-access
[batch_map_dataset, map_batch_dataset])
self._benchmark_first_n(map_batch_dataset, "map_batch_dataset")
self._benchmark_first_n(batch_map_dataset, "batch_map_dataset")
self._benchmark_first_n(merge_dataset, "merge_dataset")
def _benchmark_first_n(self, dataset, name):
n = 10 # The default num_experiments for ChooseFastestDataset
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
deltas = []
for _ in range(100):
with session.Session() as sess:
start = time.time()
for _ in range(n):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / n
self.report_benchmark(
iters=n, wall_time=median_wall_time, name=name + "_first_%d" % n)
def _benchmark(self, dataset, name):
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
# Run 10 steps to warm up the session caches before taking the first
# measurement. Additionally, 10 is the default num_experiments for
# ChooseFastestDataset.
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(50):
start = time.time()
for _ in range(50):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
      median_wall_time = np.median(deltas) / 50
      self.report_benchmark(iters=50, wall_time=median_wall_time, name=name)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/choose_fastest_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
_NUMPY_RANDOM_SEED = 42
class MapAndBatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
def benchmark_map_and_batch(self):
"""Measures the performance of parallelized batching."""
shapes = [(), (10,), (10, 10), (10, 10, 10), (224, 224, 3)]
batch_size_values = [1, 32, 64, 128, 1024]
shape_placeholder = array_ops.placeholder(dtypes.int64, shape=[None])
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset_ops.Dataset.range(1000000000)
dense_value = random_ops.random_normal(shape=shape_placeholder)
dataset = dataset.apply(batching.map_and_batch(
lambda _: dense_value, batch_size_placeholder))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
for shape in shapes:
for batch_size in batch_size_values:
with session.Session() as sess:
sess.run(iterator.initializer, feed_dict={
shape_placeholder: shape, batch_size_placeholder: batch_size})
# Use a C++ callable to minimize the Python overhead in the benchmark.
callable_opts = config_pb2.CallableOptions()
callable_opts.target.append(next_element.op.name)
op_callable = sess._make_callable_from_options(callable_opts) # pylint: disable=protected-access
# Run five steps to warm up the session caches before taking the
# first measurement.
for _ in range(5):
op_callable()
deltas = []
overall_start = time.time()
# Run at least five repetitions and for at least five seconds.
while len(deltas) < 5 or time.time() - overall_start < 5.0:
start = time.time()
for _ in range(100):
op_callable()
end = time.time()
deltas.append(end - start)
del op_callable
median_wall_time = np.median(deltas) / 100.0
iters = len(deltas) * 100
self.report_benchmark(
iters=iters, wall_time=median_wall_time,
name="num_elements_%d_batch_size_%d" % (np.prod(shape), batch_size))
def benchmark_map_and_batch_chaining_versus_fusing(self):
"""Compares the performance of chaining and fusing map and batch.
NOTE: It is recommended to build the benchmark with
`-c opt --copt=-mavx --copt=-mavx2 --copt=-mfma --copt=-gmlt`
and execute it on a machine with at least 32 CPU cores.
"""
# Sequential pipeline configurations.
seq_elem_size_series = itertools.product([1], [1], [1, 2, 4, 8], [16])
seq_batch_size_series = itertools.product([1], [1], [1], [8, 16, 32, 64])
# Parallel pipeline configuration.
par_elem_size_series = itertools.product([32], [32], [1, 2, 4, 8], [256])
par_batch_size_series = itertools.product([32], [32], [1],
[128, 256, 512, 1024])
par_num_calls_series = itertools.product([8, 16, 32, 64], [32], [1], [512])
par_inter_op_series = itertools.product([32], [8, 16, 32, 64], [1], [512])
def name(method, label, num_calls, inter_op, element_size, batch_size):
return ("%s_id_%s_num_calls_%d_inter_op_%d_elem_size_%d_batch_size_%d" % (
method,
          hashlib.sha1(label.encode("utf-8")).hexdigest()[:8],
num_calls,
inter_op,
element_size,
batch_size,
))
def benchmark(label, series):
"""Runs benchmark the given series."""
def make_dataset(element_size, num_calls, batch_size): # pylint: disable=missing-docstring
k = 1024 * 1024
x = constant_op.constant(np.random.rand(element_size, 4 * k))
y = constant_op.constant(np.random.rand(4 * k, 1))
dataset = dataset_ops.Dataset.range(1000000000000).map(lambda _: (x, y))
dataset = dataset.map(
math_ops.matmul,
num_parallel_calls=num_calls).batch(batch_size=batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
return dataset.with_options(options)
for num_calls, inter_op, element_size, batch_size in series:
num_iters = 1024 // (
(element_size * batch_size) // min(num_calls, inter_op))
# By default the chained map().batch() calls will not be fused.
chained_dataset = make_dataset(element_size, num_calls, batch_size)
chained_iterator = dataset_ops.make_one_shot_iterator(chained_dataset)
chained_get_next = chained_iterator.get_next()
chained_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(chained_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(chained_get_next.op)
end = time.time()
chained_deltas.append(end - start)
self.report_benchmark(
iters=num_iters,
wall_time=np.median(chained_deltas),
name=name("chained", label, num_calls, inter_op, element_size,
batch_size))
# Apply an option to the default dataset that will fuse map().batch().
options = dataset_ops.Options()
options.experimental_optimization.map_and_batch_fusion = True
fused_dataset = chained_dataset.with_options(options)
fused_iterator = dataset_ops.make_one_shot_iterator(fused_dataset)
fused_get_next = fused_iterator.get_next()
fused_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(fused_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(fused_get_next.op)
end = time.time()
fused_deltas.append(end - start)
self.report_benchmark(
iters=num_iters,
wall_time=np.median(fused_deltas),
name=name("fused", label, num_calls, inter_op, element_size,
batch_size))
print()
np.random.seed(_NUMPY_RANDOM_SEED)
benchmark("Sequential element size evaluation", seq_elem_size_series)
benchmark("Sequential batch size evaluation", seq_batch_size_series)
benchmark("Parallel element size evaluation", par_elem_size_series)
benchmark("Parallel batch size evaluation", par_batch_size_series)
benchmark("Transformation parallelism evaluation", par_num_calls_series)
benchmark("Threadpool size evaluation", par_inter_op_series)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for ChooseFastestBranchDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
class ChooseFastestBranchBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for ChooseFastestBranchDatast."""
def make_benchmark_datasets(self,
input_dataset,
branch_0,
branch_1,
ratio_numerator,
num_elements_per_branch=None):
ds_0 = branch_0(input_dataset)
ds_1 = branch_1(input_dataset)
choose_fastest_dataset = optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
input_dataset, [branch_0, branch_1],
ratio_numerator=ratio_numerator,
num_elements_per_branch=num_elements_per_branch)
return ds_0, ds_1, choose_fastest_dataset
def make_simple_benchmark_datasets(self):
dataset = dataset_ops.Dataset.range(1000**2).repeat()
def branch_0(dataset):
return dataset.map(lambda x: x + 1).batch(100)
def branch_1(dataset):
return dataset.batch(100).map(lambda x: x + 1)
return self.make_benchmark_datasets(dataset, branch_0, branch_1, 100)
def benchmark_choose_fastest(self):
map_batch, batch_map, choose_fastest = self.make_simple_benchmark_datasets()
def benchmark(dataset, name):
self.run_and_report_benchmark(dataset, 5000, name, iters=1)
benchmark(map_batch, "map_batch_dataset")
benchmark(batch_map, "batch_map_dataset")
benchmark(choose_fastest, "choose_fastest_dataset")
def benchmark_choose_fastest_first_n_iterations(self):
map_batch, batch_map, choose_fastest = self.make_simple_benchmark_datasets()
def benchmark(dataset, name):
self.run_and_report_benchmark(
dataset, num_elements=10, name="%s_first_10" % name, iters=5)
benchmark(map_batch, "map_batch_dataset")
benchmark(batch_map, "batch_map_dataset")
benchmark(choose_fastest, "choose_fastest_dataset")
def benchmark_with_input_skew(self):
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
# Dataset with 100 elements that emulates performance characteristics of a
# file-based dataset stored in remote storage, where the first element
# takes significantly longer to produce than the remaining elements.
input_dataset = make_dataset(1000 * 1000,
0).concatenate(make_dataset(1, 100)).take(100)
def slow_branch(dataset):
return dataset.apply(sleep.sleep(10000))
def fast_branch(dataset):
return dataset.apply(sleep.sleep(10))
def benchmark(dataset, name):
self.run_and_report_benchmark(
dataset, num_elements=100, name="%s_with_skew" % name, iters=1)
# ChooseFastestBranch dataset should choose the same branch regardless
# of the order of the branches, so we expect the iteration speed to be
# comparable for both versions.
slow_ds, fast_ds, choose_fastest_0 = self.make_benchmark_datasets(
input_dataset, slow_branch, fast_branch, 1, num_elements_per_branch=2)
_, _, choose_fastest_1 = self.make_benchmark_datasets(
input_dataset, fast_branch, slow_branch, 1, num_elements_per_branch=2)
benchmark(slow_ds, "slow_dataset")
benchmark(fast_ds, "fast_dataset")
benchmark(choose_fastest_0, "choose_fastest_dataset_0")
benchmark(choose_fastest_1, "choose_fastest_dataset_1")
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/choose_fastest_branch_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _make_fake_dataset_fn():
"""Returns a dataset that emulates a remote storage data source.
Returns a dataset factory which creates a dataset with 100 elements that
emulates the performance characteristic of a file-based dataset stored in a
  remote storage. In particular, the first element will take orders of
  magnitude longer to produce than the remaining elements (1s vs. 1ms).
"""
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset(1000 * 1000, 0).concatenate(make_dataset(1000,
100)).take(100)
return fake_dataset_fn
class ParallelInterleaveBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
def _benchmark(self, dataset_fn, iters, num_elements):
with ops.Graph().as_default():
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset_fn().with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
deltas = []
for _ in range(iters):
start = time.time()
for _ in range(num_elements):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
mean_wall_time = np.mean(deltas) / num_elements
self.report_benchmark(iters=iters, wall_time=mean_wall_time)
def benchmark_sequential_interleave(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(), cycle_length=10)
self._benchmark(dataset_fn=dataset_fn, iters=10, num_elements=100)
def benchmark_parallel_interleave_v1(self):
"""Benchmark for parallel interleave that does not support autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(
interleave_ops.parallel_interleave(
_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
def benchmark_parallel_interleave_v2(self):
"""Benchmark for parallel interleave that supports autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(),
cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UnbatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.unbatch()`."""
def benchmark_native_unbatch(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
elems_per_trial = 10000
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset.batch(batch_size_placeholder)
dataset = dataset.apply(batching.unbatch())
dataset = dataset.skip(elems_per_trial)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for batch_size in batch_sizes:
deltas = []
for _ in range(5):
sess.run(
iterator.initializer,
feed_dict={batch_size_placeholder: batch_size})
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append((end - start) / elems_per_trial)
median_wall_time = np.median(deltas)
self.report_benchmark(
iters=10000,
wall_time=median_wall_time,
name="native_batch_size_%d" %
batch_size)
# Include a benchmark of the previous `unbatch()` implementation that uses
# a composition of more primitive ops. Eventually we'd hope to generate code
# that is as good in both cases.
def benchmark_old_unbatch_implementation(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
elems_per_trial = 10000
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset.batch(batch_size_placeholder)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
dataset = dataset.skip(elems_per_trial)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for batch_size in batch_sizes:
deltas = []
for _ in range(5):
sess.run(
iterator.initializer,
feed_dict={batch_size_placeholder: batch_size})
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append((end - start) / elems_per_trial)
median_wall_time = np.median(deltas)
self.report_benchmark(
iters=10000,
wall_time=median_wall_time,
name="unfused_batch_size_%d" %
batch_size)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/unbatch_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for the experimental `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class MatchingFilesBenchmark(test.Benchmark):
"""Benchmark for the experimental `MatchingFilesDataset`."""
def benchmark_nested_directories(self):
tmp_dir = tempfile.mkdtemp()
width = 500
depth = 10
for i in range(width):
for j in range(depth):
new_base = os.path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
open(filename, 'w').close()
patterns = [
os.path.join(tmp_dir, os.path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
deltas = []
iters = 3
for _ in range(iters):
with ops.Graph().as_default():
dataset = matching_files.MatchingFilesDataset(patterns)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
sub_deltas = []
while True:
try:
start = time.time()
sess.run(next_element)
end = time.time()
sub_deltas.append(end - start)
except errors.OutOfRangeError:
break
deltas.append(sub_deltas)
median_deltas = np.median(deltas, axis=0)
self.report_benchmark(
iters=iters,
wall_time=np.sum(median_deltas),
extras={
'read first file:':
median_deltas[0],
'read second file:':
median_deltas[1],
'avg time for reading %d more filenames:' %
(len(median_deltas) - 2):
np.average(median_deltas[2:])
},
name='nested_directory(%d*%d)' %
(width, depth))
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/matching_files_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class MapDefunBenchmark(test.Benchmark):
"""Benchmarks for MapDefunOp."""
def _run(self, op, name=None, num_iters=3000):
with session.Session() as sess:
for _ in range(5):
sess.run(op)
start = time.time()
for _ in range(num_iters):
sess.run(op)
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
name=name,
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_defun_vs_map_fn(self):
"""Benchmarks to compare the performance of MapDefun vs tf.map_fn."""
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def defun(x):
return array_ops.identity(x)
def fn(x):
return array_ops.identity(x)
base = math_ops.range(100)
for input_size in [10, 100, 1000, 10000]:
num_iters = 100000 // input_size
map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
map_fn_op = map_fn.map_fn(fn, base)
self._run(
map_defun_op, "with_defun_size_%d" % input_size, num_iters=num_iters)
self._run(
map_fn_op, "without_defun_size_%d" % input_size, num_iters=num_iters)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling distribution in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.DistributeOptions")
class DistributeOptions(options.OptionsBase):
"""Represents options for distributed data processing.
You can set the distribution options of a dataset through the
`experimental_distribute` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.DistributeOptions`.
```python
options = tf.data.Options()
options.experimental_distribute.auto_shard = False
dataset = dataset.with_options(options)
```
"""
auto_shard = options.create_option(
name="auto_shard",
ty=bool,
docstring=
"Whether the dataset should be automatically sharded when processed"
"in a distributed fashion. This is applicable when using Keras with "
"multi-worker/TPU distribution strategy, and by "
"using strategy.experimental_distribute_dataset(). In other cases, this "
"option does nothing. If None, defaults to True.",
default_factory=lambda: True)
_make_stateless = options.create_option(
name="_make_stateless",
ty=bool,
docstring=
"Determines whether the input pipeline should be rewritten to not "
"contain stateful transformations (so that its graph can be moved "
"between devices).")
num_devices = options.create_option(
name="num_devices",
ty=int,
docstring=
"The number of devices attached to this input pipeline. This will be "
"automatically set by MultiDeviceIterator.")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/distribute_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.dense_to_sparse_batch")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(
batch_size=2, row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.SparseTensor`. Each element of this dataset must have the same rank as
`row_shape`, and must have size less than or equal to `row_shape` in each
dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _DenseToSparseBatchDataset(dataset, batch_size, row_shape)
return _apply_fn
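# NOTE(editor): illustrative sketch only, not part of the original module. It
# demonstrates the behavior described in the docstring above; the row lengths,
# `batch_size`, `row_shape`, and the local imports are assumptions chosen for
# clarity rather than part of the public API.
def _dense_to_sparse_batch_example():
  """Sketch: variable-length rows batched into `tf.SparseTensor`s."""
  from tensorflow.python.ops import array_ops  # pylint: disable=g-import-not-at-top
  from tensorflow.python.ops import math_ops  # pylint: disable=g-import-not-at-top
  # Rows have lengths 0, 1, 2, 3; every two rows are combined into a sparse
  # tensor whose dense shape is [2, 4].
  dataset = dataset_ops.Dataset.range(4).map(
      lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x))
  return dataset.apply(dense_to_sparse_batch(batch_size=2, row_shape=[4]))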
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch()`.")
@tf_export(v1=["data.experimental.map_and_batch_with_legacy_function"])
def map_and_batch_with_legacy_function(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
NOTE: This is an escape hatch for existing uses of `map_and_batch` that do not
work with V2 functions. New uses are strongly discouraged and existing uses
  should migrate to `map_and_batch` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError("The `num_parallel_batches` and `num_parallel_calls` "
"arguments are mutually exclusive.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder,
use_legacy_function=True)
return _apply_fn
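# NOTE(editor): hedged sketch, not part of the original module, showing how the
# argument resolution above plays out; the map function and sizes are
# arbitrary. Passing both `num_parallel_batches` and `num_parallel_calls`
# raises `ValueError`; with only `num_parallel_batches`, parallelism becomes
# `batch_size * num_parallel_batches`.
def _map_and_batch_legacy_example(dataset):
  """Sketch: legacy-function variant applied via `Dataset.apply()`."""
  return dataset.apply(
      map_and_batch_with_legacy_function(
          lambda x: x + 1, batch_size=32, num_parallel_batches=2))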
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by "
"`tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data "
"optimizations will take care of using the fused implementation.")
@tf_export("data.experimental.map_and_batch")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. However, by fusing the two transformations together, the
implementation can be more efficient. Surfacing this transformation in the API
is temporary. Once automatic input pipeline optimization is implemented,
the fusing of `map` and `batch` will happen automatically and this API will be
deprecated.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError("The `num_parallel_batches` and `num_parallel_calls` "
"arguments are mutually exclusive.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder)
return _apply_fn
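# A minimal, hypothetical usage sketch of the fused `map_and_batch`
# transformation defined above (editor addition, not part of the original
# module). The range/doubling pipeline is an illustrative assumption.
def _example_map_and_batch_usage():  # pragma: no cover
  dataset = dataset_ops.Dataset.range(10)
  # Fused map+batch; equivalent to `.map(lambda x: x * 2).batch(4, True)`.
  return dataset.apply(
      map_and_batch(
          map_func=lambda x: x * 2, batch_size=4, drop_remainder=True))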
@deprecation.deprecated(None, "Use `tf.data.Dataset.unbatch()`.")
@tf_export("data.experimental.unbatch")
def unbatch():
"""Splits elements of a dataset into multiple elements on the batch dimension.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.unbatch()) == {
'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.unbatch()
return _apply_fn
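# A minimal, hypothetical usage sketch of `unbatch` (editor addition). A
# dataset of two [4]-shaped batches becomes a dataset of eight scalar
# elements; the concrete values are illustrative assumptions.
def _example_unbatch_usage():  # pragma: no cover
  batched = dataset_ops.Dataset.range(8).batch(4)  # elements shaped [4]
  return batched.apply(unbatch())                  # elements are scalars again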
class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that batches ragged dense elements into `tf.SparseTensor`s."""
def __init__(self, input_dataset, batch_size, row_shape):
"""See `Dataset.dense_to_sparse_batch()` for more details."""
if not isinstance(
dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType):
raise TypeError("DenseToSparseDataset requires an input whose elements "
"have a single component, whereas the input has %r." %
dataset_ops.get_legacy_output_types(input_dataset))
self._input_dataset = input_dataset
self._batch_size = batch_size
self._row_shape = row_shape
self._element_spec = sparse_tensor.SparseTensorSpec(
tensor_shape.TensorShape([None]).concatenate(self._row_shape),
dataset_ops.get_legacy_output_types(input_dataset))
variant_tensor = ged_ops.dense_to_sparse_batch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._batch_size,
row_shape=convert.partial_shape_to_tensor(self._row_shape),
**self._flat_structure)
super(_DenseToSparseBatchDataset, self).__init__(input_dataset,
variant_tensor)
@property
def element_spec(self):
return self._element_spec
class _MapAndBatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over a batch of elements."""
def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,
drop_remainder, use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
"tf.data.experimental.map_and_batch()",
dataset=input_dataset,
use_legacy_function=use_legacy_function)
self._batch_size_t = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._num_parallel_calls_t = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
self._drop_remainder_t = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
constant_drop_remainder = tensor_util.constant_value(self._drop_remainder_t)
# pylint: disable=protected-access
if constant_drop_remainder:
      # NOTE(mrry): `constant_drop_remainder` may also be `None` (unknown
      # statically) or `False` (explicitly retaining the remainder); those
      # cases are handled in the `else` branch below.
# pylint: disable=g-long-lambda
self._element_spec = nest.map_structure(
lambda component_spec: component_spec._batch(
tensor_util.constant_value(self._batch_size_t)),
self._map_func.output_structure)
else:
self._element_spec = nest.map_structure(
lambda component_spec: component_spec._batch(None),
self._map_func.output_structure)
# pylint: enable=protected-access
variant_tensor = ged_ops.map_and_batch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
batch_size=self._batch_size_t,
num_parallel_calls=self._num_parallel_calls_t,
drop_remainder=self._drop_remainder_t,
preserve_cardinality=True,
**self._flat_structure)
super(_MapAndBatchDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._element_spec
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/batching.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution Strategy-related dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class _AutoShardDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that shards the `Dataset` automatically.
This dataset takes in an existing dataset and tries to automatically figure
out how to shard the dataset in a multi-worker scenario. Currently, it uses
Grappler to walk up the dataset graph until it finds a reader dataset (e.g.
CSVDataset, TFRecordDataset), then inserts a ShardDataset op before that node
so that each worker only sees some files.
Args:
num_workers: Total number of workers to shard this dataset across.
index: The current worker index (out of the total number of workers) this
dataset is for.
Raises:
NotFoundError: If we cannot find a suitable reader dataset to begin
automatically sharding the dataset.
"""
def __init__(self, input_dataset, num_workers, index):
self._input_dataset = input_dataset
self._element_spec = input_dataset.element_spec
variant_tensor = ged_ops.auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
**self._flat_structure)
super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
def _AutoShardDatasetV1(input_dataset, num_workers, index): # pylint: disable=invalid-name
return dataset_ops.DatasetV1Adapter(
_AutoShardDataset(input_dataset, num_workers, index))
class _RebatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that divides the batch size by `num_replicas`.
For each batch in the input dataset, the resulting dataset will produce
`num_replicas` minibatches whose sizes add up to the original batch size.
"""
def __init__(self, input_dataset, num_replicas, use_fallback=True):
self._input_dataset = input_dataset
def recalculate_output_shapes(output_shapes):
"""Recalculates the output_shapes after dividing it by num_replicas."""
if len(output_shapes) < 1:
raise ValueError(
"Input shape should have at least one dimension. "
"Perhaps your input dataset is not batched?")
output_dims = [d.value for d in output_shapes.dims]
if output_dims[0] is not None and output_dims[0] % num_replicas == 0:
output_dims[0] = output_dims[0] // num_replicas
else:
# Set the batch dimension to unknown. If the global batch size does not
# divide num_replicas evenly, the minibatches may have different sizes.
output_dims[0] = None
return tensor_shape.TensorShape(output_dims)
input_types = dataset_ops.get_legacy_output_types(self._input_dataset)
input_shapes = dataset_ops.get_legacy_output_shapes(self._input_dataset)
input_classes = dataset_ops.get_legacy_output_classes(self._input_dataset)
output_shapes = nest.map_structure(recalculate_output_shapes, input_shapes)
self._element_spec = structure.convert_legacy_structure(
input_types, output_shapes, input_classes)
variant_tensor = ged_ops.rebatch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_replicas=num_replicas,
**self._flat_structure)
super(_RebatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
class _RemoteDataset(dataset_ops.DatasetSource):
"""Creates a dataset on a given `device` given a graph def."""
def __init__(self, graph_def, device, element_spec):
self._elem_spec = element_spec
with ops.device(device):
variant_tensor = ged_ops.dataset_from_graph(graph_def)
super(_RemoteDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._elem_spec
def replicate(dataset, devices):
"""A transformation that replicates `dataset` onto a list of devices.
Args:
dataset: A `tf.data.Dataset` object.
devices: A list of devices to replicate the dataset on.
Returns:
A dictionary mapping device name to a dataset on that device.
"""
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
# pylint: disable=protected-access
with ops.colocate_with(dataset._variant_tensor):
dataset = dataset._apply_options()
stateful_whitelist = dataset.options().experimental_stateful_whitelist
graph_def = dataset._as_serialized_graph(
stateful_whitelist=stateful_whitelist)
datasets = {}
for device in devices:
ds = _RemoteDataset(graph_def, device, dataset.element_spec)
datasets[device] = ds
return datasets
_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__
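# A minimal, hypothetical usage sketch of `replicate` (editor addition). The
# device string is an assumption; real device names depend on the host.
def _example_replicate_usage():  # pragma: no cover
  dataset = dataset_ops.Dataset.range(10).batch(2)
  per_device = replicate(dataset, ["/device:CPU:0"])
  return per_device["/device:CPU:0"]  # a dataset whose ops live on CPU:0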
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/distribute.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import functools
import gzip
import numpy as np
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import parsing_ops
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util.tf_export import tf_export
_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64, dtypes.string)
def _is_valid_int32(str_val):
try:
# Checks equality to prevent int32 overflow
return dtypes.int32.as_numpy_dtype(str_val) == dtypes.int64.as_numpy_dtype(
str_val)
except (ValueError, OverflowError):
return False
def _is_valid_int64(str_val):
try:
dtypes.int64.as_numpy_dtype(str_val)
return True
except (ValueError, OverflowError):
return False
def _is_valid_float(str_val, float_dtype):
try:
return float_dtype.as_numpy_dtype(str_val) < np.inf
except ValueError:
return False
def _infer_type(str_val, na_value, prev_type):
"""Given a string, infers its tensor type.
Infers the type of a value by picking the least 'permissive' type possible,
while still allowing the previous type inference for this column to be valid.
Args:
str_val: String value to infer the type of.
na_value: Additional string to recognize as a NA/NaN CSV value.
prev_type: Type previously inferred based on values of this column that
we've seen up till now.
Returns:
Inferred dtype.
"""
if str_val in ("", na_value):
# If the field is null, it gives no extra information about its type
return prev_type
type_list = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
] # list of types to try, ordered from least permissive to most
type_functions = [
_is_valid_int32,
_is_valid_int64,
lambda str_val: _is_valid_float(str_val, dtypes.float32),
lambda str_val: _is_valid_float(str_val, dtypes.float64),
lambda str_val: True,
] # Corresponding list of validation functions
for i in range(len(type_list)):
validation_fn = type_functions[i]
if validation_fn(str_val) and (prev_type is None or
prev_type in type_list[:i + 1]):
return type_list[i]
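# Worked illustration of the inference order above (editor addition); the
# string values are assumptions:
#   _infer_type("3", "", None)                   -> dtypes.int32
#   _infer_type("2147483648", "", dtypes.int32)  -> dtypes.int64 (int32 overflow)
#   _infer_type("1.5", "", dtypes.int64)         -> dtypes.float32
#   _infer_type("abc", "", dtypes.float32)       -> dtypes.string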
def _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header,
file_io_fn):
"""Generator that yields rows of CSV file(s) in order."""
for fn in filenames:
with file_io_fn(fn) as f:
rdr = csv.reader(
f,
delimiter=field_delim,
quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE)
if header:
next(rdr) # Skip header lines
for csv_row in rdr:
if len(csv_row) != num_cols:
raise ValueError(
"Problem inferring types: CSV row has different number of fields "
"than expected.")
yield csv_row
def _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim,
na_value, header, num_rows_for_inference,
select_columns, file_io_fn):
"""Infers column types from the first N valid CSV records of files."""
if select_columns is None:
select_columns = range(num_cols)
inferred_types = [None] * len(select_columns)
for i, csv_row in enumerate(
_next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header,
file_io_fn)):
if num_rows_for_inference is not None and i >= num_rows_for_inference:
break
for j, col_index in enumerate(select_columns):
inferred_types[j] = _infer_type(csv_row[col_index], na_value,
inferred_types[j])
# Replace None's with a default type
inferred_types = [t or dtypes.string for t in inferred_types]
# Default to 0 or '' for null values
return [
constant_op.constant([0 if t is not dtypes.string else ""], dtype=t)
for t in inferred_types
]
def _infer_column_names(filenames, field_delim, use_quote_delim, file_io_fn):
"""Infers column names from first rows of files."""
csv_kwargs = {
"delimiter": field_delim,
"quoting": csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE
}
with file_io_fn(filenames[0]) as f:
try:
column_names = next(csv.reader(f, **csv_kwargs))
except StopIteration:
raise ValueError(("Received StopIteration when reading the header line "
"of %s. Empty file?") % filenames[0])
for name in filenames[1:]:
with file_io_fn(name) as f:
try:
if next(csv.reader(f, **csv_kwargs)) != column_names:
raise ValueError(
"Files have different column names in the header row.")
except StopIteration:
raise ValueError(("Received StopIteration when reading the header line "
"of %s. Empty file?") % filenames[0])
return column_names
def _get_sorted_col_indices(select_columns, column_names):
"""Transforms select_columns argument into sorted column indices."""
names_to_indices = {n: i for i, n in enumerate(column_names)}
num_cols = len(column_names)
for i, v in enumerate(select_columns):
if isinstance(v, int):
if v < 0 or v >= num_cols:
raise ValueError(
"Column index %d specified in select_columns out of valid range." %
v)
continue
if v not in names_to_indices:
raise ValueError(
"Value '%s' specified in select_columns not a valid column index or "
"name." % v)
select_columns[i] = names_to_indices[v]
# Sort and ensure there are no duplicates
result = sorted(set(select_columns))
if len(result) != len(select_columns):
raise ValueError("select_columns contains duplicate columns")
return result
def _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed):
"""Optionally shuffle and repeat dataset, as requested."""
if num_epochs != 1 and shuffle:
# Use shuffle_and_repeat for perf
return dataset.apply(
shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
shuffle_seed))
elif shuffle:
return dataset.shuffle(shuffle_buffer_size, shuffle_seed)
elif num_epochs != 1:
return dataset.repeat(num_epochs)
return dataset
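# Worked illustration of the three paths above (editor addition); arguments
# other than `num_epochs` and `shuffle` are elided:
#   num_epochs=None, shuffle=True   -> fused shuffle_and_repeat
#   num_epochs=1,    shuffle=True   -> shuffle only
#   num_epochs=5,    shuffle=False  -> repeat only
#   num_epochs=1,    shuffle=False  -> dataset returned unchanged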
def make_tf_record_dataset(file_pattern,
batch_size,
parser_fn=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=None,
shuffle_seed=None,
prefetch_buffer_size=None,
num_parallel_reads=None,
num_parallel_parser_calls=None,
drop_final_batch=False):
"""Reads and optionally parses TFRecord files into a dataset.
Provides common functionality such as batching, optional parsing, shuffling,
and performant defaults.
Args:
file_pattern: List of files or patterns of TFRecord file paths.
See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
parser_fn: (Optional.) A function accepting string input to parse
and process the record contents. This function must map records
to components of a fixed shape, so they may be batched. By
default, uses the record contents unmodified.
num_epochs: (Optional.) An int specifying the number of times this
dataset is repeated. If None (the default), cycles through the
dataset forever.
shuffle: (Optional.) A bool that indicates whether the input
should be shuffled. Defaults to `True`.
shuffle_buffer_size: (Optional.) Buffer size to use for
shuffling. A large buffer size ensures better shuffling, but
increases memory usage and startup time.
shuffle_seed: (Optional.) Randomization seed to use for shuffling.
prefetch_buffer_size: (Optional.) An int specifying the number of
feature batches to prefetch for performance improvement.
Defaults to auto-tune. Set to 0 to disable prefetching.
num_parallel_reads: (Optional.) Number of threads used to read
records from files. By default or if set to a value >1, the
results will be interleaved. Defaults to `24`.
    num_parallel_parser_calls: (Optional.) Number of records to parse in
      parallel. Defaults to `batch_size`.
drop_final_batch: (Optional.) Whether the last batch should be
dropped in case its size is smaller than `batch_size`; the
default behavior is not to drop the smaller batch.
Returns:
A dataset, where each element matches the output of `parser_fn`
except it will have an additional leading `batch-size` dimension,
or a `batch_size`-length 1-D tensor of strings if `parser_fn` is
unspecified.
"""
if num_parallel_reads is None:
# NOTE: We considered auto-tuning this value, but there is a concern
# that this affects the mixing of records from different files, which
# could affect training convergence/accuracy, so we are defaulting to
# a constant for now.
num_parallel_reads = 24
if num_parallel_parser_calls is None:
# TODO(josh11b): if num_parallel_parser_calls is None, use some function
# of num cores instead of `batch_size`.
num_parallel_parser_calls = batch_size
if prefetch_buffer_size is None:
prefetch_buffer_size = dataset_ops.AUTOTUNE
files = dataset_ops.Dataset.list_files(
file_pattern, shuffle=shuffle, seed=shuffle_seed)
dataset = core_readers.TFRecordDataset(
files, num_parallel_reads=num_parallel_reads)
if shuffle_buffer_size is None:
# TODO(josh11b): Auto-tune this value when not specified
shuffle_buffer_size = 10000
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_final_batch=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
drop_final_batch = drop_final_batch or num_epochs is None
if parser_fn is None:
dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)
else:
dataset = dataset.map(
parser_fn, num_parallel_calls=num_parallel_parser_calls)
dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)
if prefetch_buffer_size == 0:
return dataset
else:
return dataset.prefetch(buffer_size=prefetch_buffer_size)
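# A minimal, hypothetical usage sketch of `make_tf_record_dataset` (editor
# addition). The file pattern is an assumption; a real pipeline would usually
# also pass a `parser_fn` built around `tf.io.parse_single_example`.
def _example_make_tf_record_dataset():  # pragma: no cover
  return make_tf_record_dataset(
      file_pattern="/tmp/data/*.tfrecord",  # hypothetical path
      batch_size=32,
      num_epochs=1,
      shuffle=True)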
@tf_export("data.experimental.make_csv_dataset", v1=[])
def make_csv_dataset_v2(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
num_parallel_reads=None,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
ignore_errors=False,
):
"""Reads CSV files into a dataset.
Reads CSV files into a dataset, where each element is a (features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
columns, in order. One per column of the input record. If this is not
provided, infers the column names from the first row of the records.
These names will be the keys of the features dict of each dataset element.
    column_defaults: An optional list of default values for the CSV fields. One
item per selected column of the input record. Each item in the list is
either a valid CSV dtype (float32, float64, int32, int64, or string), or a
`Tensor` with one of the aforementioned types. The tensor can either be
a scalar default value (if the column is optional), or an empty tensor (if
the column is required). If a dtype is provided instead of a tensor, the
column is also treated as required. If this list is not provided, tries
to infer types based on reading the first num_rows_for_inference rows of
files specified, and assumes all columns are optional, defaulting to `0`
for numeric values and `""` for string values. If both this and
`select_columns` are specified, these must have the same lengths, and
`column_defaults` is assumed to be sorted in order of increasing column
index.
    label_name: An optional string corresponding to the label column. If
provided, the data for this column is returned as a separate `Tensor` from
the features dictionary, so that the dataset complies with the format
expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
function.
select_columns: An optional list of integer indices or string column
names, that specifies a subset of columns of CSV data to select. If
column names are provided, these must correspond to names provided in
`column_names` or inferred from the file header lines. When this argument
is specified, only a subset of CSV columns will be parsed and returned,
corresponding to the columns specified. Using this results in faster
parsing and lower memory usage. If both this and `column_defaults` are
specified, these must have the same lengths, and `column_defaults` is
assumed to be sorted in order of increasing column index.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
na_value: Additional string to recognize as NA/NaN.
header: A bool that indicates whether the first rows of provided CSV files
correspond to header lines with column names, and should not be included
in the data.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature
batches to prefetch for performance improvement. Recommended value is the
number of batches consumed per training step. Defaults to auto-tune.
num_parallel_reads: Number of threads used to read CSV records from files.
If >1, the results will be interleaved. Defaults to `1`.
sloppy: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
num_rows_for_inference: Number of rows of a file to use for type inference
if record_defaults is not provided. If None, reads all the rows of all
the files. Defaults to 100.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing,
such as malformed data or empty lines, and moves on to the next valid
CSV record. Otherwise, the dataset raises an error and stops processing
when encountering any invalid records. Defaults to `False`.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_name`.
Raises:
ValueError: If any of the arguments is malformed.
"""
if num_parallel_reads is None:
num_parallel_reads = 1
if prefetch_buffer_size is None:
prefetch_buffer_size = dataset_ops.AUTOTUNE
# Create dataset of all matching filenames
filenames = _get_file_names(file_pattern, False)
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
if shuffle:
dataset = dataset.shuffle(len(filenames), shuffle_seed)
# Clean arguments; figure out column names and defaults
if column_names is None or column_defaults is None:
# Find out which io function to open the file
file_io_fn = lambda filename: file_io.FileIO(filename, "r")
if compression_type is not None:
compression_type_value = tensor_util.constant_value(compression_type)
if compression_type_value is None:
raise ValueError("Received unkown compression_type")
if compression_type_value == "GZIP":
file_io_fn = lambda filename: gzip.open(filename, "rt")
elif compression_type_value == "ZLIB":
raise ValueError(
"compression_type (%s) is not supported for probing columns" %
compression_type)
elif compression_type_value != "":
raise ValueError("compression_type (%s) is not supported" %
compression_type)
if column_names is None:
if not header:
raise ValueError("Cannot infer column names without a header line.")
# If column names are not provided, infer from the header lines
column_names = _infer_column_names(filenames, field_delim, use_quote_delim,
file_io_fn)
if len(column_names) != len(set(column_names)):
raise ValueError("Cannot have duplicate column names.")
if select_columns is not None:
select_columns = _get_sorted_col_indices(select_columns, column_names)
if column_defaults is not None:
column_defaults = [
constant_op.constant([], dtype=x)
if not tensor_util.is_tensor(x) and x in _ACCEPTABLE_CSV_TYPES else x
for x in column_defaults
]
else:
# If column defaults are not provided, infer from records at graph
# construction time
column_defaults = _infer_column_defaults(filenames, len(column_names),
field_delim, use_quote_delim,
na_value, header,
num_rows_for_inference,
select_columns, file_io_fn)
if select_columns is not None and len(column_defaults) != len(select_columns):
raise ValueError(
"If specified, column_defaults and select_columns must have same "
"length."
)
if select_columns is not None and len(column_names) > len(select_columns):
# Pick the relevant subset of column names
column_names = [column_names[i] for i in select_columns]
if label_name is not None and label_name not in column_names:
raise ValueError("`label_name` provided must be one of the columns.")
def filename_to_dataset(filename):
dataset = CsvDataset(
filename,
record_defaults=column_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
na_value=na_value,
select_cols=select_columns,
header=header,
compression_type=compression_type
)
if ignore_errors:
dataset = dataset.apply(error_ops.ignore_errors())
return dataset
def map_fn(*columns):
"""Organizes columns into a features dictionary.
Args:
*columns: list of `Tensor`s corresponding to one csv record.
Returns:
An OrderedDict of feature names to values for that particular record. If
label_name is provided, extracts the label feature to be returned as the
second element of the tuple.
"""
features = collections.OrderedDict(zip(column_names, columns))
if label_name is not None:
label = features.pop(label_name)
return features, label
return features
if num_parallel_reads == dataset_ops.AUTOTUNE:
dataset = dataset.interleave(
filename_to_dataset, num_parallel_calls=num_parallel_reads)
options = dataset_ops.Options()
options.experimental_deterministic = not sloppy
dataset = dataset.with_options(options)
else:
# Read files sequentially (if num_parallel_reads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
filename_to_dataset, cycle_length=num_parallel_reads,
sloppy=sloppy))
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# Apply batch before map for perf, because map has high overhead relative
# to the size of the computation in each map.
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(batch_size=batch_size,
drop_remainder=num_epochs is None)
dataset = dataset_ops.MapDataset(
dataset, map_fn, use_inter_op_parallelism=False)
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
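# A minimal, hypothetical usage sketch of `make_csv_dataset_v2` (editor
# addition). The file pattern and label column name are assumptions; column
# names and defaults are inferred from the header and the first rows.
def _example_make_csv_dataset():  # pragma: no cover
  return make_csv_dataset_v2(
      file_pattern="/tmp/train*.csv",  # hypothetical path
      batch_size=64,
      label_name="label",              # hypothetical label column
      num_epochs=1,
      shuffle=True)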
@tf_export(v1=["data.experimental.make_csv_dataset"])
def make_csv_dataset_v1(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
num_parallel_reads=None,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
ignore_errors=False,
): # pylint: disable=missing-docstring
return dataset_ops.DatasetV1Adapter(make_csv_dataset_v2(
file_pattern, batch_size, column_names, column_defaults, label_name,
select_columns, field_delim, use_quote_delim, na_value, header,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, num_parallel_reads, sloppy, num_rows_for_inference,
compression_type, ignore_errors))
make_csv_dataset_v1.__doc__ = make_csv_dataset_v2.__doc__
_DEFAULT_READER_BUFFER_SIZE_BYTES = 4 * 1024 * 1024 # 4 MB
@tf_export("data.experimental.CsvDataset", v1=[])
class CsvDatasetV2(dataset_ops.DatasetSource):
"""A Dataset comprising lines from one or more CSV files."""
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
"""Creates a `CsvDataset` by reading and decoding CSV files.
The elements of this dataset correspond to records from the file(s).
    RFC 4180 format is expected for CSV files
    (https://tools.ietf.org/html/rfc4180).
    Note that we allow leading and trailing spaces in int or float fields.
For example, suppose we have a file 'my_file0.csv' with four CSV columns of
different data types:
```
abcdefg,4.28E10,5.55E6,12
hijklmn,-5.3E14,,2
```
We can construct a CsvDataset from it as follows:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.data.experimental.CsvDataset(
"my_file*.csv",
[tf.float32, # Required field, use dtype or empty tensor
tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0
tf.int32, # Required field, use dtype or empty tensor
],
select_cols=[1,2,3] # Only parse last three columns
)
```
The expected output of its iterations is:
```python
for element in dataset:
print(element)
>> (4.28e10, 5.55e6, 12)
>> (-5.3e14, 0.0, 2)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_defaults: A list of default values for the CSV fields. Each item in
the list is either a valid CSV `DType` (float32, float64, int32, int64,
string), or a `Tensor` object with one of the above types. One per
column of CSV data, with either a scalar `Tensor` default value for the
column if it is optional, or `DType` or empty `Tensor` if required. If
both this and `select_columns` are specified, these must have the same
lengths, and `column_defaults` is assumed to be sorted in order of
increasing column index.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
compression.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer while reading files. Defaults to 4MB.
header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
have header line(s) that should be skipped when parsing. Defaults to
`False`.
field_delim: (Optional.) A `tf.string` scalar containing the delimiter
character that separates fields in a record. Defaults to `","`.
use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
double quotation marks as regular characters inside of string fields
(ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
na_value: (Optional.) A `tf.string` scalar indicating a value that will
be treated as NA/NaN.
select_cols: (Optional.) A sorted list of column indices to select from
the input data. If specified, only this subset of columns will be
parsed. Defaults to parsing all columns.
"""
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
record_defaults = [
constant_op.constant([], dtype=x)
if not tensor_util.is_tensor(x) and x in _ACCEPTABLE_CSV_TYPES else x
for x in record_defaults
]
self._record_defaults = ops.convert_n_to_tensor(
record_defaults, name="record_defaults")
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
self._header = ops.convert_to_tensor(
header, dtype=dtypes.bool, name="header")
self._field_delim = ops.convert_to_tensor(
field_delim, dtype=dtypes.string, name="field_delim")
self._use_quote_delim = ops.convert_to_tensor(
use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
self._na_value = ops.convert_to_tensor(
na_value, dtype=dtypes.string, name="na_value")
self._select_cols = convert.optional_param_to_tensor(
"select_cols",
select_cols,
argument_default=[],
argument_dtype=dtypes.int64,
)
self._element_spec = tuple(
tensor_spec.TensorSpec([], d.dtype) for d in self._record_defaults)
variant_tensor = gen_experimental_dataset_ops.csv_dataset(
filenames=self._filenames,
record_defaults=self._record_defaults,
buffer_size=self._buffer_size,
header=self._header,
output_shapes=self._flat_shapes,
field_delim=self._field_delim,
use_quote_delim=self._use_quote_delim,
na_value=self._na_value,
select_cols=self._select_cols,
compression_type=self._compression_type)
super(CsvDatasetV2, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._element_spec
@tf_export(v1=["data.experimental.CsvDataset"])
class CsvDatasetV1(dataset_ops.DatasetV1Adapter):
"""A Dataset comprising lines from one or more CSV files."""
@functools.wraps(CsvDatasetV2.__init__)
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
wrapped = CsvDatasetV2(filenames, record_defaults, compression_type,
buffer_size, header, field_delim, use_quote_delim,
na_value, select_cols)
super(CsvDatasetV1, self).__init__(wrapped)
@tf_export("data.experimental.make_batched_features_dataset", v1=[])
def make_batched_features_dataset_v2(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
reader_num_threads=None,
parser_num_threads=None,
sloppy_ordering=False,
drop_final_batch=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
  If the `label_key` argument is provided, returns a `Dataset` of tuples
  comprising a feature dictionary and a label.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["code", "art", "sports"]
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
    label_key: (Optional.) A string corresponding to the key under which labels
      are stored in the `tf.Example` protos. If provided, it must be one of the
      `features` keys; otherwise a `ValueError` is raised.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
ensures better shuffling but would increase memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step. Defaults to auto-tune.
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved. Defaults to `1`.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors. Defaults to `2`.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
drop_final_batch: If `True`, and the batch size does not evenly divide the
input dataset size, the final smaller batch will be dropped. Defaults to
`False`.
Returns:
A dataset of `dict` elements, (or a tuple of `dict` elements and label).
Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
Raises:
TypeError: If `reader` is a `tf.compat.v1.ReaderBase` subclass.
ValueError: If `label_key` is not one of the `features` keys.
"""
if reader_num_threads is None:
reader_num_threads = 1
if parser_num_threads is None:
parser_num_threads = 2
if prefetch_buffer_size is None:
prefetch_buffer_size = dataset_ops.AUTOTUNE
# Create dataset of all matching filenames
dataset = dataset_ops.Dataset.list_files(
file_pattern, shuffle=shuffle, seed=shuffle_seed)
if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):
raise TypeError("The `reader` argument must return a `Dataset` object. "
"`tf.ReaderBase` subclasses are not supported. For "
"example, pass `tf.data.TFRecordDataset` instead of "
"`tf.TFRecordReader`.")
# Read `Example` records from files as tensor objects.
if reader_args is None:
reader_args = []
if reader_num_threads == dataset_ops.AUTOTUNE:
dataset = dataset.interleave(
lambda filename: reader(filename, *reader_args),
num_parallel_calls=reader_num_threads)
options = dataset_ops.Options()
options.experimental_deterministic = not sloppy_ordering
dataset = dataset.with_options(options)
else:
# Read files sequentially (if reader_num_threads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
lambda filename: reader(filename, *reader_args),
cycle_length=reader_num_threads,
sloppy=sloppy_ordering))
# Extract values if the `Example` tensors are stored as key-value tuples.
if dataset_ops.get_legacy_output_types(dataset) == (
dtypes.string, dtypes.string):
dataset = dataset_ops.MapDataset(
dataset, lambda _, v: v, use_inter_op_parallelism=False)
# Apply dataset repeat and shuffle transformations.
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(
batch_size, drop_remainder=drop_final_batch or num_epochs is None)
# Parse `Example` tensors to a dictionary of `Feature` tensors.
dataset = dataset.apply(
parsing_ops.parse_example_dataset(
features, num_parallel_calls=parser_num_threads))
if label_key:
if label_key not in features:
raise ValueError(
"The `label_key` provided (%r) must be one of the `features` keys." %
label_key)
dataset = dataset.map(lambda x: (x, x.pop(label_key)))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
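# A minimal, hypothetical usage sketch of `make_batched_features_dataset_v2`
# (editor addition), mirroring the feature spec from the docstring above. The
# file pattern is an assumption, and the feature classes are imported locally
# just for this sketch.
def _example_make_batched_features_dataset():  # pragma: no cover
  from tensorflow.python.ops import parsing_ops as feature_specs
  features = {
      "age": feature_specs.FixedLenFeature(
          [], dtypes.int64, default_value=-1),
      "gender": feature_specs.FixedLenFeature([], dtypes.string),
      "kws": feature_specs.VarLenFeature(dtype=dtypes.string),
  }
  return make_batched_features_dataset_v2(
      file_pattern="/tmp/examples*.tfrecord",  # hypothetical path
      batch_size=2,
      features=features,
      label_key="age")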
@tf_export(v1=["data.experimental.make_batched_features_dataset"])
def make_batched_features_dataset_v1(file_pattern, # pylint: disable=missing-docstring
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
reader_num_threads=None,
parser_num_threads=None,
sloppy_ordering=False,
drop_final_batch=False):
return dataset_ops.DatasetV1Adapter(make_batched_features_dataset_v2(
file_pattern, batch_size, features, reader, label_key, reader_args,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, reader_num_threads, parser_num_threads,
sloppy_ordering, drop_final_batch))
make_batched_features_dataset_v1.__doc__ = (
make_batched_features_dataset_v2.__doc__)
def _get_file_names(file_pattern, shuffle):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of glob patterns.
shuffle: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
if not file_pattern:
raise ValueError("File pattern is empty.")
file_names = []
for entry in file_pattern:
file_names.extend(gfile.Glob(entry))
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError("No files match %s." % file_pattern)
# Sort files so it will be deterministic for unit tests.
if not shuffle:
file_names = sorted(file_names)
return file_names
@tf_export("data.experimental.SqlDataset", v1=[])
class SqlDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` consisting of the results from a SQL query."""
def __init__(self, driver_name, data_source_name, query, output_types):
"""Creates a `SqlDataset`.
`SqlDataset` allows a user to read data from the result set of a SQL query.
For example:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.data.experimental.SqlDataset("sqlite", "/foo/bar.sqlite3",
"SELECT name, age FROM people",
(tf.string, tf.int32))
# Prints the rows of the result set of the above query.
for element in dataset:
print(element)
```
Args:
driver_name: A 0-D `tf.string` tensor containing the database type.
Currently, the only supported value is 'sqlite'.
data_source_name: A 0-D `tf.string` tensor containing a connection string
to connect to the database.
query: A 0-D `tf.string` tensor containing the SQL query to execute.
output_types: A tuple of `tf.DType` objects representing the types of the
columns returned by `query`.
"""
self._driver_name = ops.convert_to_tensor(
driver_name, dtype=dtypes.string, name="driver_name")
self._data_source_name = ops.convert_to_tensor(
data_source_name, dtype=dtypes.string, name="data_source_name")
self._query = ops.convert_to_tensor(
query, dtype=dtypes.string, name="query")
self._element_spec = nest.map_structure(
lambda dtype: tensor_spec.TensorSpec([], dtype), output_types)
variant_tensor = gen_experimental_dataset_ops.sql_dataset(
self._driver_name, self._data_source_name, self._query,
**self._flat_structure)
super(SqlDatasetV2, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._element_spec
@tf_export(v1=["data.experimental.SqlDataset"])
class SqlDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` consisting of the results from a SQL query."""
@functools.wraps(SqlDatasetV2.__init__)
def __init__(self, driver_name, data_source_name, query, output_types):
wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types)
super(SqlDatasetV1, self).__init__(wrapped)
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
CsvDataset = CsvDatasetV1
SqlDataset = SqlDatasetV1
make_batched_features_dataset = make_batched_features_dataset_v1
make_csv_dataset = make_csv_dataset_v1
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/readers.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StatsAggregator for aggregating statistics from `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_MAX_QUEUE = 10
@tf_export("data.experimental.StatsAggregator", v1=[])
class StatsAggregatorV2(object):
"""A stateful resource that aggregates statistics from one or more iterators.
To record statistics, use one of the custom transformation functions defined
in this module when defining your `tf.data.Dataset`. All statistics will be
aggregated by the `StatsAggregator` that is associated with a particular
iterator (see below). For example, to record the latency of producing each
element by iterating over a dataset:
```python
dataset = ...
dataset = dataset.apply(tf.data.experimental.latency_stats("total_bytes"))
```
To associate a `StatsAggregator` with a `tf.data.Dataset` object, use
the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
dataset = ...
# Apply `StatsOptions` to associate `dataset` with `aggregator`.
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
```
Note: This interface is experimental and expected to change. In particular,
we expect to add other implementations of `StatsAggregator` that provide
different ways of exporting statistics, and add more types of statistics.
"""
def __init__(self):
self._resource = ged_ops.stats_aggregator_handle_v2()
    # There could be a conflict with multiple file writers in the same logdir
    # (b/37351340). Possible workarounds until this bug is resolved are:
    # a) having multiple dataset-stats-specific files inside log_dir, and
    # b) getting the default summary writer. Option b) does not quite solve the
    # problem, as there might be summary writers in the log dir that are not
    # set as the default, e.g. in a Keras callback.
    # Creating a summary_writer here could potentially be replaced with getting
    # the default summary_writer if one exists, creating it otherwise, or
    # adding a public method to associate a summary writer.
self._logdir = tempfile.mkdtemp()
self._summary_writer = summary_ops_v2.create_file_writer_v2(
self._logdir, max_queue=_DEFAULT_MAX_QUEUE)
ged_ops.stats_aggregator_set_summary_writer(self._resource,
self._summary_writer._resource) # pylint: disable=protected-access
@tf_export(v1=["data.experimental.StatsAggregator"])
class StatsAggregatorV1(object):
"""A stateful resource that aggregates statistics from one or more iterators.
To record statistics, use one of the custom transformation functions defined
in this module when defining your `tf.data.Dataset`. All statistics will be
aggregated by the `StatsAggregator` that is associated with a particular
iterator (see below). For example, to record the latency of producing each
element by iterating over a dataset:
```python
dataset = ...
dataset = dataset.apply(tf.data.experimental.latency_stats("total_bytes"))
```
To associate a `StatsAggregator` with a `tf.data.Dataset` object, use
the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
dataset = ...
# Apply `StatsOptions` to associate `dataset` with `aggregator`.
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
```
To get a protocol buffer summary of the currently aggregated statistics,
use the `StatsAggregator.get_summary()` tensor. The easiest way to do this
is to add the returned tensor to the `tf.GraphKeys.SUMMARIES` collection,
so that the summaries will be included with any existing summaries.
```python
aggregator = tf.data.experimental.StatsAggregator()
# ...
stats_summary = aggregator.get_summary()
tf.compat.v1.add_to_collection(tf.GraphKeys.SUMMARIES, stats_summary)
```
Note: This interface is experimental and expected to change. In particular,
we expect to add other implementations of `StatsAggregator` that provide
different ways of exporting statistics, and add more types of statistics.
"""
def __init__(self):
"""Creates a `StatsAggregator`."""
self._resource = ged_ops.stats_aggregator_handle()
def get_summary(self):
"""Returns a string `tf.Tensor` that summarizes the aggregated statistics.
The returned tensor will contain a serialized `tf.compat.v1.summary.Summary`
protocol
buffer, which can be used with the standard TensorBoard logging facilities.
Returns:
A scalar string `tf.Tensor` that summarizes the aggregated statistics.
"""
return ged_ops.stats_aggregator_summary(self._resource)
# TODO(b/116314787): Change this to StatsAggregatorV2 when we have stable
# SummaryWriterInterface, and do not break any users.
StatsAggregator = StatsAggregatorV1
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/stats_aggregator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cardinality analysis of `Dataset` objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util.tf_export import tf_export
INFINITE = -1
UNKNOWN = -2
tf_export("data.experimental.INFINITE_CARDINALITY").export_constant(
__name__, "INFINITE")
tf_export("data.experimental.UNKNOWN_CARDINALITY").export_constant(
__name__, "UNKNOWN")
@tf_export("data.experimental.cardinality")
def cardinality(dataset):
"""Returns the cardinality of `dataset`, if known.
The operation returns the cardinality of `dataset`. The operation may return
`tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite
number of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the
analysis fails to determine the number of elements in `dataset` (e.g. when the
dataset source is a file).
Args:
dataset: A `tf.data.Dataset` for which to determine cardinality.
Returns:
    A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. If
    the cardinality is infinite or unknown, the operation returns the named
    constants `INFINITE_CARDINALITY` or `UNKNOWN_CARDINALITY`, respectively.
"""
return ged_ops.dataset_cardinality(dataset._variant_tensor) # pylint: disable=protected-access
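# A minimal, hypothetical usage sketch of `cardinality` (editor addition). The
# datasets are illustrative, and `dataset_ops` is imported locally because
# this module does not import it at the top level.
def _example_cardinality():  # pragma: no cover
  from tensorflow.python.data.ops import dataset_ops
  ds = dataset_ops.Dataset.range(5)
  return (cardinality(ds),                         # evaluates to 5
          cardinality(ds.repeat()),                # INFINITE (-1)
          cardinality(ds.filter(lambda _: True)))  # UNKNOWN (-2)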
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/cardinality.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.rejection_resample")
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
"""A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist")
class_values_ds = dataset.map(class_func)
# Get initial distribution.
if initial_dist is not None:
initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist")
acceptance_dist, prob_of_original = (
_calculate_acceptance_probs_with_mixing(initial_dist_t,
target_dist_t))
initial_dist_ds = dataset_ops.Dataset.from_tensors(
initial_dist_t).repeat()
acceptance_dist_ds = dataset_ops.Dataset.from_tensors(
acceptance_dist).repeat()
prob_of_original_ds = dataset_ops.Dataset.from_tensors(
prob_of_original).repeat()
else:
initial_dist_ds = _estimate_initial_dist_ds(
target_dist_t, class_values_ds)
acceptance_and_original_prob_ds = initial_dist_ds.map(
lambda initial: _calculate_acceptance_probs_with_mixing( # pylint: disable=g-long-lambda
initial, target_dist_t))
acceptance_dist_ds = acceptance_and_original_prob_ds.map(
lambda accept_prob, _: accept_prob)
prob_of_original_ds = acceptance_and_original_prob_ds.map(
lambda _, prob_original: prob_original)
filtered_ds = _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds,
class_values_ds, seed)
# Prefetch filtered dataset for speed.
filtered_ds = filtered_ds.prefetch(3)
prob_original_static = _get_prob_original_static(
initial_dist_t, target_dist_t) if initial_dist is not None else None
if prob_original_static == 1:
return dataset_ops.Dataset.zip((class_values_ds, dataset))
elif prob_original_static == 0:
return filtered_ds
else:
return interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.zip((class_values_ds, dataset)), filtered_ds],
weights=prob_of_original_ds.map(lambda prob: [(prob, 1.0 - prob)]),
seed=seed)
return _apply_fn
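# Illustrative usage sketch (editorial addition, not part of the original
# module). A hypothetical example of rebalancing a binary-labeled dataset;
# the `(features, label)` element structure and the distributions below are
# assumptions made only for this sketch.
def _rejection_resample_usage_example(dataset):
  """Hypothetical sketch; expects a dataset of `(features, label)` pairs."""
  resampler = rejection_resample(
      class_func=lambda features, label: label,  # label: scalar tf.int32 class id
      target_dist=[0.5, 0.5],                    # aim for a balanced stream
      initial_dist=[0.9, 0.1])                   # known skew; omit to estimate online
  # The transformation yields `(class, element)` pairs; drop the class key.
  return dataset.apply(resampler).map(lambda _, element: element)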
def _get_prob_original_static(initial_dist_t, target_dist_t):
"""Returns the static probability of sampling from the original.
`tensor_util.constant_value(prob_of_original)` returns `None` if it encounters
an Op that it isn't defined for. We have some custom logic to avoid this.
Args:
initial_dist_t: A tensor of the initial distribution.
target_dist_t: A tensor of the target distribution.
Returns:
The probability of sampling from the original distribution as a constant,
if it is a constant, or `None`.
"""
init_static = tensor_util.constant_value(initial_dist_t)
target_static = tensor_util.constant_value(target_dist_t)
if init_static is None or target_static is None:
return None
else:
return np.min(target_static / init_static)
def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_values_ds,
seed):
"""Filters a dataset based on per-class acceptance probabilities.
Args:
dataset: The dataset to be filtered.
acceptance_dist_ds: A dataset of acceptance probabilities.
initial_dist_ds: A dataset of the initial probability distribution, given or
estimated.
class_values_ds: A dataset of the corresponding classes.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A dataset of (class value, data) after filtering.
"""
def maybe_warn_on_large_rejection(accept_dist, initial_dist):
proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)
return control_flow_ops.cond(
math_ops.less(proportion_rejected, .5),
lambda: accept_dist,
lambda: logging_ops.Print( # pylint: disable=g-long-lambda
accept_dist, [proportion_rejected, initial_dist, accept_dist],
message="Proportion of examples rejected by sampler is high: ",
summarize=100,
first_n=10))
acceptance_dist_ds = (dataset_ops.Dataset.zip((acceptance_dist_ds,
initial_dist_ds))
.map(maybe_warn_on_large_rejection))
def _gather_and_copy(class_val, acceptance_prob, data):
return class_val, array_ops.gather(acceptance_prob, class_val), data
current_probabilities_and_class_and_data_ds = dataset_ops.Dataset.zip(
(class_values_ds, acceptance_dist_ds, dataset)).map(_gather_and_copy)
filtered_ds = (
current_probabilities_and_class_and_data_ds
.filter(lambda _1, p, _2: random_ops.random_uniform([], seed=seed) < p))
return filtered_ds.map(lambda class_value, _, data: (class_value, data))
def _estimate_initial_dist_ds(
target_dist_t, class_values_ds, dist_estimation_batch_size=32,
smoothing_constant=10):
num_classes = (target_dist_t.shape[0] or array_ops.shape(target_dist_t)[0])
initial_examples_per_class_seen = array_ops.fill(
[num_classes], np.int64(smoothing_constant))
def update_estimate_and_tile(num_examples_per_class_seen, c):
updated_examples_per_class_seen, dist = _estimate_data_distribution(
c, num_examples_per_class_seen)
tiled_dist = array_ops.tile(
array_ops.expand_dims(dist, 0), [dist_estimation_batch_size, 1])
return updated_examples_per_class_seen, tiled_dist
initial_dist_ds = (class_values_ds.batch(dist_estimation_batch_size)
.apply(scan_ops.scan(initial_examples_per_class_seen,
update_estimate_and_tile))
.apply(batching.unbatch()))
return initial_dist_ds
def _get_target_to_initial_ratio(initial_probs, target_probs):
# Add tiny to initial_probs to avoid divide by zero.
denom = (initial_probs + np.finfo(initial_probs.dtype.as_numpy_dtype).tiny)
return target_probs / denom
def _estimate_data_distribution(c, num_examples_per_class_seen):
"""Estimate data distribution as labels are seen.
Args:
c: The class labels. Type `int32`, shape `[batch_size]`.
num_examples_per_class_seen: Type `int64`, shape `[num_classes]`,
containing counts.
Returns:
num_examples_per_class_seen: Updated counts. Type `int64`, shape
`[num_classes]`.
dist: The updated distribution. Type `float32`, shape `[num_classes]`.
"""
num_classes = num_examples_per_class_seen.get_shape()[0]
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = math_ops.add(
num_examples_per_class_seen, math_ops.reduce_sum(
array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
dist = math_ops.cast(init_prob_estimate, dtypes.float32)
return num_examples_per_class_seen, dist
def _calculate_acceptance_probs_with_mixing(initial_probs, target_probs):
"""Calculates the acceptance probabilities and mixing ratio.
In this case, we assume that we can *either* sample from the original data
distribution with probability `m`, or sample from a reshaped distribution
that comes from rejection sampling on the original distribution. This
rejection sampling is done on a per-class basis, with `a_i` representing the
probability of accepting data from class `i`.
This method is based on solving the following analysis for the reshaped
distribution:
Let F be the probability of a rejection (on any example).
Let p_i be the proportion of examples in the data in class i (init_probs)
Let a_i be the rate the rejection sampler should *accept* class i
Let t_i be the target proportion in the minibatches for class i (target_probs)
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
An example with class `i` will be accepted if `k` rejections occur, then an
example with class `i` is seen by the rejector, and it is accepted. This can
be written as follows:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
= p_i * a_i / (1 - F)   using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
If we try to minimize the amount of data rejected, we get the following:
M_max = max_i [ t_i / p_i ]
M_min = min_i [ t_i / p_i ]
The desired probability of accepting data if it comes from class `i`:
a_i = (t_i/p_i - m) / (M_max - m)
The desired probability of pulling a data element from the original dataset,
rather than the filtered one:
m = M_min
Args:
initial_probs: A Tensor of the initial probability distribution, given or
estimated.
target_probs: A Tensor of the target probability distribution.
Returns:
(A 1D Tensor with the per-class acceptance probabilities, the desired
probability of pull from the original distribution.)
"""
ratio_l = _get_target_to_initial_ratio(initial_probs, target_probs)
max_ratio = math_ops.reduce_max(ratio_l)
min_ratio = math_ops.reduce_min(ratio_l)
# Target prob to sample from original distribution.
m = min_ratio
# TODO(joelshor): Simplify fraction, if possible.
a_i = (ratio_l - m) / (max_ratio - m)
return a_i, m
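# Worked numeric sketch (editorial addition, not part of the original module)
# of the formulas above; the two-class distributions are assumptions chosen
# only for illustration.
def _acceptance_probs_worked_example():
  """Hypothetical sketch mirroring _calculate_acceptance_probs_with_mixing."""
  p = np.array([0.5, 0.5])              # initial distribution
  t = np.array([0.9, 0.1])              # target distribution
  ratio = t / p                         # [1.8, 0.2]
  m = ratio.min()                       # 0.2: probability of using the original
  a = (ratio - m) / (ratio.max() - m)   # [1.0, 0.0]: per-class acceptance
  # Sanity check: mixing 20% original with 80% filtered ([1.0, 0.0]) recovers
  # the target, since 0.2 * [0.5, 0.5] + 0.8 * [1.0, 0.0] == [0.9, 0.1].
  return a, m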
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/resampling.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import function
from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.prefetch_to_device")
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
# return dataset.apply(
# copy_to_device(target_device=device)).prefetch(buffer_size)
options = dataset_ops.Options()
options.experimental_optimization.prefetch_to_device = device
return dataset.with_options(options)
return _apply_fn
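# Illustrative usage sketch (editorial addition, not part of the original
# module). `prefetch_to_device` must be the last transformation in the
# pipeline; the device string and buffer size below are assumptions.
def _prefetch_to_device_usage_example(dataset):
  """Hypothetical sketch; never invoked by this module."""
  return dataset.apply(prefetch_to_device("/gpu:0", buffer_size=2))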
@tf_export("data.experimental.copy_to_device")
def copy_to_device(target_device, source_device="/cpu:0"):
"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = False
return _CopyToDeviceDataset(
dataset, target_device=target_device,
source_device=source_device).with_options(options)
return _apply_fn
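# Illustrative usage sketch (editorial addition, not part of the original
# module). Elements are produced on the CPU and copied to the GPU; a
# `prefetch` on the target device is the usual companion so that copies
# overlap with computation. The device strings are assumptions.
def _copy_to_device_usage_example(dataset):
  """Hypothetical sketch; never invoked by this module."""
  dataset = dataset.apply(copy_to_device("/gpu:0"))
  with ops.device("/gpu:0"):
    return dataset.prefetch(1)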
# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate
# all inputs to the Op are in host memory, thereby avoiding some unnecessary
# Sends and Recvs.
class _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that copies elements to another device."""
def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
"""Constructs a _CopyToDeviceDataset.
Args:
input_dataset: `Dataset` to be copied
target_device: The name of the device to which elements would be copied.
source_device: Device where input_dataset would be placed.
"""
self._input_dataset = input_dataset
self._target_device = target_device
spec = framework_device.DeviceSpec().from_string(self._target_device)
self._is_gpu_target = (spec.device_type == "GPU")
self._source_device_string = source_device
self._source_device = ops.convert_to_tensor(source_device)
wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(
self._input_dataset._variant_tensor) # pylint: disable=protected-access
@function.defun()
def _init_func():
"""Creates an iterator for the input dataset.
Returns:
A `string` tensor that encapsulates the iterator created.
"""
ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)
resource = gen_dataset_ops.anonymous_iterator(
**self._input_dataset._flat_structure) # pylint: disable=protected-access
with ops.control_dependencies(
[gen_dataset_ops.make_iterator(ds_variant, resource)]):
return gen_dataset_ops.iterator_to_string_handle(resource)
init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun()
def _remote_init_func():
return functional_ops.remote_call(
target=self._source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access
self._init_captured_args = self._init_func.captured_inputs
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _next_func(string_handle):
"""Calls get_next for created iterator.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
The elements generated from `input_dataset`
"""
with ops.device(self._source_device_string):
iterator = iterator_ops.Iterator.from_string_handle(
string_handle,
dataset_ops.get_legacy_output_types(self),
dataset_ops.get_legacy_output_shapes(self),
dataset_ops.get_legacy_output_classes(self))
return structure.to_tensor_list(self.element_spec, iterator.get_next())
next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun_with_attributes(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
attributes={"experimental_ints_on_device": True})
def _remote_next_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + next_func_concrete.captured_inputs,
Tout=self._input_dataset._flat_types, # pylint: disable=protected-access
f=next_func_concrete)
self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access
self._next_captured_args = self._next_func.captured_inputs
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _finalize_func(string_handle):
"""Destroys the iterator resource created.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
Tensor constant 0
"""
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
string_handle,
**self._input_dataset._flat_structure) # pylint: disable=protected-access
with ops.control_dependencies([
resource_variable_ops.destroy_resource_op(
iterator_resource, ignore_lookup_error=True)]):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func._get_concrete_function_internal( # pylint: disable=protected-access
)
self._finalize_captured_args = self._finalize_func.captured_inputs
g = ops.get_default_graph()
self._init_func.add_to_graph(g)
self._next_func.add_to_graph(g)
self._finalize_func.add_to_graph(g)
# pylint: enable=protected-access
with ops.device(self._target_device):
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**self._input_dataset._flat_structure) # pylint: disable=protected-access
super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)
# The one_shot_iterator implementation needs a 0 arg _make_dataset function
# that thereby captures all the inputs required to create the dataset. Since
# there are strings that are inputs to the GeneratorDataset which can't be
# placed on a GPU, this fails for the GPU case. Therefore, disabling it for
# GPU
def make_one_shot_iterator(self):
if self._is_gpu_target:
raise ValueError("Cannot create a one shot iterator when using "
"`tf.data.experimental.copy_to_device()` on GPU. Please "
"use `Dataset.make_initializable_iterator()` instead.")
else:
return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
class _MapOnGpuDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over elements in its using a GPU."""
def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
defun_kwargs={"experimental_ints_on_device": True})
variant_tensor = ged_ops.experimental_map_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
use_inter_op_parallelism=self._use_inter_op_parallelism,
**self._flat_structure)
super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._map_func.output_structure
def _transformation_name(self):
return "map_on_gpu()"
def map_on_gpu(map_func):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs
`map_func` on GPU. It must be used after applying the
`tf.data.experimental.copy_to_device` transformation with a GPU device
argument.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _MapOnGpuDataset(dataset, map_func)
return _apply_fn
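# Illustrative usage sketch (editorial addition, not part of the original
# module). `map_on_gpu` is only valid after `copy_to_device` has placed the
# elements on a GPU; the device string and map function are assumptions.
def _map_on_gpu_usage_example(dataset):
  """Hypothetical sketch; never invoked by this module."""
  dataset = dataset.apply(copy_to_device("/gpu:0"))
  return dataset.apply(map_on_gpu(lambda x: x * 2))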
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/prefetching_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.ThreadingOptions")
class ThreadingOptions(options.OptionsBase):
"""Represents options for dataset threading.
You can set the threading options of a dataset through the
`experimental_threading` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.ThreadingOptions`.
```python
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 10
dataset = dataset.with_options(options)
```
"""
max_intra_op_parallelism = options.create_option(
name="max_intra_op_parallelism",
ty=int,
docstring=
"If set, it overrides the maximum degree of intra-op parallelism.")
private_threadpool_size = options.create_option(
name="private_threadpool_size",
ty=int,
docstring=
"If set, the dataset will use a private threadpool of the given size.")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/threading_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.group_by_reducer")
def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
This transformation maps each element of a dataset to a key using `key_func` and
groups the elements by key. The `reducer` is used to process each group; its
`init_func` is used to initialize state for each group when it is created, the
`reduce_func` is used to update the state every time an element is mapped to
the matching group, and the `finalize_func` is used to map the final state to
an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByReducerDataset(dataset, key_func, reducer)
return _apply_fn
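# Illustrative usage sketch (editorial addition, not part of the original
# module). Sums the elements of an integer dataset separately for even and
# odd keys; the dataset and the reduction are assumptions chosen only to show
# how the key/init/reduce/finalize pieces fit together.
def _group_by_reducer_usage_example():
  """Hypothetical sketch; never invoked by this module."""
  ds = dataset_ops.Dataset.range(10)
  reducer = Reducer(
      init_func=lambda _: np.int64(0),         # initial state for each key
      reduce_func=lambda state, x: state + x,  # accumulate elements of the group
      finalize_func=lambda state: state)       # emit the final sum
  # Produces one element per key: 20 for the even group and 25 for the odd one.
  return ds.apply(group_by_reducer(lambda x: x % 2, reducer))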
@tf_export("data.experimental.group_by_window")
def group_by_window(key_func,
reduce_func,
window_size=None,
window_size_func=None):
"""A transformation that groups windows of elements by key and reduces them.
This transformation maps each consecutive element in a dataset to a key
using `key_func` and groups the elements by key. It then applies
`reduce_func` to at most `window_size_func(key)` elements matching the same
key. All except the final window for each key will contain
`window_size_func(key)` elements; the final window may be smaller.
You may provide either a constant `window_size` or a window size determined by
the key through `window_size_func`.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reduce_func: A function mapping a key and a dataset of up to `window_size`
consecutive elements matching that key to another dataset.
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements matching the same key to combine in a single
batch, which will be passed to `reduce_func`. Mutually exclusive with
`window_size_func`.
window_size_func: A function mapping a key to a `tf.int64` scalar
`tf.Tensor`, representing the number of consecutive elements matching
the same key to combine in a single batch, which will be passed to
`reduce_func`. Mutually exclusive with `window_size`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
passed.
"""
if (window_size is not None and window_size_func or
not (window_size is not None or window_size_func)):
raise ValueError("Must pass either window_size or window_size_func.")
if window_size is not None:
def constant_window_func(unused_key):
return ops.convert_to_tensor(window_size, dtype=dtypes.int64)
window_size_func = constant_window_func
assert window_size_func is not None
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByWindowDataset(dataset, key_func, reduce_func,
window_size_func)
return _apply_fn
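# Illustrative usage sketch (editorial addition, not part of the original
# module). Groups an integer dataset by parity and batches each group in
# windows of three; the dataset and window size are assumptions.
def _group_by_window_usage_example():
  """Hypothetical sketch; never invoked by this module."""
  ds = dataset_ops.Dataset.range(10)
  # Yields batches such as [0, 2, 4], [1, 3, 5], [6, 8] and [7, 9].
  return ds.apply(
      group_by_window(
          key_func=lambda x: x % 2,
          reduce_func=lambda key, window: window.batch(3),
          window_size=3))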
@tf_export("data.experimental.bucket_by_sequence_length")
def bucket_by_sequence_length(element_length_func,
bucket_boundaries,
bucket_batch_sizes,
padded_shapes=None,
padding_values=None,
pad_to_bucket_boundary=False,
no_padding=False,
drop_remainder=False):
"""A transformation that buckets elements in a `Dataset` by length.
Elements of the `Dataset` are grouped together by length and then are padded
and batched.
This is useful for sequence tasks in which the elements have variable length.
Grouping together elements that have similar lengths reduces the total
fraction of padding in a batch which increases training step efficiency.
Args:
element_length_func: function from element in `Dataset` to `tf.int32`,
determines the length of the element, which will determine the bucket it
goes into.
bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
bucket), and caller must ensure that the source `Dataset` does not contain
any elements with length longer than `max(bucket_boundaries)`.
no_padding: `bool`, indicates whether to pad the batch features (features
need to be either of type `tf.SparseTensor` or of same shape).
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
"""
with ops.name_scope("bucket_by_seq_length"):
if len(bucket_batch_sizes) != (len(bucket_boundaries) + 1):
raise ValueError(
"len(bucket_batch_sizes) must equal len(bucket_boundaries) + 1")
batch_sizes = constant_op.constant(bucket_batch_sizes, dtype=dtypes.int64)
def element_to_bucket_id(*args):
"""Return int64 id of the length bucket for this element."""
seq_length = element_length_func(*args)
boundaries = list(bucket_boundaries)
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, seq_length),
math_ops.less(seq_length, buckets_max))
bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
# The window size is set to the batch size for this bucket
window_size = batch_sizes[bucket_id]
return window_size
def make_padded_shapes(shapes, none_filler=None):
padded = []
for shape in nest.flatten(shapes):
shape = tensor_shape.TensorShape(shape)
shape = [
none_filler if tensor_shape.dimension_value(d) is None else d
for d in shape
]
padded.append(shape)
return nest.pack_sequence_as(shapes, padded)
def batching_fn(bucket_id, grouped_dataset):
"""Batch elements in dataset."""
batch_size = window_size_fn(bucket_id)
if no_padding:
return grouped_dataset.batch(batch_size, drop_remainder=drop_remainder)
none_filler = None
if pad_to_bucket_boundary:
err_msg = ("When pad_to_bucket_boundary=True, elements must have "
"length < max(bucket_boundaries).")
check = check_ops.assert_less(
bucket_id,
constant_op.constant(len(bucket_batch_sizes) - 1,
dtype=dtypes.int64),
message=err_msg)
with ops.control_dependencies([check]):
boundaries = constant_op.constant(bucket_boundaries,
dtype=dtypes.int64)
bucket_boundary = boundaries[bucket_id]
none_filler = bucket_boundary - 1
input_shapes = dataset_ops.get_legacy_output_shapes(grouped_dataset)
shapes = make_padded_shapes(padded_shapes or input_shapes,
none_filler=none_filler)
return grouped_dataset.padded_batch(
batch_size, shapes, padding_values, drop_remainder=drop_remainder)
def _apply_fn(dataset):
return dataset.apply(
group_by_window(element_to_bucket_id, batching_fn,
window_size_func=window_size_fn))
return _apply_fn
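# Illustrative usage sketch (editorial addition, not part of the original
# module). Buckets variable-length sequences into three length buckets; the
# boundaries and batch sizes are assumptions made only for this sketch.
def _bucket_by_sequence_length_usage_example(dataset):
  """Hypothetical sketch; expects a dataset of 1-D integer sequences."""
  return dataset.apply(
      bucket_by_sequence_length(
          element_length_func=lambda seq: array_ops.shape(seq)[0],
          bucket_boundaries=[10, 20],       # buckets: <10, 10..19, >=20
          bucket_batch_sizes=[32, 16, 8]))  # len(bucket_boundaries) + 1 sizes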
class _GroupByReducerDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a reduction."""
def __init__(self, input_dataset, key_func, reducer):
"""See `group_by_reducer()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_init_func(reducer.init_func)
self._make_reduce_func(reducer.reduce_func, input_dataset)
self._make_finalize_func(reducer.finalize_func)
variant_tensor = ged_ops.experimental_group_by_reducer_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._init_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
key_func=self._key_func.function,
init_func=self._init_func.function,
reduce_func=self._reduce_func.function,
finalize_func=self._finalize_func.function,
**self._flat_structure)
super(_GroupByReducerDataset, self).__init__(input_dataset, variant_tensor)
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
self._key_func = dataset_ops.StructuredFunctionWrapper(
key_func, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
"`key_func` must return a single tf.int64 tensor. "
"Got type=%s and shape=%s"
% (self._key_func.output_types, self._key_func.output_shapes))
def _make_init_func(self, init_func):
"""Make wrapping defun for init_func."""
self._init_func = dataset_ops.StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=tensor_spec.TensorSpec([], dtypes.int64))
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
# Iteratively rerun the reduce function until reaching a fixed point on
# `self._state_structure`.
self._state_structure = self._init_func.output_structure
state_types = self._init_func.output_types
state_shapes = self._init_func.output_shapes
state_classes = self._init_func.output_classes
need_to_rerun = True
while need_to_rerun:
wrapped_func = dataset_ops.StructuredFunctionWrapper(
reduce_func,
self._transformation_name(),
input_structure=(self._state_structure, input_dataset.element_spec),
add_to_graph=False)
# Extract and validate class information from the returned values.
for new_state_class, state_class in zip(
nest.flatten(wrapped_func.output_classes),
nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(state_classes, wrapped_func.output_classes))
# Extract and validate type information from the returned values.
for new_state_type, state_type in zip(
nest.flatten(wrapped_func.output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(self._init_func.output_types, wrapped_func.output_types))
# Extract shape information from the returned values.
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
state_shapes = nest.pack_sequence_as(
self._init_func.output_shapes, weakened_state_shapes)
self._state_structure = structure.convert_legacy_structure(
state_types, state_shapes, state_classes)
self._reduce_func = wrapped_func
self._reduce_func.function.add_to_graph(ops.get_default_graph())
def _make_finalize_func(self, finalize_func):
"""Make wrapping defun for finalize_func."""
self._finalize_func = dataset_ops.StructuredFunctionWrapper(
finalize_func, self._transformation_name(),
input_structure=self._state_structure)
@property
def element_spec(self):
return self._finalize_func.output_structure
def _functions(self):
return [
self._key_func, self._init_func, self._reduce_func, self._finalize_func
]
def _transformation_name(self):
return "tf.data.experimental.group_by_reducer()"
class _GroupByWindowDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a windowed reduction."""
def __init__(self, input_dataset, key_func, reduce_func, window_size_func):
"""See `group_by_window()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_reduce_func(reduce_func, input_dataset)
self._make_window_size_func(window_size_func)
variant_tensor = ged_ops.group_by_window_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._window_size_func.function.captured_inputs,
key_func=self._key_func.function,
reduce_func=self._reduce_func.function,
window_size_func=self._window_size_func.function,
**self._flat_structure)
super(_GroupByWindowDataset, self).__init__(input_dataset, variant_tensor)
def _make_window_size_func(self, window_size_func):
"""Make wrapping defun for window_size_func."""
def window_size_func_wrapper(key):
return ops.convert_to_tensor(window_size_func(key), dtype=dtypes.int64)
self._window_size_func = dataset_ops.StructuredFunctionWrapper(
window_size_func_wrapper,
self._transformation_name(),
input_structure=tensor_spec.TensorSpec([], dtypes.int64))
if not self._window_size_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
"`window_size_func` must return a single tf.int64 scalar tensor.")
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
def key_func_wrapper(*args):
return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64)
self._key_func = dataset_ops.StructuredFunctionWrapper(
key_func_wrapper, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
"`key_func` must return a single tf.int64 scalar tensor.")
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
nested_dataset = dataset_ops.DatasetSpec(
input_dataset.element_spec)
input_structure = (tensor_spec.TensorSpec([], dtypes.int64), nested_dataset)
self._reduce_func = dataset_ops.StructuredFunctionWrapper(
reduce_func, self._transformation_name(),
input_structure=input_structure)
if not isinstance(
self._reduce_func.output_structure, dataset_ops.DatasetSpec):
raise TypeError("`reduce_func` must return a `Dataset` object.")
# pylint: disable=protected-access
self._element_spec = (
self._reduce_func.output_structure._element_spec)
@property
def element_spec(self):
return self._element_spec
def _functions(self):
return [self._key_func, self._reduce_func, self._window_size_func]
def _transformation_name(self):
return "tf.data.experimental.group_by_window()"
@tf_export("data.experimental.Reducer")
class Reducer(object):
"""A reducer is used for reducing a set of elements.
A reducer is represented as a tuple of the three functions:
1) initialization function: key => initial state
2) reduce function: (old state, input) => new state
3) finalization function: state => result
"""
def __init__(self, init_func, reduce_func, finalize_func):
self._init_func = init_func
self._reduce_func = reduce_func
self._finalize_func = finalize_func
@property
def init_func(self):
return self._init_func
@property
def reduce_func(self):
return self._reduce_func
@property
def finalize_func(self):
return self._finalize_func
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/grouping.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
# TODO(b/73383364): Properly export in the `tf.data.experimental` API when
# stable or make private / remove.
class PrivateThreadPool(object):
"""A stateful resource that represents a private thread pool."""
def __init__(self, num_threads, display_name=None,
max_intra_op_parallelism=1):
"""Creates a `PrivateThreadPool` with the given number of threads."""
if context.executing_eagerly():
shared_name = _generate_shared_name("privatethreadpool")
self._resource = ged_ops.thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name,
shared_name=shared_name)
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device=context.context().device_name)
else:
self._resource = ged_ops.thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name)
class _ThreadPoolDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and sets a custom threadpool."""
def __init__(self, input_dataset, thread_pool):
self._input_dataset = input_dataset
self._thread_pool = thread_pool
variant_tensor = ged_ops.thread_pool_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._thread_pool._resource, # pylint: disable=protected-access
**self._flat_structure)
super(_ThreadPoolDataset, self).__init__(input_dataset, variant_tensor)
# TODO(b/73383364): Properly export in the `tf.data.experimental` API when
# stable or make private / remove.
def override_threadpool(dataset, thread_pool):
"""Returns a new dataset that uses the given thread pool for its operations.
Args:
dataset: A `tf.data.Dataset` object.
thread_pool: A `PrivateThreadPool` object.
Returns:
A dataset containing the same values as `dataset`, but which uses
`thread_pool` to compute any of its parallel operations (such as
`tf.data.Dataset.map`).
"""
return _ThreadPoolDataset(dataset, thread_pool)
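# Illustrative usage sketch (editorial addition, not part of the original
# module). Runs a dataset's parallel operations on a dedicated pool of four
# threads; the thread count and display name are assumptions.
def _override_threadpool_usage_example(dataset):
  """Hypothetical sketch; never invoked by this module."""
  pool = PrivateThreadPool(4, display_name="example_pool")
  return override_threadpool(dataset, pool)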
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/threadpool.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `dataset` API for parsing example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.util.tf_export import tf_export
class _ParseExampleDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that parses `example` dataset into a `dict` dataset."""
def __init__(self, input_dataset, features, num_parallel_calls):
self._input_dataset = input_dataset
if not structure.are_compatible(
input_dataset.element_spec,
tensor_spec.TensorSpec([None], dtypes.string)):
raise TypeError("Input dataset should be a dataset of vectors of strings")
self._num_parallel_calls = num_parallel_calls
# pylint: disable=protected-access
self._features = parsing_ops._prepend_none_dimension(features)
# sparse_keys and dense_keys come back sorted here.
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = parsing_ops._features_to_raw_params(
self._features, [
parsing_ops.VarLenFeature, parsing_ops.SparseFeature,
parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature
])
# TODO(b/112859642): Pass sparse_index and sparse_values for SparseFeature.
(_, dense_defaults_vec, sparse_keys, sparse_types, dense_keys, dense_shapes,
dense_shape_as_shape) = parsing_ops._process_raw_parameters(
None, dense_defaults, sparse_keys, sparse_types, dense_keys,
dense_types, dense_shapes)
# pylint: enable=protected-access
self._sparse_keys = sparse_keys
self._sparse_types = sparse_types
self._dense_keys = dense_keys
self._dense_defaults = dense_defaults_vec
self._dense_shapes = dense_shapes
self._dense_types = dense_types
input_dataset_shape = dataset_ops.get_legacy_output_shapes(
self._input_dataset)
dense_output_shapes = [input_dataset_shape.concatenate(shape)
for shape in dense_shape_as_shape]
sparse_output_shapes = [input_dataset_shape.concatenate([None])
for _ in range(len(sparse_keys))]
output_shapes = dict(
zip(self._dense_keys + self._sparse_keys,
dense_output_shapes + sparse_output_shapes))
output_types = dict(
zip(self._dense_keys + self._sparse_keys,
self._dense_types + self._sparse_types))
output_classes = dict(
zip(self._dense_keys + self._sparse_keys,
[ops.Tensor for _ in range(len(self._dense_defaults))] +
[sparse_tensor.SparseTensor for _ in range(len(self._sparse_keys))
]))
self._element_spec = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
variant_tensor = (
gen_experimental_dataset_ops.parse_example_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_parallel_calls,
self._dense_defaults,
self._sparse_keys,
self._dense_keys,
self._sparse_types,
self._dense_shapes,
**self._flat_structure))
super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
# TODO(b/111553342): add arguments names and example names as well.
@tf_export("data.experimental.parse_example_dataset")
def parse_example_dataset(features, num_parallel_calls=1):
"""A transformation that parses `Example` protos into a `dict` of tensors.
Parses batches of serialized `Example` protos from the input dataset. Each
input element is treated as a batch with `batch_size` many individual
serialized `Example` protos.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
details about feature dictionaries.
Args:
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of parsing processes to call in parallel.
Returns:
A dataset transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if features argument is None.
"""
if features is None:
raise ValueError("Missing: features was %s." % features)
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls)
if any(
isinstance(feature, parsing_ops.SparseFeature)
for _, feature in features.items()
):
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
out_dataset = out_dataset.map(
lambda x: parsing_ops._construct_sparse_tensors_for_sparse_features(
features, x), num_parallel_calls=num_parallel_calls)
return out_dataset
return _apply_fn
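# Illustrative usage sketch (editorial addition, not part of the original
# module). Parses batches of serialized `Example` protos into a dict of
# tensors; the feature spec, batch size, and parallelism are assumptions.
def _parse_example_dataset_usage_example(serialized_dataset):
  """Hypothetical sketch; expects a dataset of scalar serialized protos."""
  features = {
      "label": parsing_ops.FixedLenFeature([], dtypes.int64, default_value=0),
      "tokens": parsing_ops.VarLenFeature(dtypes.string),
  }
  # The transformation requires vectors of strings, hence the `batch` first.
  return serialized_dataset.batch(32).apply(
      parse_example_dataset(features, num_parallel_calls=4))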
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/parsing_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.unique")
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _UniqueDataset(dataset)
return _apply_fn
class _UniqueDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` contains the unique elements from its input."""
def __init__(self, input_dataset):
"""See `unique()` for details."""
self._input_dataset = input_dataset
if dataset_ops.get_legacy_output_types(input_dataset) not in (
dtypes.int32, dtypes.int64, dtypes.string):
raise TypeError(
"`tf.data.experimental.unique()` only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component.")
variant_tensor = gen_experimental_dataset_ops.unique_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._flat_structure)
super(_UniqueDataset, self).__init__(input_dataset, variant_tensor)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/unique.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for matching input filenames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class MatchingFilesDataset(dataset_ops.DatasetSource):
"""A `Dataset` that list the files according to the input patterns."""
def __init__(self, patterns):
self._patterns = ops.convert_to_tensor(
patterns, dtype=dtypes.string, name="patterns")
variant_tensor = ged_ops.matching_files_dataset(self._patterns)
super(MatchingFilesDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.string)
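# Illustrative usage sketch (editorial addition, not part of the original
# module). Produces one scalar `tf.string` filename per match; the glob
# pattern below is a hypothetical placeholder.
def _matching_files_usage_example():
  """Hypothetical sketch; never invoked by this module."""
  return MatchingFilesDataset(["/tmp/data/*.tfrecord"])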
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/matching_files.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets and Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.get_single_element")
def get_single_element(dataset):
"""Returns the single element in `dataset` as a nested structure of tensors.
This function enables you to use a `tf.data.Dataset` in a stateless
"tensor-in tensor-out" expression, without creating a
`tf.compat.v1.data.Iterator`.
This can be useful when your preprocessing transformations are expressed
as a `Dataset`, and you want to use the transformation at serving time.
For example:
```python
input_batch = tf.compat.v1.placeholder(tf.string, shape=[BATCH_SIZE])
def preprocessing_fn(input_str):
# ...
return image, label
dataset = (tf.data.Dataset.from_tensor_slices(input_batch)
.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
.batch(BATCH_SIZE))
image_batch, label_batch = tf.data.experimental.get_single_element(dataset)
```
Args:
dataset: A `tf.data.Dataset` object containing a single element.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
TypeError: if `dataset` is not a `tf.data.Dataset` object.
InvalidArgumentError (at runtime): if `dataset` does not contain exactly
one element.
"""
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
# pylint: disable=protected-access
return structure.from_compatible_tensor_list(
dataset.element_spec,
gen_dataset_ops.dataset_to_single_element(
dataset._variant_tensor, **dataset._flat_structure)) # pylint: disable=protected-access
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/get_single_element.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for optimizing `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
# TODO(jsimsa): Support RE matching for both individual transformation (e.g. to
# account for indexing) and transformation sequence.
def assert_next(transformations):
"""A transformation that asserts which transformations happen next.
Args:
transformations: A `tf.string` vector `tf.Tensor` identifying the
transformations that are expected to happen next.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _AssertNextDataset(dataset, transformations)
return _apply_fn
def model():
"""A transformation that models performance.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._ModelDataset(dataset) # pylint: disable=protected-access
return _apply_fn
def non_serializable():
"""A non-serializable identity transformation.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _NonSerializableDataset(dataset)
return _apply_fn
def optimize(optimizations=None):
"""A transformation that applies optimizations.
Args:
optimizations: (Optional.) A `tf.string` vector `tf.Tensor` identifying
optimizations to use. If not specified, the default set of optimizations
is applied.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._OptimizeDataset(dataset, optimizations) # pylint: disable=protected-access
return _apply_fn
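# A sketch, in the spirit of this module's unit tests, of how `assert_next`
# and `optimize` compose; `_example_assert_next_and_optimize` is an
# illustrative helper for documentation, not part of the module's API.
def _example_assert_next_and_optimize():
  dataset = dataset_ops.Dataset.range(10)
  # Assert (checked when the iterator runs) that a Map transformation
  # immediately follows this point in the optimized pipeline.
  dataset = dataset.apply(assert_next(["Map"]))
  dataset = dataset.map(lambda x: x + 1)
  # Apply the `noop_elimination` rewrite; the non-trivial map above survives
  # it, so the assertion holds at iteration time.
  return dataset.apply(optimize(["noop_elimination"]))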
class _AssertNextDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that asserts which transformations happen next."""
def __init__(self, input_dataset, transformations):
"""See `assert_next()` for details."""
self._input_dataset = input_dataset
if transformations is None:
raise ValueError("At least one transformation should be specified")
self._transformations = ops.convert_to_tensor(
transformations, dtype=dtypes.string, name="transformations")
variant_tensor = (
gen_experimental_dataset_ops.assert_next_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._transformations,
**self._flat_structure))
super(_AssertNextDataset, self).__init__(input_dataset, variant_tensor)
class _NonSerializableDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that performs non-serializable identity transformation."""
def __init__(self, input_dataset):
"""See `non_serializable()` for details."""
self._input_dataset = input_dataset
variant_tensor = (
gen_experimental_dataset_ops.non_serializable_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._flat_structure))
super(_NonSerializableDataset, self).__init__(input_dataset, variant_tensor)
class _ChooseFastestDataset(dataset_ops.DatasetV2):
"""A `Dataset` that merges two input datasets."""
def __init__(self, datasets, num_experiments=10):
"""Chooses the fastest of some input datasets.
Given input datasets, produces elements as quickly as the fastest of the
inputs. Note that this dataset assumes that input datasets have the same
elements in the same order, though this is not enforced besides checking
that the input datasets have compatible output types, output shapes, and
cardinality at runtime. The resulting dataset produces elements that are
identical to the input elements, and in the same order.
Note that the time to first iteration is longer when this dataset is used
    due to the overhead of dynamically picking the fastest dataset. Namely,
for the first num_experiments iterations, this dataset will pull from all
of its inputs simultaneously in order to determine which input is the
fastest. For all subsequent iterations, that input will be used.
Args:
datasets: A list of `Datasets` that all have the same elements in the same
order.
num_experiments: The number of experiments to run before deciding which
dataset is fastest. In each "experiment" iteration, the dataset will
call from all its inputs simultaneously, and update its knowledge of
which input is the fastest.
Returns:
      A `Dataset` that has the same elements as the inputs.
"""
self._datasets = list(datasets)
self._element_spec = self._datasets[0].element_spec
variant_tensor = (
gen_experimental_dataset_ops.choose_fastest_dataset(
[dataset._variant_tensor for dataset in self._datasets], # pylint: disable=protected-access
num_experiments=num_experiments,
**self._flat_structure))
super(_ChooseFastestDataset, self).__init__(variant_tensor)
def _inputs(self):
return self._datasets
@property
def element_spec(self):
return self._element_spec
class _ChooseFastestBranchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that merges two input datasets."""
def __init__(self,
input_dataset,
functions,
ratio_numerator=1,
ratio_denominator=1,
num_elements_per_branch=None):
"""Chooses the fastest of some dataset functions.
Given dataset functions that take input_dataset as input and output
another dataset, produces elements as quickly as the fastest of these
output datasets. Note that datasets in the dataset functions are assumed
to be stateless, and the iterators created by the functions' output datasets
will, given the same input elements, all produce the same output elements.
Datasets in the functions are also expected to iterate over the input
dataset at most once. The violation of these conditions may lead to
undefined behavior.
For example:
```python
dataset = tf.data.Dataset.range(100)
    dataset = _ChooseFastestBranchDataset(
        dataset,
        [
            lambda ds: ds.map(lambda x: tf.reshape(x, [1])).batch(10),
            lambda ds: ds.batch(10).map(lambda x: tf.reshape(x, [10, 1]))
        ],
        ratio_numerator=10,
        num_elements_per_branch=10
    )
```
The resulting dataset will produce elements equivalent to
`tf.data.Dataset.range(100).map(lambda x: tf.reshape(x, [1])).batch(10)`, or
`tf.data.Dataset.range(100).batch(10).map(lambda x: tf.reshape(x, [10, 1]))`
    Note that the first `num_elements_per_branch` iterations may be slower due
    to the overhead of dynamically picking the fastest branch. Namely, for
    these iterations, the dataset will produce elements from any of the
    branches in order to determine which branch is the fastest. For all
    subsequent iterations, that branch will be used.
Args:
input_dataset: A `Dataset` that can be used as input to `functions`.
functions: A list of callables, each of which takes a `Dataset` as input
and returns a `Dataset`.
ratio_numerator: The numerator in the ratio of input elements consumed to
output elements produced for each function. This should be the same for
all functions. For example, if the function is
`lambda ds: ds.batch(10)`, the ratio is 10:1, i.e. the input dataset
must produce 10 elements for every element of the output dataset. In
this case, ratio_numerator should be 10.
ratio_denominator: The denominator in the ratio of input elements consumed
to output elements produced for each function. This should be the same
for all functions. For example, if the function is
`lambda ds: ds.batch(10)`, the ratio is 10:1, i.e. the input dataset
must produce 10 elements for every element of the output dataset. In
this case, ratio_denominator should be 1.
num_elements_per_branch: The number of elements to get from each branch
        before deciding which dataset is fastest. In the first
        `len(functions) * num_elements_per_branch` iterations, the dataset
        will pull from one of the branches and update its knowledge of which
        branch is the fastest.
Note that (num_elements_per_branch * ratio) is expected to be an
integer.
Returns:
      A `Dataset` that produces the same elements as the functions' output
      datasets.
"""
input_structure = dataset_ops.DatasetSpec(input_dataset.element_spec)
self._funcs = [
dataset_ops.StructuredFunctionWrapper(
f, "ChooseFastestV2", input_structure=input_structure)
for f in functions
]
self._element_spec = self._funcs[0].output_structure._element_spec # pylint: disable=protected-access
self._captured_arguments = []
for f in self._funcs:
self._captured_arguments.extend(f.function.captured_inputs)
self._capture_lengths = [
len(f.function.captured_inputs) for f in self._funcs
]
if ratio_numerator <= 0 or ratio_denominator <= 0:
raise ValueError("ratio must be positive.")
if num_elements_per_branch is None:
# Pick a sensible default based on `ratio_denominator`
num_elements_per_branch = 10 * ratio_denominator
variant_tensor = (
gen_experimental_dataset_ops.choose_fastest_branch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
ratio_numerator=ratio_numerator,
ratio_denominator=ratio_denominator,
other_arguments=self._captured_arguments,
num_elements_per_branch=num_elements_per_branch,
branches=[f.function for f in self._funcs],
other_arguments_lengths=self._capture_lengths,
**self._flat_structure))
super(_ChooseFastestBranchDataset, self).__init__(input_dataset,
variant_tensor)
@property
def element_spec(self):
return self._element_spec
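# A sketch restating the docstring example above with the actual constructor
# arguments; `_example_choose_fastest_branch` is an illustrative helper for
# documentation, not part of the module's API.
def _example_choose_fastest_branch():
  from tensorflow.python.ops import array_ops
  dataset = dataset_ops.Dataset.range(100)
  def branch_0(ds):
    return ds.map(lambda x: array_ops.reshape(x, [1])).batch(10)
  def branch_1(ds):
    return ds.batch(10).map(lambda x: array_ops.reshape(x, [10, 1]))
  # Both branches consume 10 input elements per output element (ratio 10:1).
  return _ChooseFastestBranchDataset(
      dataset, [branch_0, branch_1],
      ratio_numerator=10,
      num_elements_per_branch=10)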
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/optimization.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Scan dataset transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
class _ScanDataset(dataset_ops.UnaryDataset):
"""A dataset that scans a function across its input."""
def __init__(self, input_dataset, initial_state, scan_func):
"""See `scan()` for details."""
self._input_dataset = input_dataset
self._initial_state = structure.normalize_element(initial_state)
# Compute initial values for the state classes, shapes and types based on
# the initial state. The shapes may be refined by running `tf_scan_func` one
# or more times below.
self._state_structure = structure.type_spec_from_value(self._initial_state)
# Iteratively rerun the scan function until reaching a fixed point on
# `self._state_shapes`.
need_to_rerun = True
while need_to_rerun:
wrapped_func = dataset_ops.StructuredFunctionWrapper(
scan_func,
self._transformation_name(),
input_structure=(self._state_structure,
input_dataset.element_spec),
add_to_graph=False)
if not (isinstance(wrapped_func.output_types, collections_abc.Sequence)
and len(wrapped_func.output_types) == 2):
raise TypeError("The scan function must return a pair comprising the "
"new state and the output value.")
      # Extract and validate class information from the returned values.
      new_state_classes, output_classes = wrapped_func.output_classes
old_state_classes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._state_structure)
for new_state_class, old_state_class in zip(
nest.flatten(new_state_classes),
nest.flatten(old_state_classes)):
if not issubclass(new_state_class, old_state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(old_state_classes, new_state_classes))
# Extract and validate type information from the returned values.
new_state_types, output_types = wrapped_func.output_types
old_state_types = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._state_structure)
for new_state_type, old_state_type in zip(
nest.flatten(new_state_types), nest.flatten(old_state_types)):
if new_state_type != old_state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(old_state_types, new_state_types))
# Extract shape information from the returned values.
new_state_shapes, output_shapes = wrapped_func.output_shapes
old_state_shapes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._state_structure)
self._element_spec = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
flat_state_shapes = nest.flatten(old_state_shapes)
flat_new_state_shapes = nest.flatten(new_state_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
# TODO(b/110122868): Support a "most specific compatible structure"
# method for combining structures, to avoid using legacy structures
# in this method.
self._state_structure = structure.convert_legacy_structure(
old_state_types,
nest.pack_sequence_as(old_state_shapes, weakened_state_shapes),
old_state_classes)
self._scan_func = wrapped_func
self._scan_func.function.add_to_graph(ops.get_default_graph())
# pylint: disable=protected-access
variant_tensor = gen_experimental_dataset_ops.scan_dataset(
self._input_dataset._variant_tensor,
structure.to_tensor_list(self._state_structure, self._initial_state),
self._scan_func.function.captured_inputs,
f=self._scan_func.function,
preserve_cardinality=True,
**self._flat_structure)
super(_ScanDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._scan_func]
@property
def element_spec(self):
return self._element_spec
def _transformation_name(self):
return "tf.data.experimental.scan()"
@tf_export("data.experimental.scan")
def scan(initial_state, scan_func):
"""A transformation that scans a function across an input dataset.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
Args:
initial_state: A nested structure of tensors, representing the initial state
of the accumulator.
scan_func: A function that maps `(old_state, input_element)` to
      `(new_state, output_element)`. It must take two arguments and return a
pair of nested structures of tensors. The `new_state` must match the
structure of `initial_state`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _ScanDataset(dataset, initial_state, scan_func)
return _apply_fn
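# A minimal usage sketch: `scan` computing running sums over a range dataset;
# `_example_running_sum` is an illustrative helper for documentation, not
# part of the module's API.
def _example_running_sum():
  import numpy as np  # np.int64 matches the int64 elements of `Dataset.range`.
  def scan_func(state, element):
    new_state = state + element
    return new_state, new_state  # (new accumulator, emitted element)
  # Produces the running sums 0, 1, 3, 6, 10.
  return dataset_ops.Dataset.range(5).apply(scan(np.int64(0), scan_func))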
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/scan_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental shuffle ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import random_seed
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
class _ShuffleAndRepeatDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that fuses `shuffle` and `repeat`."""
def __init__(self, input_dataset, buffer_size, count=None, seed=None):
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
if count is None:
self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
else:
self._count = ops.convert_to_tensor(
count, dtype=dtypes.int64, name="count")
self._seed, self._seed2 = random_seed.get_seed(seed)
variant_tensor = gen_dataset_ops.shuffle_and_repeat_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
count=self._count,
seed=self._seed,
seed2=self._seed2,
**self._flat_structure)
super(_ShuffleAndRepeatDataset, self).__init__(input_dataset,
variant_tensor)
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.shuffle(buffer_size, seed)` followed by "
"`tf.data.Dataset.repeat(count)`. Static tf.data optimizations will take "
"care of using the fused implementation.")
@tf_export("data.experimental.shuffle_and_repeat")
def shuffle_and_repeat(buffer_size, count=None, seed=None):
"""Shuffles and repeats a Dataset returning a new permutation for each epoch.
`dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size, count))`
is equivalent to
`dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat(count)`
The difference is that the latter dataset is not serializable. So,
if you need to checkpoint an input pipeline with reshuffling you must use
this implementation.
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
      maximum number of elements that will be buffered when prefetching.
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
      number of times the dataset should be repeated. The default behavior
      (if `count` is `None` or `-1`) is for the dataset to be repeated
      indefinitely.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset): # pylint: disable=missing-docstring
return _ShuffleAndRepeatDataset(dataset, buffer_size, count, seed)
return _apply_fn
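# A minimal usage sketch; `_example_shuffle_and_repeat` is an illustrative
# helper for documentation, not part of the module's API.
def _example_shuffle_and_repeat():
  dataset = dataset_ops.Dataset.range(10)
  # Shuffle with a buffer covering the whole dataset and repeat for 3 epochs;
  # unlike shuffle().repeat(), the fused op keeps the pipeline serializable.
  return dataset.apply(shuffle_and_repeat(buffer_size=10, count=3, seed=42))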
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/shuffle_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for tf.data writers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.TFRecordWriter")
class TFRecordWriter(object):
"""Writes data to a TFRecord file.
To write a `dataset` to a single TFRecord file:
```python
dataset = ... # dataset to be written
writer = tf.data.experimental.TFRecordWriter(PATH)
writer.write(dataset)
```
To shard a `dataset` across multiple TFRecord files:
```python
dataset = ... # dataset to be written
def reduce_func(key, dataset):
filename = tf.strings.join([PATH_PREFIX, tf.strings.as_string(key)])
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(dataset.map(lambda _, x: x))
return tf.data.Dataset.from_tensors(filename)
dataset = dataset.enumerate()
dataset = dataset.apply(tf.data.experimental.group_by_window(
lambda i, _: i % NUM_SHARDS, reduce_func, tf.int64.max
))
```
"""
def __init__(self, filename, compression_type=None):
self._filename = ops.convert_to_tensor(
filename, dtypes.string, name="filename")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
def write(self, dataset):
"""Returns a `tf.Operation` to write a dataset to a file.
Args:
dataset: a `tf.data.Dataset` whose elements are to be written to a file
Returns:
A `tf.Operation` that, when run, writes contents of `dataset` to a file.
"""
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
if not dataset_ops.get_structure(dataset).is_compatible_with(
tensor_spec.TensorSpec([], dtypes.string)):
raise TypeError(
"`dataset` must produce scalar `DT_STRING` tensors whereas it "
"produces shape {0} and types {1}".format(
dataset_ops.get_legacy_output_shapes(dataset),
dataset_ops.get_legacy_output_types(dataset)))
return gen_experimental_dataset_ops.dataset_to_tf_record(
dataset._variant_tensor, self._filename, self._compression_type) # pylint: disable=protected-access
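# A small round-trip sketch, assuming TF 1.x graph mode; the path and the
# helper name `_example_round_trip` are illustrative only, not module API.
def _example_round_trip(path="/tmp/example.tfrecord"):
  import tensorflow as tf
  records = tf.data.Dataset.from_tensor_slices([b"a", b"b", b"c"])
  writer = tf.data.experimental.TFRecordWriter(path)
  write_op = writer.write(records)  # A `tf.Operation` in graph mode.
  # After `write_op` has been run, the records can be read back with:
  read_back = tf.data.TFRecordDataset(path)
  return write_op, read_back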
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/writers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for gathering statistics from `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(None, "Use `tf.data.experimental.StatsOptions`.")
def set_stats_aggregator(stats_aggregator, prefix="", counter_prefix=""):
"""Set the given `stats_aggregator` for aggregating the input dataset stats.
Args:
stats_aggregator: A `tf.data.experimental.StatsAggregator` object.
    prefix: (Optional) String, all statistics recorded for the input `dataset`
      will have the given `prefix` prepended to the name.
counter_prefix: (Optional) String, all statistics recorded as `counters`
will have the given `prefix` for the counter. Defaults to "/tensorflow".
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset_ops._SetStatsAggregatorDataset( # pylint: disable=protected-access
dataset, stats_aggregator, prefix, counter_prefix)
return _apply_fn
@tf_export("data.experimental.bytes_produced_stats")
def bytes_produced_stats(tag):
"""Records the number of bytes produced by each element of the input dataset.
To consume the statistics, associate a `StatsAggregator` with the output
dataset.
Args:
tag: String. All statistics recorded by the returned transformation will
be associated with the given `tag`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _StatsDataset(
dataset, gen_experimental_dataset_ops.bytes_produced_stats_dataset, tag)
return _apply_fn
@tf_export("data.experimental.latency_stats")
def latency_stats(tag):
"""Records the latency of producing each element of the input dataset.
To consume the statistics, associate a `StatsAggregator` with the output
dataset.
Args:
tag: String. All statistics recorded by the returned transformation will
be associated with the given `tag`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _StatsDataset(
dataset, gen_experimental_dataset_ops.latency_stats_dataset, tag)
return _apply_fn
class _StatsDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and also records statistics."""
def __init__(self, input_dataset, op_function, tag):
self._input_dataset = input_dataset
self._op_function = op_function
self._tag = ops.convert_to_tensor(tag, dtype=dtypes.string)
variant_tensor = self._op_function(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._tag,
**self._flat_structure)
super(_StatsDataset, self).__init__(input_dataset, variant_tensor)
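# An illustrative sketch, assuming the TF 1.x `StatsAggregator`/`StatsOptions`
# API; `_example_latency_stats` is an illustrative helper for documentation,
# not part of the module's API.
def _example_latency_stats():
  import tensorflow as tf
  aggregator = tf.data.experimental.StatsAggregator()
  dataset = tf.data.Dataset.range(100).apply(
      tf.data.experimental.latency_stats("range_latency"))
  options = tf.data.Options()
  options.experimental_stats.aggregator = aggregator
  dataset = dataset.with_options(options)
  # `aggregator.get_summary()` yields a summary op containing the recorded
  # latency statistics once the dataset has been iterated.
  return dataset, aggregator.get_summary()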
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/stats_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.make_saveable_from_iterator")
def make_saveable_from_iterator(iterator):
"""Returns a SaveableObject for saving/restoring iterator state using Saver.
Args:
iterator: Iterator.
Returns:
A SaveableObject for saving/restoring iterator state using Saver.
Raises:
ValueError: If iterator does not support checkpointing.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.compat.v1.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
"""
return _Saveable(iterator._iterator_resource) # pylint: disable=protected-access
class _Saveable(saver_lib.BaseSaverBuilder.SaveableObject):
"""SaveableObject for saving/restoring iterator state."""
def __init__(self, iterator_resource):
serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource)
specs = [
saver_lib.BaseSaverBuilder.SaveSpec(serialized_iterator, "",
iterator_resource.name + "-state")
]
super(_Saveable, self).__init__(iterator_resource, specs,
iterator_resource.name)
def restore(self, restored_tensors, unused_restored_shapes):
with ops.colocate_with(self.op):
return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])
@tf_export("data.experimental.CheckpointInputPipelineHook")
class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
This could potentially avoid overfitting in certain pipelines where the
  number of training steps per eval is small compared to the dataset
  size, or if the training pipeline is pre-empted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
`tf.data.experimental.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
  eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
  collection when building the eval graph.
"""
def __init__(self, estimator):
"""Initializes a `CheckpointInputPipelineHook`.
Args:
estimator: Estimator.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of saver or scaffold should be set.
"""
# `checkpoint_basename` is "input.ckpt" for non-distributed pipelines or
# of the form "input_<task_type>_<task_id>.ckpt" for distributed pipelines.
# Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is
# "model.ckpt". We intentionally choose the input pipeline checkpoint prefix
# to be different to avoid conflicts with the model checkpoint.
# pylint: disable=protected-access
checkpoint_prefix = "input"
if estimator._config.num_worker_replicas > 1:
# Distributed setting.
suffix = "_{}_{}".format(estimator._config.task_type,
estimator._config.task_id)
checkpoint_prefix += suffix
# pylint: enable=protected-access
# We use a composition paradigm instead of inheriting from
# `CheckpointSaverHook` because `Estimator` does an `isinstance` check
# to check whether a `CheckpointSaverHook` is already present in the list
# of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`
# would thwart this behavior. This hook checkpoints *only the iterators*
# and not the graph variables.
self._checkpoint_saver_hook = basic_session_run_hooks.CheckpointSaverHook(
estimator.model_dir,
save_secs=estimator._config.save_checkpoints_secs, # pylint: disable=protected-access
save_steps=estimator._config.save_checkpoints_steps, # pylint: disable=protected-access
checkpoint_basename=checkpoint_prefix + ".ckpt")
# Name for the protocol buffer file that will contain the list of most
# recent checkpoints stored as a `CheckpointState` protocol buffer.
# This file, kept in the same directory as the checkpoint files, is
# automatically managed by the `Saver` to keep track of recent checkpoints.
# The default name used by the `Saver` for this file is "checkpoint". Here
# we use the name "checkpoint_<checkpoint_prefix>" so that in case the
# `checkpoint_dir` is the same as the model checkpoint directory, there are
# no conflicts during restore.
self._latest_filename = "checkpoint_" + checkpoint_prefix
self._first_run = True
def begin(self):
# Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`
# collection if no `Saver` or `Scaffold` is provided.
# pylint: disable=protected-access
if (self._checkpoint_saver_hook._saver is None and
self._checkpoint_saver_hook._scaffold is None):
iterators = ops.get_collection(iterator_ops.GLOBAL_ITERATORS)
saveables = [_Saveable(i) for i in iterators]
self._checkpoint_saver_hook._saver = _CustomSaver(saveables,
self._latest_filename)
# pylint: enable=protected-access
self._checkpoint_saver_hook.begin()
def _restore_or_save_initial_ckpt(self, session):
# Ideally this should be run in after_create_session but is not for the
# following reason:
# Currently there is no way of enforcing an order of running the
# `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`
# is run *after* this hook. That is troublesome because
# 1. If a checkpoint exists and this hook restores it, the initializer hook
# will override it.
# 2. If no checkpoint exists, this hook will try to save an uninitialized
# iterator which will result in an exception.
#
# As a temporary fix we enter the following implicit contract between this
# hook and the _DatasetInitializerHook.
# 1. The _DatasetInitializerHook initializes the iterator in the call to
# after_create_session.
# 2. This hook saves the iterator on the first call to `before_run()`, which
# is guaranteed to happen after `after_create_session()` of all hooks
# have been run.
# Check if there is an existing checkpoint. If so, restore from it.
# pylint: disable=protected-access
latest_checkpoint_path = checkpoint_management.latest_checkpoint(
self._checkpoint_saver_hook._checkpoint_dir,
latest_filename=self._latest_filename)
if latest_checkpoint_path:
self._checkpoint_saver_hook._get_saver().restore(session,
latest_checkpoint_path)
else:
# The checkpoint saved here is the state at step "global_step".
# Note: We do not save the GraphDef or MetaGraphDef here.
global_step = session.run(self._checkpoint_saver_hook._global_step_tensor)
self._checkpoint_saver_hook._save(session, global_step)
self._checkpoint_saver_hook._timer.update_last_triggered_step(global_step)
# pylint: enable=protected-access
def before_run(self, run_context):
if self._first_run:
self._restore_or_save_initial_ckpt(run_context.session)
self._first_run = False
return self._checkpoint_saver_hook.before_run(run_context)
def after_run(self, run_context, run_values):
self._checkpoint_saver_hook.after_run(run_context, run_values)
def end(self, session):
self._checkpoint_saver_hook.end(session)
class _CustomSaver(saver_lib.Saver):
"""`Saver` with a different default `latest_filename`.
This is used in the `CheckpointInputPipelineHook` to avoid conflicts with
the model ckpt saved by the `CheckpointSaverHook`.
"""
def __init__(self, var_list, latest_filename):
super(_CustomSaver, self).__init__(var_list)
self._latest_filename = latest_filename
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False):
return super(_CustomSaver, self).save(
sess, save_path, global_step, latest_filename or self._latest_filename,
meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/iterator_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for optimizing `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
def map_defun(fn,
elems,
output_dtypes,
output_shapes,
max_intra_op_parallelism=1):
"""Map a function on the list of tensors unpacked from `elems` on dimension 0.
Args:
fn: A function (`function.defun`) that takes a list of tensors and returns
another list of tensors. The output list has the same types as
output_dtypes. The elements of the output list have the same dimension 0
as `elems`, and the remaining dimensions correspond to those of
      `output_shapes`.
elems: A list of tensors.
output_dtypes: A list of dtypes corresponding to the output types of the
function.
output_shapes: A list of `TensorShape`s corresponding to the output shapes
from each invocation of the function on slices of inputs.
max_intra_op_parallelism: An integer. If positive, sets the max parallelism
limit of each function call to this.
Raises:
ValueError: if any of the inputs are malformed.
Returns:
A list of `Tensor` objects with the same types as `output_dtypes`.
"""
if not isinstance(elems, list):
raise ValueError("`elems` must be a list of tensors.")
if not isinstance(output_dtypes, list):
raise ValueError("`output_dtypes` must be a list of `tf.DType` objects.")
if not isinstance(output_shapes, list):
raise ValueError("`output_shapes` must be a list of `tf.TensorShape` "
"objects.")
concrete_fn = fn._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(shivaniagrawal/rachelim): what about functions created without
# input_signature.
elems = [ops.convert_to_tensor(e) for e in elems]
output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes]
return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs,
output_dtypes, output_shapes, concrete_fn,
max_intra_op_parallelism)
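# An illustrative sketch, assuming the internal `function.defun` decorator
# with an explicit input signature (a concrete function is required); names
# such as `_example_map_defun` and `double` are illustrative only.
def _example_map_defun():
  from tensorflow.python.eager import function
  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import dtypes
  from tensorflow.python.framework import tensor_spec
  @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
  def double(x):
    return x * 2
  elems = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.int32)
  # Applies `double` to each row of `elems`, producing a [2, 2] result.
  return map_defun(double, [elems], [dtypes.int32], [(2,)])[0]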
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/map_defun.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling optimizations in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.MapVectorizationOptions")
class MapVectorizationOptions(options.OptionsBase):
"""Represents options for the MapVectorization optimization."""
# TODO(rachelim): Other configuration parameters can go here, for example,
# how many "experiments" to run with ChooseFastestBranchDataset.
enabled = options.create_option(
name="enabled",
ty=bool,
docstring=
"Whether to vectorize map transformations. If None, defaults to False."
)
use_choose_fastest = options.create_option(
name="use_choose_fastest",
ty=bool,
docstring="Whether to use ChooseFastestBranchDataset with this "
"transformation. If True, the pipeline picks between the vectorized and "
"original segment at runtime based on their iterations speed. If None, "
"defaults to False.")
def _static_optimizations(self):
if self.enabled:
return ["map_vectorization"]
return []
def _static_optimization_configs(self):
if self.use_choose_fastest:
return ["map_vectorization:use_choose_fastest:true"]
else:
return ["map_vectorization:use_choose_fastest:false"]
@tf_export("data.experimental.OptimizationOptions")
class OptimizationOptions(options.OptionsBase):
"""Represents options for dataset optimizations.
You can set the optimization options of a dataset through the
`experimental_optimization` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.OptimizationOptions`.
```python
options = tf.data.Options()
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_vectorization.enabled = True
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
```
"""
apply_default_optimizations = options.create_option(
name="apply_default_optimizations",
ty=bool,
docstring=
"Whether to apply default static optimizations. If False, only static "
"optimizations that have been explicitly enabled will be applied.")
autotune = options.create_option(
name="autotune",
ty=bool,
docstring=
"Whether to automatically tune performance knobs. If None, defaults to "
"True.")
autotune_algorithm = options.create_option(
name="autotune_algorithm",
ty=int,
docstring=
"When autotuning is enabled (through `autotune`), identifies the "
"algorithm to use for the autotuning optimization.")
autotune_buffers = options.create_option(
name="autotune_buffers",
ty=bool,
docstring=
"When autotuning is enabled (through `autotune`), determines whether to "
"also autotune buffer sizes for datasets with parallelism. If None,"
" defaults to False.")
autotune_cpu_budget = options.create_option(
name="autotune_cpu_budget",
ty=int,
docstring=
"When autotuning is enabled (through `autotune`), determines the CPU "
"budget to use. Values greater than the number of schedulable CPU cores "
"are allowed but may result in CPU contention. If None, defaults to the "
"number of schedulable CPU cores.")
filter_fusion = options.create_option(
name="filter_fusion",
ty=bool,
docstring=
"Whether to fuse filter transformations. If None, defaults to False.")
filter_with_random_uniform_fusion = options.create_option(
name="filter_with_random_uniform_fusion",
ty=bool,
docstring=
"Whether to fuse filter dataset that predicts random_uniform < rate into "
"a sampling dataset. If None, defaults to False.")
hoist_random_uniform = options.create_option(
name="hoist_random_uniform",
ty=bool,
docstring=
"Whether to hoist `tf.random_uniform()` ops out of map transformations. "
"If None, defaults to False.")
map_and_batch_fusion = options.create_option(
name="map_and_batch_fusion",
ty=bool,
docstring=
"Whether to fuse map and batch transformations. If None, defaults to "
"True.")
map_and_filter_fusion = options.create_option(
name="map_and_filter_fusion",
ty=bool,
docstring=
"Whether to fuse map and filter transformations. If None, defaults to "
"False.")
map_fusion = options.create_option(
name="map_fusion",
ty=bool,
docstring="Whether to fuse map transformations. If None, defaults to "
"False.")
map_parallelization = options.create_option(
name="map_parallelization",
ty=bool,
docstring=
"Whether to parallelize stateless map transformations. If None, defaults "
"to False.")
map_vectorization = options.create_option(
name="map_vectorization",
ty=MapVectorizationOptions,
docstring=
"The map vectorization options associated with the dataset. See "
"`tf.data.experimental.MapVectorizationOptions` for more details.",
default_factory=MapVectorizationOptions)
noop_elimination = options.create_option(
name="noop_elimination",
ty=bool,
docstring=
"Whether to eliminate no-op transformations. If None, defaults to True.")
parallel_batch = options.create_option(
name="parallel_batch",
ty=bool,
docstring="Whether to parallelize copying of batch elements. If None, "
"defaults to False.")
prefetch_to_device = options.create_option(
name="prefetch_to_device",
ty=str,
docstring="Whether to prefetch the next batch on device. Defaults to "
"`/gpu:0`, deactivate by setting to None.",
default_factory=lambda: "/gpu:0"
)
shuffle_and_repeat_fusion = options.create_option(
name="shuffle_and_repeat_fusion",
ty=bool,
docstring="Whether to fuse shuffle and repeat transformations. If None, "
"defaults to True.")
def _static_optimizations(self):
"""Produces the list of enabled static optimizations."""
result = set()
all_optimizations = [
"filter_fusion",
"filter_with_random_uniform_fusion",
"hoist_random_uniform",
"map_and_batch_fusion",
"map_and_filter_fusion",
"map_parallelization",
"map_fusion",
"noop_elimination",
"parallel_batch",
"shuffle_and_repeat_fusion",
]
for optimization in all_optimizations:
if getattr(self, optimization):
result.add(optimization)
if self.apply_default_optimizations is not False:
# The following optimizations are turned on by default, unless the user
# explicitly disables them.
      default_optimizations = [
          "map_and_batch_fusion",
          "noop_elimination",
          "shuffle_and_repeat_fusion",
      ]
      for optimization in default_optimizations:
if getattr(self, optimization) is not False:
result.add(optimization)
if self.map_vectorization is not None:
result.update(self.map_vectorization._static_optimizations()) # pylint: disable=protected-access
if self.autotune is not False and self.autotune_buffers: # pylint: disable=g-bool-id-comparison
result.add("inject_prefetch")
return sorted(list(result))
def _static_optimization_configs(self):
if self.map_vectorization is not None:
return self.map_vectorization._static_optimization_configs() # pylint: disable=protected-access
return []
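# A small sketch illustrating which graph rewrites a set of options translates
# into; `_example_static_optimizations` is an illustrative helper for
# documentation, not part of the public API.
def _example_static_optimizations():
  opts = OptimizationOptions()
  opts.apply_default_optimizations = False
  opts.map_and_batch_fusion = True
  opts.map_vectorization.enabled = True
  # Returns ["map_and_batch_fusion", "map_vectorization"].
  return opts._static_optimizations()  # pylint: disable=protected-access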
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/optimization_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, "
"num_parallel_calls=tf.data.experimental.AUTOTUNE)` instead. If sloppy "
"execution is desired, use `tf.data.Options.experimental_determinstic`.")
@tf_export("data.experimental.parallel_interleave")
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.data.experimental.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
dataset, map_func, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements)
return _apply_fn
class _DirectedInterleaveDataset(dataset_ops.Dataset):
"""A substitute for `Dataset.interleave()` on a fixed list of datasets."""
def __init__(self, selector_input, data_inputs):
self._selector_input = selector_input
self._data_inputs = list(data_inputs)
first_output_types = dataset_ops.get_legacy_output_types(data_inputs[0])
first_output_classes = dataset_ops.get_legacy_output_classes(data_inputs[0])
for data_input in data_inputs[1:]:
if (dataset_ops.get_legacy_output_types(data_input) != first_output_types
or dataset_ops.get_legacy_output_classes(data_input)
!= first_output_classes):
raise TypeError("All datasets must have the same type and class.")
output_shapes = dataset_ops.get_legacy_output_shapes(self._data_inputs[0])
for data_input in self._data_inputs[1:]:
output_shapes = nest.pack_sequence_as(output_shapes, [
ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip(
nest.flatten(output_shapes),
nest.flatten(dataset_ops.get_legacy_output_shapes(data_input)))
])
self._element_spec = structure.convert_legacy_structure(
first_output_types, output_shapes, first_output_classes)
super(_DirectedInterleaveDataset, self).__init__()
def _as_variant_tensor(self):
# pylint: disable=protected-access
return (
gen_experimental_dataset_ops.directed_interleave_dataset(
self._selector_input._variant_tensor,
[data_input._variant_tensor for data_input in self._data_inputs],
**self._flat_structure))
def _inputs(self):
return [self._selector_input] + self._data_inputs
@property
def element_spec(self):
return self._element_spec
@tf_export("data.experimental.sample_from_datasets", v1=[])
def sample_from_datasets_v2(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
`weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError: If the `weights` argument is specified and does not match the
length of the `datasets` element.
"""
num_datasets = len(datasets)
if not isinstance(weights, dataset_ops.DatasetV2):
if weights is None:
# Select inputs with uniform probability.
logits = [[1.0] * num_datasets]
else:
# Use the given `weights` as the probability of choosing the respective
# input.
weights = ops.convert_to_tensor(weights, name="weights")
if weights.dtype not in (dtypes.float32, dtypes.float64):
raise TypeError("`weights` must be convertible to a tensor of "
"`tf.float32` or `tf.float64` elements.")
if not weights.shape.is_compatible_with([num_datasets]):
raise ValueError(
"`weights` must be a vector of length `len(datasets)`.")
# The `stateless_multinomial()` op expects log-probabilities, as opposed
# to weights.
logits = array_ops.expand_dims(math_ops.log(weights, name="logits"), 0)
# NOTE(mrry): We only specialize when `weights` is not a `Dataset`. When it
# is a `Dataset`, it is possible that evaluating it has a side effect the
# user depends on.
if len(datasets) == 1:
return datasets[0]
def select_dataset_constant_logits(seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
selector_input = dataset_ops.MapDataset(
random_ops.RandomDataset(seed).batch(2),
select_dataset_constant_logits,
use_inter_op_parallelism=False)
else:
# Use each element of the given `weights` dataset as the probability of
# choosing the respective input.
# The `stateless_multinomial()` op expects log-probabilities, as opposed to
# weights.
logits_ds = weights.map(lambda *p: math_ops.log(p, name="logits"))
def select_dataset_varying_logits(logits, seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
logits_and_seeds = dataset_ops.Dataset.zip(
(logits_ds, random_ops.RandomDataset(seed).batch(2)))
selector_input = dataset_ops.MapDataset(
logits_and_seeds,
select_dataset_varying_logits,
use_inter_op_parallelism=False)
return _DirectedInterleaveDataset(selector_input, datasets)
@tf_export(v1=["data.experimental.sample_from_datasets"])
def sample_from_datasets_v1(datasets, weights=None, seed=None):
return dataset_ops.DatasetV1Adapter(
sample_from_datasets_v2(datasets, weights, seed))
sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__
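# A minimal usage sketch; `_example_sample_from_datasets` is an illustrative
# helper for documentation, not part of the public API.
def _example_sample_from_datasets():
  zeros = dataset_ops.Dataset.from_tensors(0).repeat()
  ones = dataset_ops.Dataset.from_tensors(1).repeat()
  # Draws from `zeros` with probability 0.8 and from `ones` with 0.2.
  return sample_from_datasets_v2([zeros, ones], weights=[0.8, 0.2], seed=1)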
@tf_export("data.experimental.choose_from_datasets", v1=[])
def choose_from_datasets_v2(datasets, choice_dataset):
"""Creates a dataset that deterministically chooses elements from `datasets`.
For example, given the following datasets:
```python
datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
tf.data.Dataset.from_tensors("bar").repeat(),
tf.data.Dataset.from_tensors("baz").repeat()]
# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
choice_dataset = tf.data.Dataset.range(3).repeat(3)
result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
```
The elements of `result` will be:
```
"foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
```
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:
A dataset that interleaves elements from `datasets` according to the values
of `choice_dataset`.
Raises:
TypeError: If the `datasets` or `choice_dataset` arguments have the wrong
type.
"""
if not structure.are_compatible(choice_dataset.element_spec,
tensor_spec.TensorSpec([], dtypes.int64)):
raise TypeError("`choice_dataset` must be a dataset of scalar "
"`tf.int64` tensors.")
return _DirectedInterleaveDataset(choice_dataset, datasets)
@tf_export(v1=["data.experimental.choose_from_datasets"])
def choose_from_datasets_v1(datasets, choice_dataset):
return dataset_ops.DatasetV1Adapter(
choose_from_datasets_v2(datasets, choice_dataset))
choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
choose_from_datasets = choose_from_datasets_v1
sample_from_datasets = sample_from_datasets_v1
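# A minimal usage sketch of the helpers defined above. The dataset contents,
# weights, and seed below are arbitrary illustrative values, not part of the
# library API; the block only runs when this module is executed directly.
if __name__ == "__main__":
  _example_datasets = [
      dataset_ops.Dataset.from_tensors(i).repeat() for i in range(3)
  ]
  # Randomly interleave the three inputs with a 70/20/10 split.
  _sampled = sample_from_datasets(
      _example_datasets, weights=[0.7, 0.2, 0.1], seed=42)
  # Deterministically cycle through the inputs: 0, 1, 2, 0, 1, 2, ...
  _choice = dataset_ops.Dataset.range(3).repeat()
  _chosen = choose_from_datasets(_example_datasets, _choice)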
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/interleave_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(None, "Use `tf.data.Dataset.enumerate()`.")
@tf_export("data.experimental.enumerate_dataset")
def enumerate_dataset(start=0):
"""A transformation that enumerates the elements of a dataset.
  It is similar to Python's built-in `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
  # The structure of each input element is preserved; the enumeration index
  # is added as the first component of each output element.
a.apply(tf.data.experimental.enumerate_dataset(start=5))
=> { (5, 1), (6, 2), (7, 3) }
b.apply(tf.data.experimental.enumerate_dataset())
=> { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.enumerate(start)
return _apply_fn
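# A minimal usage sketch of `enumerate_dataset()`. The import inside the guard
# is only needed for this illustration; the range size and start value are
# arbitrary.
if __name__ == "__main__":
  from tensorflow.python.data.ops import dataset_ops
  _ds = dataset_ops.Dataset.range(3).apply(enumerate_dataset(start=5))
  # Produces the elements (5, 0), (6, 1), (7, 2).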
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/enumerate_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for manually injecting delays into `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
class _SleepDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that sleeps before producing each upstream element."""
def __init__(self, input_dataset, sleep_microseconds):
self._input_dataset = input_dataset
self._sleep_microseconds = sleep_microseconds
variant_tensor = gen_experimental_dataset_ops.sleep_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._sleep_microseconds,
**self._flat_structure)
super(_SleepDataset, self).__init__(input_dataset, variant_tensor)
def sleep(sleep_microseconds):
"""Sleeps for `sleep_microseconds` before producing each input element.
Args:
sleep_microseconds: The number of microseconds to sleep before producing an
input element.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _SleepDataset(dataset, sleep_microseconds)
return _apply_fn
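# A minimal usage sketch of `sleep()`: delay each element of a small range
# dataset by one millisecond. The element count and delay are arbitrary
# illustrative values.
if __name__ == "__main__":
  _ds = dataset_ops.Dataset.range(5).apply(sleep(1000))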
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/sleep.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignore_errors dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.ignore_errors")
def ignore_errors():
"""Creates a `Dataset` from another `Dataset` and silently ignores any errors.
Use this transformation to produce a dataset that contains the same elements
as the input, but silently drops any elements that caused an error. For
example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
  # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
  # InvalidArgumentError.
  dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))
  # Using `ignore_errors()` will drop the element that causes an error.
  dataset = dataset.apply(
      tf.data.experimental.ignore_errors())  # ==> {1., 0.5, 0.25}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _IgnoreErrorsDataset(dataset)
return _apply_fn
class _IgnoreErrorsDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that silently ignores errors when computing its input."""
def __init__(self, input_dataset):
"""See `Dataset.ignore_errors()` for details."""
self._input_dataset = input_dataset
variant_tensor = (
gen_experimental_dataset_ops.ignore_errors_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._flat_structure))
super(_IgnoreErrorsDataset, self).__init__(input_dataset, variant_tensor)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/error_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StatsOptions to configure stats aggregation options for `tf.data` pipelines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.StatsOptions")
class StatsOptions(options.OptionsBase):
"""Represents options for collecting dataset stats using `StatsAggregator`.
You can set the stats options of a dataset through the `experimental_stats`
property of `tf.data.Options`; the property is an instance of
`tf.data.experimental.StatsOptions`. For example, to collect latency stats
on all dataset edges, use the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
options.experimental_stats.latency_all_edges = True
dataset = dataset.with_options(options)
```
"""
aggregator = options.create_option(
name="aggregator",
ty=(stats_aggregator.StatsAggregatorV2,
stats_aggregator.StatsAggregatorV1),
docstring=
"Associates the given statistics aggregator with the dataset pipeline.")
prefix = options.create_option(
name="prefix",
ty=str,
docstring=
"Prefix to prepend all statistics recorded for the input `dataset` with.",
default_factory=lambda: "")
counter_prefix = options.create_option(
name="counter_prefix",
ty=str,
docstring="Prefix for the statistics recorded as counter.",
default_factory=lambda: "")
latency_all_edges = options.create_option(
name="latency_all_edges",
ty=bool,
docstring=
"Whether to add latency measurements on all edges. Defaults to False.")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/stats_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets for random number generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import random_seed
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.RandomDataset", v1=[])
class RandomDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` of pseudorandom values."""
def __init__(self, seed=None):
"""A `Dataset` of pseudorandom values."""
self._seed, self._seed2 = random_seed.get_seed(seed)
variant_tensor = gen_experimental_dataset_ops.random_dataset(
seed=self._seed, seed2=self._seed2, **self._flat_structure)
super(RandomDatasetV2, self).__init__(variant_tensor)
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.int64)
@tf_export(v1=["data.experimental.RandomDataset"])
class RandomDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` of pseudorandom values."""
@functools.wraps(RandomDatasetV2.__init__)
def __init__(self, seed=None):
wrapped = RandomDatasetV2(seed)
super(RandomDatasetV1, self).__init__(wrapped)
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
RandomDataset = RandomDatasetV1
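# A minimal usage sketch of the random dataset: draw three pseudorandom
# `tf.int64` values with a fixed (arbitrary) seed so the sequence is
# reproducible across runs.
if __name__ == "__main__":
  _ds = RandomDatasetV2(seed=7).take(3)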
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/random_ops.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""take-while dataset transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
class _TakeWhileDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A dataset that stops iteration when `predicate` returns false."""
def __init__(self, input_dataset, predicate):
"""See `take_while()` for details."""
self._input_dataset = input_dataset
wrapped_func = dataset_ops.StructuredFunctionWrapper(
predicate,
"tf.data.experimental.take_while()",
dataset=self._input_dataset)
if not wrapped_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.bool)):
raise ValueError("`predicate` must return a scalar boolean tensor.")
self._predicate = wrapped_func
var_tensor = gen_experimental_dataset_ops.take_while_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
other_arguments=self._predicate.function.captured_inputs,
predicate=self._predicate.function,
**self._flat_structure)
super(_TakeWhileDataset, self).__init__(input_dataset, var_tensor)
def _functions(self):
return [self._predicate]
@tf_export("data.experimental.take_while")
def take_while(predicate):
"""A transformation that stops dataset iteration based on a `predicate`.
Args:
predicate: A function that maps a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _TakeWhileDataset(dataset, predicate)
return _apply_fn
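# A minimal usage sketch of `take_while()`: iteration stops at the first
# element for which the predicate returns False, so only 0..4 are produced.
# The range size and threshold are arbitrary illustrative values.
if __name__ == "__main__":
  _ds = dataset_ops.Dataset.range(10).apply(take_while(lambda x: x < 5))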
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/take_while_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Counter Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.Counter", v1=[])
def CounterV2(start=0, step=1, dtype=dtypes.int64):
"""Creates a `Dataset` that counts from `start` in steps of size `step`.
For example:
```python
  tf.data.experimental.Counter() == [0, 1, 2, ...)
  tf.data.experimental.Counter(2) == [2, 3, ...)
  tf.data.experimental.Counter(2, 5) == [2, 7, 12, ...)
  tf.data.experimental.Counter(0, -1) == [0, -1, -2, ...)
  tf.data.experimental.Counter(10, -1) == [10, 9, ...)
```
Args:
start: (Optional.) The starting value for the counter. Defaults to 0.
step: (Optional.) The step size for the counter. Defaults to 1.
dtype: (Optional.) The data type for counter elements. Defaults to
`tf.int64`.
Returns:
A `Dataset` of scalar `dtype` elements.
"""
with ops.name_scope("counter"):
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
step = ops.convert_to_tensor(step, dtype=dtype, name="step")
return dataset_ops.Dataset.from_tensors(0).repeat(None).apply(
scan_ops.scan(start, lambda state, _: (state + step, state)))
@tf_export(v1=["data.experimental.Counter"])
def CounterV1(start=0, step=1, dtype=dtypes.int64):
return dataset_ops.DatasetV1Adapter(CounterV2(start, step, dtype))
CounterV1.__doc__ = CounterV2.__doc__
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
Counter = CounterV1 # pylint: disable=invalid-name
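# A minimal usage sketch of the counter: count from 10 upwards in steps of 2
# and keep the first three elements (10, 12, 14). The start, step, and count
# are arbitrary illustrative choices.
if __name__ == "__main__":
  _ds = CounterV2(start=10, step=2).take(3)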
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/counter.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset snapshot and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
COMPRESSION_GZIP = "GZIP"
COMPRESSION_NONE = None
class _SnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A Dataset that captures a snapshot or reads from a snapshot."""
def __init__(self,
input_dataset,
path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None,
num_reader_threads=None,
reader_buffer_size=None,
num_writer_threads=None,
writer_buffer_size=None):
self._compression = compression if compression is not None else ""
self._reader_path_prefix = (
reader_path_prefix if reader_path_prefix is not None else "")
self._writer_path_prefix = (
writer_path_prefix if writer_path_prefix is not None else "")
self._shard_size_bytes = (
shard_size_bytes if shard_size_bytes is not None else -1)
self._pending_snapshot_expiry_seconds = (
pending_snapshot_expiry_seconds
if pending_snapshot_expiry_seconds is not None else -1)
self._num_reader_threads = (
num_reader_threads if num_reader_threads is not None else -1)
self._reader_buffer_size = (
reader_buffer_size if reader_buffer_size is not None else -1)
self._num_writer_threads = (
num_writer_threads if num_writer_threads is not None else -1)
self._writer_buffer_size = (
writer_buffer_size if writer_buffer_size is not None else -1)
self._input_dataset = input_dataset
self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")
variant_tensor = ged_ops.snapshot_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
path=self._path,
compression=self._compression,
reader_path_prefix=self._reader_path_prefix,
writer_path_prefix=self._writer_path_prefix,
shard_size_bytes=self._shard_size_bytes,
pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
num_reader_threads=self._num_reader_threads,
reader_buffer_size=self._reader_buffer_size,
num_writer_threads=self._num_writer_threads,
writer_buffer_size=self._writer_buffer_size,
**self._flat_structure)
super(_SnapshotDataset, self).__init__(input_dataset, variant_tensor)
def snapshot(path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None,
num_reader_threads=None,
reader_buffer_size=None,
num_writer_threads=None,
writer_buffer_size=None):
"""Writes to/reads from a snapshot of a dataset.
This function attempts to determine whether a valid snapshot exists at the
`path`, and reads from the snapshot if so. If not, it will run the
preprocessing pipeline as usual, and write out a snapshot of the data
processed for future use.
Args:
path: A directory where we want to save our snapshots and/or read from a
previously saved snapshot.
compression: The type of compression to apply to the Dataset. Currently
supports "GZIP" or None. Defaults to None (no compression).
reader_path_prefix: A prefix to add to the path when reading from snapshots.
Defaults to None.
writer_path_prefix: A prefix to add to the path when writing to snapshots.
Defaults to None.
shard_size_bytes: The size of each shard to be written by the snapshot
dataset op. Defaults to 10 GiB.
pending_snapshot_expiry_seconds: How long to wait (in seconds) before
the snapshot op considers a previously unfinished snapshot to be stale.
num_reader_threads: Number of threads to parallelize reading from snapshot.
      Especially useful if compression is turned on, since the decompression
      operation tends to be intensive. Defaults to 1. If > 1, this might
      introduce non-determinism, i.e. the order in which elements are read
      from the snapshot may differ from the order in which they were written.
reader_buffer_size: Maximum number of elements we can prefetch reading from
the snapshot. Defaults to 1. Increasing this might improve performance
but will increase memory consumption.
    num_writer_threads: Number of threads to parallelize writing to the
      snapshot. We'll open up `num_writer_threads` files and write to them in
      parallel. Especially useful if compression is turned on, since the
      compression operation tends to be intensive. Defaults to 1. If > 1, this
      might introduce non-determinism, i.e. the order in which elements are
      consumed from the upstream iterator may differ from the order in which
      they are written.
    writer_buffer_size: Maximum number of pipeline elements to buffer before
      writing them out using the `num_writer_threads` writer threads.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _SnapshotDataset(dataset, path, compression, reader_path_prefix,
writer_path_prefix, shard_size_bytes,
pending_snapshot_expiry_seconds, num_reader_threads,
reader_buffer_size, num_writer_threads,
writer_buffer_size)
return _apply_fn
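# A minimal usage sketch of `snapshot()`. The path below is an arbitrary
# placeholder directory; on the first run the pipeline is executed and its
# output is written there, while later runs read the data back from the
# snapshot files instead of recomputing it.
if __name__ == "__main__":
  _ds = dataset_ops.Dataset.range(100).apply(
      snapshot("/tmp/example_snapshot", compression="GZIP"))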
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/experimental/ops/snapshot.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for tf.data options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _internal_attr_name(name):
return "_" + name
class OptionsBase(object):
"""Base class for representing a set of tf.data options.
Attributes:
_options: Stores the option values.
"""
def __init__(self):
# NOTE: Cannot use `self._options` here as we override `__setattr__`
object.__setattr__(self, "_options", {})
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
for name in set(self._options) | set(other._options): # pylint: disable=protected-access
if getattr(self, name) != getattr(other, name):
return False
return True
def __ne__(self, other):
if isinstance(other, self.__class__):
return not self.__eq__(other)
else:
return NotImplemented
def __setattr__(self, name, value):
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
raise AttributeError(
"Cannot set the property %s on %s." % (name, type(self).__name__))
def create_option(name, ty, docstring, default_factory=lambda: None):
"""Creates a type-checked property.
Args:
name: The name to use.
ty: The type to use. The type of the property will be validated when it
is set.
docstring: The docstring to use.
default_factory: A callable that takes no arguments and returns a default
value to use if not set.
Returns:
A type-checked property.
"""
def get_fn(option):
# pylint: disable=protected-access
if name not in option._options:
option._options[name] = default_factory()
return option._options.get(name)
def set_fn(option, value):
if not isinstance(value, (ty, type(None))):
raise TypeError("Property \"%s\" must be of type %s, got: %r (type: %r)" %
(name, ty, value, type(value)))
option._options[name] = value # pylint: disable=protected-access
return property(get_fn, set_fn, None, docstring)
def merge_options(*options_list):
"""Merges the given options, returning the result as a new options object.
The input arguments are expected to have a matching type that derives from
`OptionsBase` (and thus each represent a set of options). The method outputs
an object of the same type created by merging the sets of options represented
by the input arguments.
  The sets of options can be merged as long as no option is set to different
  non-default values across the inputs.
If an option is an instance of `OptionsBase` itself, then this method is
applied recursively to the set of options represented by this option.
Args:
*options_list: options to merge
Raises:
TypeError: if the input arguments are incompatible or not derived from
`OptionsBase`
ValueError: if the given options cannot be merged
Returns:
A new options object which is the result of merging the given options.
"""
if len(options_list) < 1:
raise ValueError("At least one options should be provided")
result_type = type(options_list[0])
for options in options_list:
if not isinstance(options, result_type):
raise TypeError("Incompatible options type: %r vs %r" % (type(options),
result_type))
if not isinstance(options_list[0], OptionsBase):
raise TypeError("The inputs should inherit from `OptionsBase`")
default_options = result_type()
result = result_type()
for options in options_list:
    # Iterate over all set options and merge them into the result.
for name in options._options: # pylint: disable=protected-access
this = getattr(result, name)
that = getattr(options, name)
default = getattr(default_options, name)
if that == default:
continue
elif this == default:
setattr(result, name, that)
elif isinstance(this, OptionsBase):
setattr(result, name, merge_options(this, that))
elif this != that:
raise ValueError(
"Cannot merge incompatible values (%r and %r) of option: %s" %
(this, that, name))
return result
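# A minimal usage sketch of the helpers above: define a small options class
# with one type-checked property, then merge two instances. The class name and
# property are illustrative only and not part of the library.
if __name__ == "__main__":
  class _ExampleOptions(OptionsBase):
    verbosity = create_option(
        name="verbosity",
        ty=int,
        docstring="An illustrative integer option; defaults to None.")
  _defaults = _ExampleOptions()
  _overrides = _ExampleOptions()
  _overrides.verbosity = 2
  # The merged result keeps the single non-default value.
  assert merge_options(_defaults, _overrides).verbosity == 2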
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It removes support for lists as a level of nesting in nested structures.
2. It adds support for `SparseTensorValue` as an atomic element.
The motivation for this change is twofold:
1. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
2. This is needed because `SparseTensorValue` is implemented as a `namedtuple`
   that would normally be flattened, and we want to be able to create sparse
   tensors from `SparseTensorValue`s similarly to creating tensors from numpy
   arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
from tensorflow.python.util.compat import collections_abc as _collections_abc
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(list(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
    instance: an instance of `tuple`, a `namedtuple` class, or `dict`.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
return type(instance)((key, result[key]) for key in instance)
elif (isinstance(instance, tuple) and hasattr(instance, "_fields") and
isinstance(instance._fields, _collections_abc.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
if isinstance(iterable, dict):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield iterable[key]
elif isinstance(iterable, _sparse_tensor.SparseTensorValue):
yield iterable
else:
for value in iterable:
yield value
# See the swig file (../../util/util.i) for documentation.
is_sequence = _pywrap_tensorflow.IsSequenceForData
# See the swig file (../../util/util.i) for documentation.
flatten = _pywrap_tensorflow.FlattenForData
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
    check_types: if `True` (default), the types of sequences must match as
      well. For dictionaries, the "type" is considered to include the keys; in
      other words, two dictionaries with different keys are considered to have
      different "types". If set to `False`, two iterables are considered the
      same as long as the elements they yield have the same structure.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
_pywrap_tensorflow.AssertSameStructureForData(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
    The tuple (new_index, packed), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
    ValueError: If `flat_sequence` and `structure` have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
    func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or tuple or list constructed of scalars and/or other
      tuples/lists. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
      exception). To allow mismatched types, set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
if check_types and isinstance(shallow_tree, dict):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
"structure has keys %s, while shallow structure has keys %s." %
(list(input_tree), list(shallow_tree)))
input_tree = list(sorted(_six.iteritems(input_tree)))
shallow_tree = list(sorted(_six.iteritems(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
  partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
  achieve this by specifying a shallow structure, `shallow_tree`, we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function, therefore, will return something with the same base structure
as `shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
  # then repack based on the structure of `shallow_tree`.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
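# A minimal usage sketch of `map_structure` and `pack_sequence_as` on a small
# dict-of-tuples structure; the Python integers are arbitrary illustrative
# values. (Lists are intentionally avoided, since this fork does not treat
# them as a level of nesting.)
if __name__ == "__main__":
  _structure = {"a": 1, "b": (2, 3)}
  _doubled = map_structure(lambda x: x * 2, _structure)
  # _doubled == {"a": 2, "b": (4, 6)}
  _repacked = pack_sequence_as(_structure, flatten(_doubled))
  # _repacked has the same nested structure as `_structure`.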
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/nest.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import random_seed as data_random_seed
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
zero_t = constant_op.constant(0, dtype=dtypes.int64, name='zero')
one_t = constant_op.constant(1, dtype=dtypes.int64, name='one')
intmax_t = constant_op.constant(
2**31 - 1, dtype=dtypes.int64, name='intmax')
test_cases = [
# Each test case is a tuple with input to get_seed:
# (input_graph_seed, input_op_seed)
# and output from get_seed:
# (output_graph_seed, output_op_seed)
((None, None), (0, 0)),
((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, 1), (1, 1)),
((0, 0), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, 0), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, 2**31 - 1), (0, 2**31 - 1)), # Wrapping for the other argument
# Once more, with tensor-valued arguments
((None, one_t), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, one_t), (1, 1)),
((0, zero_t), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, zero_t), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, intmax_t), (0, 2**31 - 1)), # Wrapping for the other argument
]
for tc in test_cases:
tinput, toutput = tc[0], tc[1]
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = 'test_case = {0}, got {1}, want {2}'.format(
tinput, (g_seed, op_seed), toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if not context.executing_eagerly():
random_seed.set_random_seed(1)
tinput = (1, None)
toutput = (1, ops.get_default_graph()._last_id) # pylint: disable=protected-access
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = 'test_case = {0}, got {1}, want {2}'.format(1, (g_seed, op_seed),
toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/random_seed_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities for traversing the dataset construction graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import traverse
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class _TestDataset(dataset_ops.UnaryUnchangedStructureDataset):
def __init__(self, input_dataset):
self._input_dataset = input_dataset
temp_variant_tensor = gen_dataset_ops.prefetch_dataset(
input_dataset._variant_tensor,
buffer_size=1,
**self._flat_structure)
variant_tensor = gen_dataset_ops.model_dataset(
temp_variant_tensor, **self._flat_structure)
super(_TestDataset, self).__init__(input_dataset, variant_tensor)
class TraverseTest(test.TestCase):
@test_util.run_deprecated_v1
def testOnlySource(self):
ds = dataset_ops.Dataset.range(10)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertAllEqual(["RangeDataset"], [x.name for x in variant_tensor_ops])
@test_util.run_deprecated_v1
def testSimplePipeline(self):
ds = dataset_ops.Dataset.range(10).map(math_ops.square)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["MapDataset", "RangeDataset"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testConcat(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = ds1.concatenate(ds2)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ConcatenateDataset", "RangeDataset", "RangeDataset_1"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testZip(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = dataset_ops.Dataset.zip((ds1, ds2))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ZipDataset", "RangeDataset", "RangeDataset_1"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testMultipleVariantTensors(self):
ds = dataset_ops.Dataset.range(10)
ds = _TestDataset(ds)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["RangeDataset", "ModelDataset", "PrefetchDataset"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testFlatMap(self):
ds1 = dataset_ops.Dataset.range(10).repeat(10)
def map_fn(ds):
def _map(x):
return ds.batch(x)
return _map
ds2 = dataset_ops.Dataset.range(20).prefetch(1)
ds2 = ds2.flat_map(map_fn(ds1))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds2)
self.assertSetEqual(
set([
"FlatMapDataset", "PrefetchDataset", "RepeatDataset",
"RangeDataset", "RangeDataset_1"
]), set([x.name for x in variant_tensor_ops]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/traverse_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers constructing Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def optional_param_to_tensor(argument_name,
argument_value,
argument_default=0,
argument_dtype=dtypes.int64):
if argument_value is not None:
return ops.convert_to_tensor(
argument_value, dtype=argument_dtype, name=argument_name)
else:
return constant_op.constant(
argument_default, dtype=argument_dtype, name=argument_name)
def partial_shape_to_tensor(shape_like):
"""Returns a `tf.Tensor` that represents the given shape.
Args:
shape_like: A value that can be converted to a `tf.TensorShape` or a
`tf.Tensor`.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where
`-1` is substituted for any unknown dimensions.
"""
try:
# First attempt to convert the input to a shape, and return the
# "canonical" tensor representation, which uses `-1` in place of
# `None`.
shape_like = tensor_shape.as_shape(shape_like)
return ops.convert_to_tensor(
[dim if dim is not None else -1 for dim in shape_like.as_list()],
dtype=dtypes.int64)
except (TypeError, ValueError):
# The argument was not trivially convertible to a
# `tf.TensorShape`, so fall back on the conversion to tensor
# machinery.
ret = ops.convert_to_tensor(shape_like, preferred_dtype=dtypes.int64)
if ret.shape.dims is not None and len(ret.shape.dims) != 1:
raise ValueError("The given shape %s must be a 1-D tensor of tf.int64 "
"values, but the shape was %s."
% (shape_like, ret.shape))
if ret.dtype != dtypes.int64:
raise TypeError("The given shape %s must be a 1-D tensor of tf.int64 "
"values, but the element type was %s."
% (shape_like, ret.dtype.name))
return ret
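# A minimal usage sketch of the helpers above; the shape and default value are
# arbitrary illustrative choices.
if __name__ == "__main__":
  # A partially known shape becomes a 1-D int64 tensor with -1 for unknowns:
  # [None, 3] ==> [-1, 3].
  _shape_t = partial_shape_to_tensor(tensor_shape.TensorShape([None, 3]))
  # An omitted optional parameter falls back to its default as an int64 tensor.
  _buffer_size_t = optional_param_to_tensor(
      "buffer_size", argument_value=None, argument_default=8)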
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/convert.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import test
# NOTE(mrry): Arguments of parameterized tests are lifted into lambdas to make
# sure they are not executed before the (eager- or graph-mode) test environment
# has been set up.
#
# TODO(jsimsa): Add tests for OptionalStructure and DatasetStructure.
class StructureTest(test_base.DatasetTestBase, parameterized.TestCase,
test_util.TensorFlowTestCase):
# pylint: disable=g-long-lambda,protected-access
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0), tensor_spec.TensorSpec,
[dtypes.float32], [[]]),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
tensor_array_ops.TensorArraySpec, [dtypes.variant], [[]]),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensorSpec, [dtypes.variant], [None]),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [4]]),
ragged_tensor.RaggedTensorSpec, [dtypes.variant], [None]),
("Nested_0",
lambda: (constant_op.constant(37.0), constant_op.constant([1, 2, 3])),
tuple, [dtypes.float32, dtypes.int32], [[], [3]]),
("Nested_1", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, dict, [dtypes.float32, dtypes.int32], [[], [3]]),
("Nested_2", lambda: {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
}, dict, [dtypes.float32, dtypes.variant, dtypes.variant], [[], None, None
]),
)
def testFlatStructure(self, value_fn, expected_structure, expected_types,
expected_shapes):
value = value_fn()
s = structure.type_spec_from_value(value)
self.assertIsInstance(s, expected_structure)
flat_types = structure.get_flat_tensor_types(s)
self.assertEqual(expected_types, flat_types)
flat_shapes = structure.get_flat_tensor_shapes(s)
self.assertLen(flat_shapes, len(expected_shapes))
for expected, actual in zip(expected_shapes, flat_shapes):
if expected is None:
self.assertEqual(actual.ndims, None)
else:
self.assertEqual(actual.as_list(), expected)
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0), lambda: [
constant_op.constant(38.0),
array_ops.placeholder(dtypes.float32),
variables.Variable(100.0), 42.0,
np.array(42.0, dtype=np.float32)
], lambda: [constant_op.constant([1.0, 2.0]),
constant_op.constant(37)]),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0), lambda: [
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=10)
], lambda: [
tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(3,), size=0),
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(), size=0)
]),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: [
sparse_tensor.SparseTensor(
indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
sparse_tensor.SparseTensorValue(
indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
array_ops.sparse_placeholder(dtype=dtypes.int32),
array_ops.sparse_placeholder(dtype=dtypes.int32, shape=[None, None])
], lambda: [
constant_op.constant(37, shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[5, 6]),
array_ops.sparse_placeholder(
dtype=dtypes.int32, shape=[None, None, None]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1.0], dense_shape=[4, 5])
]),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [3]]),
lambda: [
ragged_factory_ops.constant([[1, 2], [3, 4], []]),
ragged_factory_ops.constant([[1], [2, 3, 4], [5]]),
], lambda: [
ragged_factory_ops.constant(1),
ragged_factory_ops.constant([1, 2]),
ragged_factory_ops.constant([[1], [2]]),
ragged_factory_ops.constant([["a", "b"]]),
]),
("Nested", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, lambda: [{
"a": constant_op.constant(15.0),
"b": constant_op.constant([4, 5, 6])
}], lambda: [{
"a": constant_op.constant(15.0),
"b": constant_op.constant([4, 5, 6, 7])
}, {
"a": constant_op.constant(15),
"b": constant_op.constant([4, 5, 6])
}, {
"a":
constant_op.constant(15),
"b":
sparse_tensor.SparseTensor(
indices=[[0], [1], [2]], values=[4, 5, 6], dense_shape=[3])
}, (constant_op.constant(15.0), constant_op.constant([4, 5, 6]))]),
)
@test_util.run_deprecated_v1
def testIsCompatibleWithStructure(self, original_value_fn,
compatible_values_fn,
incompatible_values_fn):
original_value = original_value_fn()
compatible_values = compatible_values_fn()
incompatible_values = incompatible_values_fn()
s = structure.type_spec_from_value(original_value)
for compatible_value in compatible_values:
self.assertTrue(
structure.are_compatible(
s, structure.type_spec_from_value(compatible_value)))
for incompatible_value in incompatible_values:
self.assertFalse(
structure.are_compatible(
s, structure.type_spec_from_value(incompatible_value)))
@parameterized.named_parameters(
("Tensor",
lambda: constant_op.constant(37.0),
lambda: constant_op.constant(42.0),
lambda: constant_op.constant([5])),
("TensorArray",
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=0)),
("SparseTensor",
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[1, 2]], values=[42], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[3]], values=[-1], dense_shape=[5]),
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[1.0], dense_shape=[4, 5])),
("RaggedTensor",
lambda: ragged_factory_ops.constant([[[1, 2]], [[3]]]),
lambda: ragged_factory_ops.constant([[[5]], [[8], [3, 2]]]),
lambda: ragged_factory_ops.constant([[[1]], [[2], [3]]],
ragged_rank=1),
lambda: ragged_factory_ops.constant([[[1.0, 2.0]], [[3.0]]]),
lambda: ragged_factory_ops.constant([[[1]], [[2]], [[3]]])),
("Nested",
lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])},
lambda: {
"a": constant_op.constant(42.0),
"b": constant_op.constant([4, 5, 6])},
lambda: {
"a": constant_op.constant([1, 2, 3]),
"b": constant_op.constant(37.0)
}),
) # pyformat: disable
def testStructureFromValueEquality(self, value1_fn, value2_fn,
*not_equal_value_fns):
# pylint: disable=g-generic-assert
s1 = structure.type_spec_from_value(value1_fn())
s2 = structure.type_spec_from_value(value2_fn())
self.assertEqual(s1, s1) # check __eq__ operator.
self.assertEqual(s1, s2) # check __eq__ operator.
self.assertFalse(s1 != s1) # check __ne__ operator.
self.assertFalse(s1 != s2) # check __ne__ operator.
for c1, c2 in zip(nest.flatten(s1), nest.flatten(s2)):
self.assertEqual(hash(c1), hash(c1))
self.assertEqual(hash(c1), hash(c2))
for value_fn in not_equal_value_fns:
s3 = structure.type_spec_from_value(value_fn())
self.assertNotEqual(s1, s3) # check __ne__ operator.
self.assertNotEqual(s2, s3) # check __ne__ operator.
      self.assertFalse(s1 == s3)  # check __eq__ operator.
      self.assertFalse(s2 == s3)  # check __eq__ operator.
@parameterized.named_parameters(
("RaggedTensor_RaggedRank",
ragged_tensor.RaggedTensorSpec(None, dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec(None, dtypes.int32, 2)),
("RaggedTensor_Shape",
ragged_tensor.RaggedTensorSpec([3, None], dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec([5, None], dtypes.int32, 1)),
("RaggedTensor_DType",
ragged_tensor.RaggedTensorSpec(None, dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec(None, dtypes.float32, 1)),
)
def testRaggedStructureInequality(self, s1, s2):
# pylint: disable=g-generic-assert
self.assertNotEqual(s1, s2) # check __ne__ operator.
self.assertFalse(s1 == s2) # check __eq__ operator.
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
lambda: constant_op.constant(42.0), lambda: constant_op.constant([5])),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=0)),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[1, 2]], values=[42], dense_shape=[4, 5]), lambda:
sparse_tensor.SparseTensor(indices=[[3]], values=[-1], dense_shape=[5])),
("Nested", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, lambda: {
"a": constant_op.constant(42.0),
"b": constant_op.constant([4, 5, 6])
}, lambda: {
"a": constant_op.constant([1, 2, 3]),
"b": constant_op.constant(37.0)
}),
)
def testHash(self, value1_fn, value2_fn, value3_fn):
s1 = structure.type_spec_from_value(value1_fn())
s2 = structure.type_spec_from_value(value2_fn())
s3 = structure.type_spec_from_value(value3_fn())
for c1, c2, c3 in zip(nest.flatten(s1), nest.flatten(s2), nest.flatten(s3)):
self.assertEqual(hash(c1), hash(c1))
self.assertEqual(hash(c1), hash(c2))
self.assertNotEqual(hash(c1), hash(c3))
self.assertNotEqual(hash(c2), hash(c3))
@parameterized.named_parameters(
(
"Tensor",
lambda: constant_op.constant(37.0),
),
(
"SparseTensor",
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(), size=1).write(0, 7)),
(
"RaggedTensor",
lambda: ragged_factory_ops.constant([[1, 2], [], [3]]),
),
(
"Nested_0",
lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
},
),
(
"Nested_1",
lambda: {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
},
),
)
def testRoundTripConversion(self, value_fn):
value = value_fn()
s = structure.type_spec_from_value(value)
def maybe_stack_ta(v):
if isinstance(v, tensor_array_ops.TensorArray):
return v.stack()
else:
return v
before = self.evaluate(maybe_stack_ta(value))
after = self.evaluate(
maybe_stack_ta(
structure.from_tensor_list(s, structure.to_tensor_list(s, value))))
flat_before = nest.flatten(before)
flat_after = nest.flatten(after)
for b, a in zip(flat_before, flat_after):
if isinstance(b, sparse_tensor.SparseTensorValue):
self.assertAllEqual(b.indices, a.indices)
self.assertAllEqual(b.values, a.values)
self.assertAllEqual(b.dense_shape, a.dense_shape)
elif isinstance(
b,
(ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue)):
self.assertAllEqual(b, a)
else:
self.assertAllEqual(b, a)
# pylint: enable=g-long-lambda
  def testPreserveStaticShape(self):
rt = ragged_factory_ops.constant([[1, 2], [], [3]])
rt_s = structure.type_spec_from_value(rt)
rt_after = structure.from_tensor_list(rt_s,
structure.to_tensor_list(rt_s, rt))
self.assertEqual(rt_after.row_splits.shape.as_list(),
rt.row_splits.shape.as_list())
self.assertEqual(rt_after.values.shape.as_list(), [None])
st = sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5])
st_s = structure.type_spec_from_value(st)
st_after = structure.from_tensor_list(st_s,
structure.to_tensor_list(st_s, st))
self.assertEqual(st_after.indices.shape.as_list(), [None, 2])
self.assertEqual(st_after.values.shape.as_list(), [None])
self.assertEqual(st_after.dense_shape.shape.as_list(),
st.dense_shape.shape.as_list())
def testPreserveTensorArrayShape(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=1, element_shape=(3,))
ta_s = structure.type_spec_from_value(ta)
ta_after = structure.from_tensor_list(ta_s,
structure.to_tensor_list(ta_s, ta))
self.assertEqual(ta_after.element_shape.as_list(), [3])
def testPreserveInferredTensorArrayShape(self):
ta = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=1)
# Shape is inferred from the write.
ta = ta.write(0, [1, 2, 3])
ta_s = structure.type_spec_from_value(ta)
ta_after = structure.from_tensor_list(ta_s,
structure.to_tensor_list(ta_s, ta))
self.assertEqual(ta_after.element_shape.as_list(), [3])
def testIncompatibleStructure(self):
# Define three mutually incompatible values/structures, and assert that:
# 1. Using one structure to flatten a value with an incompatible structure
# fails.
    # 2. Using one structure to restructure a flattened value with an
# incompatible structure fails.
value_tensor = constant_op.constant(42.0)
s_tensor = structure.type_spec_from_value(value_tensor)
flat_tensor = structure.to_tensor_list(s_tensor, value_tensor)
value_sparse_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])
s_sparse_tensor = structure.type_spec_from_value(value_sparse_tensor)
flat_sparse_tensor = structure.to_tensor_list(s_sparse_tensor,
value_sparse_tensor)
value_nest = {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}
s_nest = structure.type_spec_from_value(value_nest)
flat_nest = structure.to_tensor_list(s_nest, value_nest)
with self.assertRaisesRegexp(
ValueError, r"SparseTensor.* is not convertible to a tensor with "
r"dtype.*float32.* and shape \(\)"):
structure.to_tensor_list(s_tensor, value_sparse_tensor)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_tensor, value_nest)
with self.assertRaisesRegexp(
TypeError, "Neither a SparseTensor nor SparseTensorValue"):
structure.to_tensor_list(s_sparse_tensor, value_tensor)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_sparse_tensor, value_nest)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_nest, value_tensor)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_nest, value_sparse_tensor)
with self.assertRaisesRegexp(ValueError, r"Incompatible input:"):
structure.from_tensor_list(s_tensor, flat_sparse_tensor)
with self.assertRaisesRegexp(ValueError, "Expected 1 tensors but got 2."):
structure.from_tensor_list(s_tensor, flat_nest)
with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
structure.from_tensor_list(s_sparse_tensor, flat_tensor)
with self.assertRaisesRegexp(ValueError, "Expected 1 tensors but got 2."):
structure.from_tensor_list(s_sparse_tensor, flat_nest)
with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 1."):
structure.from_tensor_list(s_nest, flat_tensor)
with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 1."):
structure.from_tensor_list(s_nest, flat_sparse_tensor)
def testIncompatibleNestedStructure(self):
# Define three mutually incompatible nested values/structures, and assert
# that:
# 1. Using one structure to flatten a value with an incompatible structure
# fails.
# 2. Using one structure to restructure a flattened value with an
# incompatible structure fails.
value_0 = {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}
s_0 = structure.type_spec_from_value(value_0)
flat_s_0 = structure.to_tensor_list(s_0, value_0)
# `value_1` has compatible nested structure with `value_0`, but different
# classes.
value_1 = {
"a":
constant_op.constant(37.0),
"b":
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])
}
s_1 = structure.type_spec_from_value(value_1)
flat_s_1 = structure.to_tensor_list(s_1, value_1)
# `value_2` has incompatible nested structure with `value_0` and `value_1`.
value_2 = {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
}
s_2 = structure.type_spec_from_value(value_2)
flat_s_2 = structure.to_tensor_list(s_2, value_2)
with self.assertRaisesRegexp(
ValueError, r"SparseTensor.* is not convertible to a tensor with "
r"dtype.*int32.* and shape \(3,\)"):
structure.to_tensor_list(s_0, value_1)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_0, value_2)
with self.assertRaisesRegexp(
TypeError, "Neither a SparseTensor nor SparseTensorValue"):
structure.to_tensor_list(s_1, value_0)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_1, value_2)
# NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
# needs to account for "a" coming before or after "b". It might be worth
# adding a deterministic repr for these error messages (among other
# improvements).
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_2, value_0)
with self.assertRaisesRegexp(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_2, value_1)
with self.assertRaisesRegexp(ValueError, r"Incompatible input:"):
structure.from_tensor_list(s_0, flat_s_1)
with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 3."):
structure.from_tensor_list(s_0, flat_s_2)
with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
structure.from_tensor_list(s_1, flat_s_0)
with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 3."):
structure.from_tensor_list(s_1, flat_s_2)
with self.assertRaisesRegexp(ValueError, "Expected 3 tensors but got 2."):
structure.from_tensor_list(s_2, flat_s_0)
with self.assertRaisesRegexp(ValueError, "Expected 3 tensors but got 2."):
structure.from_tensor_list(s_2, flat_s_1)
@parameterized.named_parameters(
("Tensor", dtypes.float32, tensor_shape.TensorShape(
[]), ops.Tensor, tensor_spec.TensorSpec([], dtypes.float32)),
("SparseTensor", dtypes.int32, tensor_shape.TensorShape(
[2, 2]), sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorSpec([2, 2], dtypes.int32)),
("TensorArray_0", dtypes.int32,
tensor_shape.TensorShape([None, True, 2, 2
]), tensor_array_ops.TensorArray,
tensor_array_ops.TensorArraySpec(
[2, 2], dtypes.int32, dynamic_size=None, infer_shape=True)),
("TensorArray_1", dtypes.int32,
tensor_shape.TensorShape([True, None, 2, 2
]), tensor_array_ops.TensorArray,
tensor_array_ops.TensorArraySpec(
[2, 2], dtypes.int32, dynamic_size=True, infer_shape=None)),
("TensorArray_2", dtypes.int32,
tensor_shape.TensorShape([True, False, 2, 2
]), tensor_array_ops.TensorArray,
tensor_array_ops.TensorArraySpec(
[2, 2], dtypes.int32, dynamic_size=True, infer_shape=False)),
("RaggedTensor", dtypes.int32, tensor_shape.TensorShape([2, None]),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32, 1)),
("Nested", {
"a": dtypes.float32,
"b": (dtypes.int32, dtypes.string)
}, {
"a": tensor_shape.TensorShape([]),
"b": (tensor_shape.TensorShape([2, 2]), tensor_shape.TensorShape([]))
}, {
"a": ops.Tensor,
"b": (sparse_tensor.SparseTensor, ops.Tensor)
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec(
[2, 2], dtypes.int32), tensor_spec.TensorSpec([], dtypes.string))
}),
)
def testConvertLegacyStructure(self, output_types, output_shapes,
output_classes, expected_structure):
actual_structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
self.assertEqual(actual_structure, expected_structure)
def testNestedNestedStructure(self):
s = (tensor_spec.TensorSpec([], dtypes.int64),
(tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.string)))
int64_t = constant_op.constant(37, dtype=dtypes.int64)
float32_t = constant_op.constant(42.0)
string_t = constant_op.constant("Foo")
nested_tensors = (int64_t, (float32_t, string_t))
tensor_list = structure.to_tensor_list(s, nested_tensors)
for expected, actual in zip([int64_t, float32_t, string_t], tensor_list):
self.assertIs(expected, actual)
(actual_int64_t,
(actual_float32_t,
actual_string_t)) = structure.from_tensor_list(s, tensor_list)
self.assertIs(int64_t, actual_int64_t)
self.assertIs(float32_t, actual_float32_t)
self.assertIs(string_t, actual_string_t)
(actual_int64_t, (actual_float32_t, actual_string_t)) = (
structure.from_compatible_tensor_list(s, tensor_list))
self.assertIs(int64_t, actual_int64_t)
self.assertIs(float32_t, actual_float32_t)
self.assertIs(string_t, actual_string_t)
@parameterized.named_parameters(
("Tensor", tensor_spec.TensorSpec([], dtypes.float32), 32,
tensor_spec.TensorSpec([32], dtypes.float32)),
("TensorUnknown", tensor_spec.TensorSpec([], dtypes.float32), None,
tensor_spec.TensorSpec([None], dtypes.float32)),
("SparseTensor", sparse_tensor.SparseTensorSpec([None], dtypes.float32),
32, sparse_tensor.SparseTensorSpec([32, None], dtypes.float32)),
("SparseTensorUnknown",
sparse_tensor.SparseTensorSpec([4], dtypes.float32), None,
sparse_tensor.SparseTensorSpec([None, 4], dtypes.float32)),
("RaggedTensor",
ragged_tensor.RaggedTensorSpec([2, None], dtypes.float32, 1), 32,
ragged_tensor.RaggedTensorSpec([32, 2, None], dtypes.float32, 2)),
("RaggedTensorUnknown",
ragged_tensor.RaggedTensorSpec([4, None], dtypes.float32, 1), None,
ragged_tensor.RaggedTensorSpec([None, 4, None], dtypes.float32, 2)),
("Nested", {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec([2, 2], dtypes.int32),
tensor_spec.TensorSpec([], dtypes.string))
}, 128, {
"a":
tensor_spec.TensorSpec([128], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec([128, 2, 2], dtypes.int32),
tensor_spec.TensorSpec([128], dtypes.string))
}),
)
def testBatch(self, element_structure, batch_size,
expected_batched_structure):
batched_structure = nest.map_structure(
lambda component_spec: component_spec._batch(batch_size),
element_structure)
self.assertEqual(batched_structure, expected_batched_structure)
@parameterized.named_parameters(
("Tensor", tensor_spec.TensorSpec(
[32], dtypes.float32), tensor_spec.TensorSpec([], dtypes.float32)),
("TensorUnknown", tensor_spec.TensorSpec(
[None], dtypes.float32), tensor_spec.TensorSpec([], dtypes.float32)),
("SparseTensor", sparse_tensor.SparseTensorSpec([32, None],
dtypes.float32),
sparse_tensor.SparseTensorSpec([None], dtypes.float32)),
("SparseTensorUnknown",
sparse_tensor.SparseTensorSpec([None, 4], dtypes.float32),
sparse_tensor.SparseTensorSpec([4], dtypes.float32)),
("RaggedTensor",
ragged_tensor.RaggedTensorSpec([32, None, None], dtypes.float32, 2),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32, 1)),
("RaggedTensorUnknown",
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.float32, 2),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32, 1)),
("Nested", {
"a":
tensor_spec.TensorSpec([128], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec([128, 2, 2], dtypes.int32),
tensor_spec.TensorSpec([None], dtypes.string))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec(
[2, 2], dtypes.int32), tensor_spec.TensorSpec([], dtypes.string))
}),
)
def testUnbatch(self, element_structure, expected_unbatched_structure):
unbatched_structure = nest.map_structure(
lambda component_spec: component_spec._unbatch(), element_structure)
self.assertEqual(unbatched_structure, expected_unbatched_structure)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
lambda: constant_op.constant([1.0, 2.0])),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=[13], dense_shape=[2])),
("RaggedTensor", lambda: ragged_factory_ops.constant([[[1]], [[2]]]),
lambda: ragged_factory_ops.constant([[1]])),
("Nest", lambda:
(constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2])),
lambda: (constant_op.constant([1.0, 2.0]),
sparse_tensor.SparseTensor(
indices=[[0]], values=[13], dense_shape=[2]))),
)
def testToBatchedTensorList(self, value_fn, element_0_fn):
batched_value = value_fn()
s = structure.type_spec_from_value(batched_value)
batched_tensor_list = structure.to_batched_tensor_list(s, batched_value)
# The batch dimension is 2 for all of the test cases.
# NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
# tensors in which we store sparse tensors.
for t in batched_tensor_list:
if t.dtype != dtypes.variant:
self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))
# Test that the 0th element from the unbatched tensor is equal to the
# expected value.
expected_element_0 = self.evaluate(element_0_fn())
unbatched_s = nest.map_structure(
lambda component_spec: component_spec._unbatch(), s)
actual_element_0 = structure.from_tensor_list(
unbatched_s, [t[0] for t in batched_tensor_list])
for expected, actual in zip(
nest.flatten(expected_element_0), nest.flatten(actual_element_0)):
self.assertValuesEqual(expected, actual)
# pylint: enable=g-long-lambda
def testDatasetSpecConstructor(self):
rt_spec = ragged_tensor.RaggedTensorSpec([10, None], dtypes.int32)
st_spec = sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32)
t_spec = tensor_spec.TensorSpec([10, 8], dtypes.string)
element_spec = {"rt": rt_spec, "st": st_spec, "t": t_spec}
ds_struct = dataset_ops.DatasetSpec(element_spec, [5])
self.assertEqual(ds_struct._element_spec, element_spec)
# Note: shape was automatically converted from a list to a TensorShape.
self.assertEqual(ds_struct._dataset_shape, tensor_shape.TensorShape([5]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/structure_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to traverse the Dataset dependency structure."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
def obtain_all_variant_tensor_ops(dataset):
"""Given an input dataset, finds all dataset ops used for construction.
  A series of transformations would have created this dataset, with each
  transformation including zero or more dataset ops, each of which produces a
  dataset variant tensor. This method returns all of them.
Args:
dataset: Dataset to find variant tensors for.
Returns:
A list of variant_tensor producing dataset ops used to construct this
dataset.
"""
all_variant_tensor_ops = []
bfs_q = Queue.Queue()
bfs_q.put(dataset._variant_tensor.op) # pylint: disable=protected-access
visited = []
while not bfs_q.empty():
op = bfs_q.get()
visited.append(op)
# We look for all ops that produce variant tensors as output. This is a bit
# of overkill but the other dataset _inputs() traversal strategies can't
# cover the case of function inputs that capture dataset variants.
# TODO(b/120873778): Make this more efficient.
if op.outputs[0].dtype == dtypes.variant:
all_variant_tensor_ops.append(op)
for i in op.inputs:
input_op = i.op
if input_op not in visited:
bfs_q.put(input_op)
return all_variant_tensor_ops
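# NOTE: The following sketch is an editorial addition, not part of the original
# module. It illustrates one plausible way to exercise the traversal above on a
# small, arbitrary example pipeline. `dataset_ops` is imported locally,
# mirroring the lazy-import pattern tf.data uses elsewhere to avoid a circular
# dependency.
def _example_obtain_all_variant_tensor_ops():
  """Hypothetical usage sketch for `obtain_all_variant_tensor_ops`."""
  from tensorflow.python.data.ops import dataset_ops  # pylint: disable=g-import-not-at-top
  dataset = dataset_ops.Dataset.range(10).map(lambda x: x + 1).batch(2)
  variant_ops = obtain_all_variant_tensor_ops(dataset)
  # Every transformation (range, map, batch, ...) produces a DT_VARIANT dataset
  # tensor, so each corresponding op shows up in the returned list.
  return [op.name for op in variant_ops]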
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/traverse.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset options utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.platform import test
class _TestOptions(options.OptionsBase):
x = options.create_option(
name="x",
ty=int,
docstring="the answer to everything",
default_factory=lambda: 42)
y = options.create_option(
name="y", ty=float, docstring="a tasty pie", default_factory=lambda: 3.14)
class _NestedTestOptions(options.OptionsBase):
opts = options.create_option(
name="opts", ty=_TestOptions, docstring="nested options")
class OptionsTest(test.TestCase):
def testDocumentation(self):
self.assertEqual(_TestOptions.x.__doc__, "the answer to everything")
self.assertEqual(_TestOptions.y.__doc__, "a tasty pie")
def testCreateOption(self):
opts = _TestOptions()
self.assertEqual(opts.x, 42)
self.assertEqual(opts.y, 3.14)
self.assertIsInstance(opts.x, int)
self.assertIsInstance(opts.y, float)
opts.x = 0
self.assertEqual(opts.x, 0)
with self.assertRaises(TypeError):
opts.x = 3.14
opts.y = 0.0
self.assertEqual(opts.y, 0.0)
with self.assertRaises(TypeError):
opts.y = 42
def testMergeOptions(self):
options1, options2 = _TestOptions(), _TestOptions()
with self.assertRaises(ValueError):
options.merge_options()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.x, 42)
self.assertEqual(merged_options.y, 3.14)
options1.x = 0
options2.y = 0.0
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.x, 0)
self.assertEqual(merged_options.y, 0.0)
def testMergeNestedOptions(self):
options1, options2 = _NestedTestOptions(), _NestedTestOptions()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts, None)
options1.opts = _TestOptions()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts, _TestOptions())
options2.opts = _TestOptions()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts, _TestOptions())
options1.opts.x = 0
options2.opts.y = 0.0
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts.x, 0)
self.assertEqual(merged_options.opts.y, 0.0)
def testMergeOptionsInvalid(self):
with self.assertRaises(TypeError):
options.merge_options(0)
options1, options2 = _TestOptions(), _NestedTestOptions()
with self.assertRaises(TypeError):
options.merge_options(options1, options2)
def testNoSpuriousAttrs(self):
test_options = _TestOptions()
with self.assertRaises(AttributeError):
test_options.wrong_attr = True
with self.assertRaises(AttributeError):
_ = test_options.wrong_attr
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/options_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for describing the structure of a `tf.data` type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from tensorflow.python.data.util import nest
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=invalid-name
@tf_export(v1=["data.experimental.TensorStructure"])
@deprecation.deprecated(None, "Use `tf.TensorSpec` instead.")
def _TensorStructure(dtype, shape):
return tensor_spec.TensorSpec(shape, dtype)
@tf_export(v1=["data.experimental.SparseTensorStructure"])
@deprecation.deprecated(None, "Use `tf.SparseTensorSpec` instead.")
def _SparseTensorStructure(dtype, shape):
return sparse_tensor.SparseTensorSpec(shape, dtype)
@tf_export(v1=["data.experimental.TensorArrayStructure"])
@deprecation.deprecated(None, "Use `tf.TensorArraySpec` instead.")
def _TensorArrayStructure(dtype, element_shape, dynamic_size, infer_shape):
return tensor_array_ops.TensorArraySpec(element_shape, dtype,
dynamic_size, infer_shape)
@tf_export(v1=["data.experimental.RaggedTensorStructure"])
@deprecation.deprecated(None, "Use `tf.RaggedTensorSpec` instead.")
def _RaggedTensorStructure(dtype, shape, ragged_rank):
return ragged_tensor.RaggedTensorSpec(shape, dtype, ragged_rank)
# pylint: enable=invalid-name
# TODO(jsimsa): Remove the special-case for `TensorArray` pass-through once
# it is a subclass of `CompositeTensor`.
def normalize_element(element):
"""Normalizes a nested structure of element components.
* Components matching `SparseTensorSpec` are converted to `SparseTensor`.
* Components matching `RaggedTensorSpec` are converted to `RaggedTensor`.
* Components matching `DatasetSpec` or `TensorArraySpec` are passed through.
* `CompositeTensor` components are passed through.
* All other components are converted to `Tensor`.
Args:
element: A nested structure of individual components.
Returns:
A nested structure of `Tensor`, `Dataset`, `SparseTensor`, `RaggedTensor`,
or `TensorArray` objects.
"""
components = nest.flatten(element)
normalized_components = []
with ops.name_scope("normalize_element"):
# Imported here to avoid circular dependency.
from tensorflow.python.data.ops import dataset_ops # pylint: disable=g-import-not-at-top
for i, t in enumerate(components):
try:
spec = type_spec_from_value(t, use_fallback=False)
except TypeError:
# TypeError indicates it was not possible to compute a `TypeSpec` for
# the value. As a fallback try converting the value to a tensor.
normalized_components.append(
ops.convert_to_tensor(t, name="component_%d" % i))
else:
if isinstance(spec, sparse_tensor.SparseTensorSpec):
normalized_components.append(sparse_tensor.SparseTensor.from_value(t))
elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
normalized_components.append(
ragged_tensor.convert_to_tensor_or_ragged_tensor(
t, name="component_%d" % i))
elif isinstance(
spec, (tensor_array_ops.TensorArraySpec, dataset_ops.DatasetSpec)):
normalized_components.append(t)
elif isinstance(t, composite_tensor.CompositeTensor):
normalized_components.append(t)
else:
normalized_components.append(
ops.convert_to_tensor(t, name="component_%d" % i))
return nest.pack_sequence_as(element, normalized_components)
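# NOTE: The following sketch is an editorial addition, not part of the original
# module. It shows, for assumed example values, what `normalize_element` does
# to a mixed nest of plain Python and composite values.
def _example_normalize_element():
  """Hypothetical usage sketch for `normalize_element`."""
  element = {
      "dense": [1, 2, 3],  # a plain Python list; converted to a `Tensor`
      "sparse": sparse_tensor.SparseTensorValue(
          indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
  }
  normalized = normalize_element(element)
  # `normalized["dense"]` is a `Tensor` and `normalized["sparse"]` is a
  # `SparseTensor`, so downstream tf.data code sees uniform component types.
  return normalized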
def convert_legacy_structure(output_types, output_shapes, output_classes):
"""Returns a `Structure` that represents the given legacy structure.
This method provides a way to convert from the existing `Dataset` and
`Iterator` structure-related properties to a `Structure` object. A "legacy"
structure is represented by the `tf.data.Dataset.output_types`,
`tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes`
properties.
TODO(b/110122868): Remove this function once `Structure` is used throughout
`tf.data`.
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of a structured value.
output_shapes: A nested structure of `tf.TensorShape` objects
      corresponding to each component of a structured value.
output_classes: A nested structure of Python `type` objects corresponding
to each component of a structured value.
Returns:
A `Structure`.
Raises:
TypeError: If a structure cannot be built from the arguments, because one of
the component classes in `output_classes` is not supported.
"""
flat_types = nest.flatten(output_types)
flat_shapes = nest.flatten(output_shapes)
flat_classes = nest.flatten(output_classes)
flat_ret = []
for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes,
flat_classes):
if isinstance(flat_class, type_spec.TypeSpec):
flat_ret.append(flat_class)
elif issubclass(flat_class, sparse_tensor.SparseTensor):
flat_ret.append(sparse_tensor.SparseTensorSpec(flat_shape, flat_type))
elif issubclass(flat_class, ops.Tensor):
flat_ret.append(tensor_spec.TensorSpec(flat_shape, flat_type))
elif issubclass(flat_class, tensor_array_ops.TensorArray):
# We sneaked the dynamic_size and infer_shape into the legacy shape.
flat_ret.append(
tensor_array_ops.TensorArraySpec(
flat_shape[2:], flat_type,
dynamic_size=tensor_shape.dimension_value(flat_shape[0]),
infer_shape=tensor_shape.dimension_value(flat_shape[1])))
else:
# NOTE(mrry): Since legacy structures produced by iterators only
# comprise Tensors, SparseTensors, and nests, we do not need to
# support all structure types here.
raise TypeError(
"Could not build a structure for output class %r" % (flat_class,))
return nest.pack_sequence_as(output_classes, flat_ret)
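# NOTE: The following sketch is an editorial addition, not part of the original
# module. It shows how an assumed legacy (types, shapes, classes) triple maps
# onto `TypeSpec`s; the local `dtypes` import exists only for this example.
def _example_convert_legacy_structure():
  """Hypothetical usage sketch for `convert_legacy_structure`."""
  from tensorflow.python.framework import dtypes  # pylint: disable=g-import-not-at-top
  output_types = {"a": dtypes.float32, "b": dtypes.int32}
  output_shapes = {
      "a": tensor_shape.TensorShape([]),
      "b": tensor_shape.TensorShape([None, 2])
  }
  output_classes = {"a": ops.Tensor, "b": sparse_tensor.SparseTensor}
  # Yields {"a": TensorSpec([], tf.float32),
  #         "b": SparseTensorSpec([None, 2], tf.int32)}.
  return convert_legacy_structure(output_types, output_shapes, output_classes)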
def _from_tensor_list_helper(decode_fn, element_spec, tensor_list):
"""Returns an element constructed from the given spec and tensor list.
Args:
decode_fn: Method that constructs an element component from the element spec
component and a tensor list.
    element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
tensor_list: A list of tensors to use for constructing the value.
Returns:
An element constructed from the given spec and tensor list.
Raises:
ValueError: If the number of tensors needed to construct an element for
the given spec does not match the given number of tensors.
"""
# pylint: disable=protected-access
flat_specs = nest.flatten(element_spec)
flat_spec_lengths = [len(spec._flat_tensor_specs) for spec in flat_specs]
if sum(flat_spec_lengths) != len(tensor_list):
raise ValueError("Expected %d tensors but got %d." %
(sum(flat_spec_lengths), len(tensor_list)))
i = 0
flat_ret = []
for (component_spec, num_flat_values) in zip(flat_specs, flat_spec_lengths):
value = tensor_list[i:i + num_flat_values]
flat_ret.append(decode_fn(component_spec, value))
i += num_flat_values
return nest.pack_sequence_as(element_spec, flat_ret)
def from_compatible_tensor_list(element_spec, tensor_list):
"""Returns an element constructed from the given spec and tensor list.
Args:
    element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
tensor_list: A list of tensors to use for constructing the value.
Returns:
An element constructed from the given spec and tensor list.
Raises:
ValueError: If the number of tensors needed to construct an element for
the given spec does not match the given number of tensors.
"""
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
return _from_tensor_list_helper(
lambda spec, value: spec._from_compatible_tensor_list(value),
element_spec, tensor_list)
def from_tensor_list(element_spec, tensor_list):
"""Returns an element constructed from the given spec and tensor list.
Args:
    element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
tensor_list: A list of tensors to use for constructing the value.
Returns:
An element constructed from the given spec and tensor list.
Raises:
ValueError: If the number of tensors needed to construct an element for
the given spec does not match the given number of tensors or the given
spec is not compatible with the tensor list.
"""
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
return _from_tensor_list_helper(
lambda spec, value: spec._from_tensor_list(value), element_spec,
tensor_list)
def get_flat_tensor_specs(element_spec):
"""Returns a list `tf.TypeSpec`s for the element tensor representation.
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing to
element type specification.
Returns:
A list `tf.TypeSpec`s for the element tensor representation.
"""
# pylint: disable=protected-access
return functools.reduce(lambda state, value: state + value._flat_tensor_specs,
nest.flatten(element_spec), [])
def get_flat_tensor_shapes(element_spec):
"""Returns a list `tf.TensorShapes`s for the element tensor representation.
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing to
element type specification.
Returns:
A list `tf.TensorShapes`s for the element tensor representation.
"""
return [spec.shape for spec in get_flat_tensor_specs(element_spec)]
def get_flat_tensor_types(element_spec):
"""Returns a list `tf.DType`s for the element tensor representation.
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing to
element type specification.
Returns:
A list `tf.DType`s for the element tensor representation.
"""
return [spec.dtype for spec in get_flat_tensor_specs(element_spec)]
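# NOTE: The following sketch is an editorial addition, not part of the original
# module. It illustrates the flat tensor representation of an assumed nested
# element spec; the local `dtypes` import exists only for this example.
def _example_flat_tensor_representation():
  """Hypothetical usage sketch for the `get_flat_tensor_*` helpers."""
  from tensorflow.python.framework import dtypes  # pylint: disable=g-import-not-at-top
  element_spec = (tensor_spec.TensorSpec([3], dtypes.float32),
                  sparse_tensor.SparseTensorSpec([None, 4], dtypes.int32))
  # The sparse component is carried as a single DT_VARIANT tensor of unknown
  # rank, so the flat representation has two tensors in total.
  flat_types = get_flat_tensor_types(element_spec)    # [tf.float32, tf.variant]
  flat_shapes = get_flat_tensor_shapes(element_spec)  # [[3], <unknown>]
  return flat_types, flat_shapes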
def _to_tensor_list_helper(encode_fn, element_spec, element):
"""Returns a tensor list representation of the element.
Args:
encode_fn: Method that constructs a tensor list representation from the
given element spec and element.
    element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
element: The element to convert to tensor list representation.
Returns:
A tensor list representation of `element`.
Raises:
ValueError: If `element_spec` and `element` do not have the same number of
elements or if the two structures are not nested in the same way.
TypeError: If `element_spec` and `element` differ in the type of sequence
in any of their substructures.
"""
nest.assert_same_structure(element_spec, element)
def reduce_fn(state, value):
spec, component = value
return encode_fn(state, spec, component)
return functools.reduce(
reduce_fn, zip(nest.flatten(element_spec), nest.flatten(element)), [])
def to_batched_tensor_list(element_spec, element):
"""Returns a tensor list representation of the element.
Args:
    element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
element: The element to convert to tensor list representation.
Returns:
A tensor list representation of `element`.
Raises:
ValueError: If `element_spec` and `element` do not have the same number of
elements or if the two structures are not nested in the same way or the
rank of any of the tensors in the tensor list representation is 0.
TypeError: If `element_spec` and `element` differ in the type of sequence
in any of their substructures.
"""
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
return _to_tensor_list_helper(
lambda state, spec, component: state + spec._to_batched_tensor_list(
component), element_spec, element)
def to_tensor_list(element_spec, element):
"""Returns a tensor list representation of the element.
Args:
    element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
element: The element to convert to tensor list representation.
Returns:
A tensor list representation of `element`.
Raises:
ValueError: If `element_spec` and `element` do not have the same number of
elements or if the two structures are not nested in the same way.
TypeError: If `element_spec` and `element` differ in the type of sequence
in any of their substructures.
"""
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
return _to_tensor_list_helper(
lambda state, spec, component: state + spec._to_tensor_list(component),
element_spec, element)
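# NOTE: The following sketch is an editorial addition, not part of the original
# module. It round-trips an assumed value through its flat tensor list using
# `to_tensor_list` and `from_tensor_list`.
def _example_tensor_list_round_trip():
  """Hypothetical round-trip sketch for `to_tensor_list`/`from_tensor_list`."""
  value = (ops.convert_to_tensor([1.0, 2.0]),
           sparse_tensor.SparseTensor(
               indices=[[0, 0]], values=[1], dense_shape=[2, 2]))
  spec = type_spec_from_value(value)
  tensor_list = to_tensor_list(spec, value)  # flat list of component tensors
  # Restores the original nested structure of Tensor/SparseTensor objects.
  return from_tensor_list(spec, tensor_list)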
def are_compatible(spec1, spec2):
"""Indicates whether two type specifications are compatible.
Two type specifications are compatible if they have the same nested structure
  and their individual components are pairwise compatible.
Args:
spec1: A `tf.TypeSpec` object to compare.
spec2: A `tf.TypeSpec` object to compare.
Returns:
`True` if the two type specifications are compatible and `False` otherwise.
"""
try:
nest.assert_same_structure(spec1, spec2)
except TypeError:
return False
except ValueError:
return False
for s1, s2 in zip(nest.flatten(spec1), nest.flatten(spec2)):
if not s1.is_compatible_with(s2) or not s2.is_compatible_with(s1):
return False
return True
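# NOTE: The following sketch is an editorial addition, not part of the original
# module. It shows pairwise spec compatibility on assumed specs; the local
# `dtypes` import exists only for this example.
def _example_are_compatible():
  """Hypothetical usage sketch for `are_compatible`."""
  from tensorflow.python.framework import dtypes  # pylint: disable=g-import-not-at-top
  spec_a = tensor_spec.TensorSpec([None], dtypes.float32)
  spec_b = tensor_spec.TensorSpec([3], dtypes.float32)
  spec_c = tensor_spec.TensorSpec([3], dtypes.int32)
  compatible = are_compatible(spec_a, spec_b)    # True: shapes are compatible
  incompatible = are_compatible(spec_a, spec_c)  # False: dtypes differ
  return compatible, incompatible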
def type_spec_from_value(element, use_fallback=True):
"""Creates a type specification for the given value.
Args:
element: The element to create the type specification for.
use_fallback: Whether to fall back to converting the element to a tensor
in order to compute its `TypeSpec`.
Returns:
A nested structure of `TypeSpec`s that represents the type specification
of `element`.
Raises:
TypeError: If a `TypeSpec` cannot be built for `element`, because its type
is not supported.
"""
spec = type_spec._type_spec_from_value(element) # pylint: disable=protected-access
if spec is not None:
return spec
if isinstance(element, dict):
# We create a shallow copy in an attempt to preserve the key order.
#
# Note that we do not guarantee that the key order is preserved, which is
# a limitation inherited from `copy()`. As a consequence, callers of
# `type_spec_from_value` should not assume that the key order of a `dict`
# in the returned nested structure matches the key order of the
# corresponding `dict` in the input value.
result = element.copy()
for k in element:
result[k] = type_spec_from_value(element[k])
return result
if isinstance(element, tuple):
if hasattr(element, "_fields") and isinstance(
element._fields, collections.Sequence) and all(
isinstance(f, six.string_types) for f in element._fields):
# `element` is a namedtuple
return type(element)(*[type_spec_from_value(v) for v in element])
# `element` is not a namedtuple
return tuple([type_spec_from_value(v) for v in element])
if use_fallback:
# As a fallback try converting the element to a tensor.
try:
tensor = ops.convert_to_tensor(element)
spec = type_spec_from_value(tensor)
if spec is not None:
return spec
except (ValueError, TypeError) as e:
logging.vlog(
3, "Failed to convert %r to tensor: %s" % (type(element).__name__, e))
raise TypeError("Could not build a TypeSpec for %r with type %s" %
(element, type(element).__name__))
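# NOTE: The following sketch is an editorial addition, not part of the original
# module. The values below are assumptions chosen only to illustrate the main
# code paths above (registered `TypeSpec`s, `dict` recursion, and the
# tensor-conversion fallback); the local `constant_op` import exists only for
# this example.
def _example_type_spec_from_value():
  """Hypothetical usage sketch for `type_spec_from_value`."""
  from tensorflow.python.framework import constant_op  # pylint: disable=g-import-not-at-top
  value = {
      "dense": constant_op.constant([1.0, 2.0]),
      "sparse": sparse_tensor.SparseTensor(
          indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
      "python": 3.0,  # handled by the convert-to-tensor fallback
  }
  # Yields {"dense": TensorSpec([2], tf.float32),
  #         "sparse": SparseTensorSpec([1, 1], tf.int32),
  #         "python": TensorSpec([], tf.float32)}.
  return type_spec_from_value(value)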
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/structure.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python dataset sparse tensor utility functitons."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import sparse_ops
def any_sparse(classes):
"""Checks for sparse tensor.
Args:
classes: a structure of objects that identify the dataset item classes
Returns:
`True` if `classes` contains a sparse tensor type and `False` otherwise.
"""
return any(c is sparse_tensor.SparseTensor for c in nest.flatten(classes))
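# NOTE: The following sketch is an editorial addition, not part of the original
# module. It shows that `any_sparse` inspects component *classes* (as produced
# by `get_classes` below), not values.
def _example_any_sparse():
  """Hypothetical usage sketch for `any_sparse`."""
  classes = (ops.Tensor, {"labels": sparse_tensor.SparseTensor})
  return any_sparse(classes)  # True: one component class is `SparseTensor`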
def as_dense_shapes(shapes, classes):
"""Converts sparse tensor shapes to their physical shapes.
Args:
shapes: a structure of shapes to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `shapes`, containing
`tensor_shape.unknown_shape()` at positions where `classes` contains
`tf.SparseTensor` and matching contents of `shapes` otherwise
"""
ret = nest.pack_sequence_as(shapes, [
tensor_shape.unknown_shape() if c is sparse_tensor.SparseTensor else shape
for shape, c in zip(nest.flatten(shapes), nest.flatten(classes))
])
return ret
def as_dense_types(types, classes):
"""Converts sparse tensor types to `dtypes.variant`.
Args:
types: a structure of types to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `types`, containing
`dtypes.variant` at positions where `classes` contains `tf.SparseTensor` and
matching contents of `types` otherwise
"""
ret = nest.pack_sequence_as(types, [
dtypes.variant if c is sparse_tensor.SparseTensor else ty
for ty, c in zip(nest.flatten(types), nest.flatten(classes))
])
return ret
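# NOTE: The following sketch is an editorial addition, not part of the original
# module. It shows, for an assumed dense/sparse nest, how the logical shapes
# and types above are rewritten for the physical (serialized) representation.
def _example_as_dense_shapes_and_types():
  """Hypothetical usage sketch for `as_dense_shapes` and `as_dense_types`."""
  classes = (ops.Tensor, sparse_tensor.SparseTensor)
  shapes = (tensor_shape.TensorShape([3]), tensor_shape.TensorShape([4, 5]))
  types = (dtypes.float32, dtypes.int32)
  dense_shapes = as_dense_shapes(shapes, classes)  # ([3], <unknown shape>)
  dense_types = as_dense_types(types, classes)     # (tf.float32, tf.variant)
  return dense_shapes, dense_types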
def deserialize_sparse_tensors(tensors, types, shapes, classes):
"""Deserializes sparse tensors.
Args:
tensors: a structure of tensors to deserialize.
types: a structure that holds information about types of `tensors`
shapes: a structure that holds information about shapes of `tensors`
classes: a structure of objects that identify the dataset item classes
Returns:
`tensors` with any serialized sparse tensors replaced by their deserialized
version.
"""
ret = nest.pack_sequence_as(types, [
sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims)
if c is sparse_tensor.SparseTensor else tensor
for (tensor, ty, shape, c) in zip(
nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes),
nest.flatten(classes))
])
return ret
def get_classes(tensors):
"""Gets classes for a structure of tensors.
Args:
tensors: the tensor structure to get classes for.
Returns:
a structure matching the nested structure of `tensors`, containing
`tf.SparseTensor` at positions where `tensors` contains a sparse tensor and
`tf.Tensor` otherwise
"""
return nest.pack_sequence_as(tensors, [
sparse_tensor.SparseTensor
if isinstance(tensor, sparse_tensor.SparseTensor) else ops.Tensor
for tensor in nest.flatten(tensors)
])
def serialize_many_sparse_tensors(tensors):
"""Serializes many sparse tensors into a batch.
Args:
tensors: a tensor structure to serialize.
Returns:
`tensors` with any sparse tensors replaced by the serialized batch.
"""
ret = nest.pack_sequence_as(tensors, [
sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
if sparse_tensor.is_sparse(tensor) else tensor
for tensor in nest.flatten(tensors)
])
return ret
def serialize_sparse_tensors(tensors):
"""Serializes sparse tensors.
Args:
tensors: a tensor structure to serialize.
Returns:
`tensors` with any sparse tensors replaced by their serialized version.
"""
ret = nest.pack_sequence_as(tensors, [
sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant)
if isinstance(tensor, sparse_tensor.SparseTensor) else tensor
for tensor in nest.flatten(tensors)
])
return ret
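# NOTE: The following sketch is an editorial addition, not part of the original
# module. It ties the helpers above together in a serialize/deserialize round
# trip over an assumed nest with one dense and one sparse component.
def _example_sparse_round_trip():
  """Hypothetical round-trip sketch for the (de)serialization helpers."""
  element = (ops.convert_to_tensor([1.0, 2.0]),
             sparse_tensor.SparseTensor(
                 indices=[[0, 0]], values=[1], dense_shape=[2, 2]))
  classes = get_classes(element)  # (tf.Tensor, tf.SparseTensor)
  types = (dtypes.float32, dtypes.int32)
  shapes = (tensor_shape.TensorShape([2]), tensor_shape.TensorShape([2, 2]))
  serialized = serialize_sparse_tensors(element)
  # The sparse component is now a DT_VARIANT tensor; deserializing with the
  # original types, shapes, and classes restores the `SparseTensor`.
  return deserialize_sparse_tensors(serialized, types, shapes, classes)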
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/sparse.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class NestTest(test.TestCase):
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
point = collections.namedtuple("Point", ["x", "y"])
structure = (point(x=4, y=2), ((point(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
def testFlattenDictOrder(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
def testPackDictOrder(self):
"""Packing orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertEqual(
collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
ordered_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
def testFlattenAndPackWithDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
named_tuple = collections.namedtuple("A", ("b", "c"))
mess = (
"z",
named_tuple(3, 4),
{
"c": (
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
),
"b": 5
},
17
)
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17])
structure_of_mess = (
14,
named_tuple("a", True),
{
"c": (
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
),
"b": 3
},
"hi everybody",
)
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
def testFlattenSparseValue(self):
st = sparse_tensor.SparseTensorValue([[0]], [0], [1])
single_value = st
list_of_values = [st, st, st]
nest_of_values = ((st), ((st), (st)))
dict_of_values = {"foo": st, "bar": st, "baz": st}
self.assertEqual([st], nest.flatten(single_value))
self.assertEqual([[st, st, st]], nest.flatten(list_of_values))
self.assertEqual([st, st, st], nest.flatten(nest_of_values))
self.assertEqual([st, st, st], nest.flatten(dict_of_values))
def testFlattenRaggedValue(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]]])
single_value = rt
list_of_values = [rt, rt, rt]
nest_of_values = ((rt), ((rt), (rt)))
dict_of_values = {"foo": rt, "bar": rt, "baz": rt}
self.assertEqual([rt], nest.flatten(single_value))
self.assertEqual([[rt, rt, rt]], nest.flatten(list_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(nest_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(dict_of_values))
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertFalse(nest.is_sequence([]))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
self.assertTrue(nest.is_sequence({"foo": 1, "bar": 2}))
self.assertFalse(
nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))
self.assertFalse(
nest.is_sequence(ragged_factory_ops.constant_value([[[0]], [[1]]])))
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
structure_dictionary = {"foo": 2, "bar": 4, "baz": {"foo": 5, "bar": 6}}
structure_dictionary_diff_nested = {
"foo": 2,
"bar": 4,
"baz": {
"foo": 5,
"baz": 6
}
}
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure((0, 1), np.array([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(0, (0, 1))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_nesting)
named_type_0 = collections.namedtuple("named_0", ("a", "b"))
named_type_1 = collections.namedtuple("named_1", ("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
named_type_0("a", "b"))
nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
named_type_0(3, 4), named_type_1(3, 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(((3,), 4), (3, (4,)))
structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)}
structure2_list = {"a": ((1, 2), 3), "b": 4, "d": (5, 6)}
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure1_list, structure2_list)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure_dictionary,
structure_dictionary_diff_nested)
nest.assert_same_structure(
structure_dictionary,
structure_dictionary_diff_nested,
check_types=False)
nest.assert_same_structure(
structure1_list, structure2_list, check_types=False)
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5})
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
def testAssertShallowStructure(self):
inp_ab = ("a", "b")
inp_abc = ("a", "b", "c")
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = ((1, 1), (2, 2))
inp_ab2 = {"a": (1, 1), "b": (2, 2)}
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'dict'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
def testFlattenUpTo(self):
input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))
shallow_tree = ((True, True), (False, True))
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
input_tree = ((("a", 1), (("b", 2), (("c", 3), (("d", 4))))))
shallow_tree = (("level_1", ("level_2", ("level_3", ("level_4")))))
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ("input_tree_0", "input_tree_1")
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = (0,)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = (0, 1)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ("shallow_tree",)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = "input_tree"
shallow_tree = ("shallow_tree_9", "shallow_tree_8")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using non-iterable elements.
input_tree = 0
shallow_tree = (9,)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = 0
shallow_tree = (9, 8)
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using dict.
input_tree = {"a": ((2, 2), (3, 3)), "b": ((4, 9), (5, 5))}
shallow_tree = {"a": (True, True), "b": (False, True)}
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
def testMapStructureUpTo(self):
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))
name_list = ("evens", ("odds", "primes"))
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ("first_4_evens", ("first_5_odds", "first_3_primes")))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/nest_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with user input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import convert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConvertTest(test.TestCase):
def testInteger(self):
resp = convert.optional_param_to_tensor("foo", 3)
self.assertEqual(3, self.evaluate(resp))
def testIntegerDefault(self):
resp = convert.optional_param_to_tensor("foo", None)
self.assertEqual(0, self.evaluate(resp))
def testStringDefault(self):
resp = convert.optional_param_to_tensor("bar", None, "default",
dtypes.string)
self.assertEqual(compat.as_bytes("default"), self.evaluate(resp))
def testString(self):
resp = convert.optional_param_to_tensor("bar", "value", "default",
dtypes.string)
self.assertEqual(compat.as_bytes("value"), self.evaluate(resp))
def testPartialShapeToTensorKnownDimension(self):
self.assertAllEqual([1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([1]))))
self.assertAllEqual([1], self.evaluate(
convert.partial_shape_to_tensor((1,))))
self.assertAllEqual([1], self.evaluate(
convert.partial_shape_to_tensor([1])))
self.assertAllEqual([1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([1], dtype=dtypes.int64))))
@test_util.run_deprecated_v1
def testPartialShapeToTensorUnknownDimension(self):
self.assertAllEqual([-1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([None]))))
self.assertAllEqual([-1],
self.evaluate(convert.partial_shape_to_tensor((None,))))
self.assertAllEqual([-1],
self.evaluate(convert.partial_shape_to_tensor([None])))
self.assertAllEqual([-1],
self.evaluate(convert.partial_shape_to_tensor([-1])))
self.assertAllEqual([-1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([-1],
dtype=dtypes.int64))))
with self.assertRaisesRegexp(
ValueError, r"The given shape .* must be a 1-D tensor of tf.int64 "
r"values, but the shape was \(2, 2\)."):
convert.partial_shape_to_tensor(constant_op.constant(
[[1, 1], [1, 1]], dtype=dtypes.int64))
with self.assertRaisesRegexp(
TypeError, r"The given shape .* must be a 1-D tensor of tf.int64 "
r"values, but the element type was float32."):
convert.partial_shape_to_tensor(constant_op.constant([1., 1.]))
def testPartialShapeToTensorMultipleDimensions(self):
self.assertAllEqual([3, 6],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([3, 6]))))
self.assertAllEqual([3, 6],
self.evaluate(convert.partial_shape_to_tensor((3, 6))))
self.assertAllEqual([3, 6],
self.evaluate(convert.partial_shape_to_tensor([3, 6])))
self.assertAllEqual([3, 6],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([3, 6],
dtype=dtypes.int64))))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([3, None]))))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor((3, None))))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor([3, None])))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([3, -1],
dtype=dtypes.int64))))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([None, None]))))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor((None, None))))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor([None, None])))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([-1, -1],
dtype=dtypes.int64))))
def testPartialShapeToTensorScalar(self):
self.assertAllEqual([],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([]))))
self.assertAllEqual([], self.evaluate(convert.partial_shape_to_tensor(())))
self.assertAllEqual([], self.evaluate(convert.partial_shape_to_tensor([])))
self.assertAllEqual([],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([], dtype=dtypes.int64))))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/convert_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating Tensor-valued random seeds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def get_seed(seed):
"""Returns the local seeds an operation should use given an op-specific seed.
See `tf.compat.v1.get_seed` for more details. This wrapper adds support for
  the case where `seed` may be a tensor.
Args:
seed: An integer or a `tf.int64` scalar tensor.
Returns:
A tuple of two `tf.int64` scalar tensors that should be used for the local
seed of the calling dataset.
"""
seed, seed2 = random_seed.get_seed(seed)
if seed is None:
seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
else:
seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
if seed2 is None:
seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
else:
with ops.name_scope("seed2") as scope:
seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)
seed2 = array_ops.where(
math_ops.logical_and(
math_ops.equal(seed, 0), math_ops.equal(seed2, 0)),
constant_op.constant(2**31 - 1, dtype=dtypes.int64),
seed2,
name=scope)
return seed, seed2
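# --- Illustrative usage (editorial sketch; not part of the original module) ---
# A dataset transformation that accepts an optional `seed` argument could use
# the returned pair roughly as follows (`some_gen_dataset_op` is a hypothetical
# placeholder, named here only for illustration):
#
#   seed, seed2 = get_seed(user_seed)  # both are scalar tf.int64 tensors
#   variant = some_gen_dataset_op(..., seed=seed, seed2=seed2)
#
# The `array_ops.where` branch above replaces `seed2` with 2**31 - 1 when both
# seeds evaluate to 0, which appears intended to keep an explicitly requested
# (0, 0) seed from being treated as the "no seed supplied" case by the kernels.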
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/random_seed.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class SparseTest(test.TestCase):
def testAnySparse(self):
test_cases = (
{
"classes": (),
"expected": False
},
{
"classes": (ops.Tensor),
"expected": False
},
{
"classes": (((ops.Tensor))),
"expected": False
},
{
"classes": (ops.Tensor, ops.Tensor),
"expected": False
},
{
"classes": (ops.Tensor, sparse_tensor.SparseTensor),
"expected": True
},
{
"classes": (sparse_tensor.SparseTensor, sparse_tensor.SparseTensor),
"expected":
True
},
{
"classes": (sparse_tensor.SparseTensor, ops.Tensor),
"expected": True
},
{
"classes": (((sparse_tensor.SparseTensor))),
"expected": True
},
)
for test_case in test_cases:
self.assertEqual(
sparse.any_sparse(test_case["classes"]), test_case["expected"])
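  # Helper: compares two (possibly nested) structures of `TensorShape`s
  # dimension by dimension; shapes are considered equal when both have
  # unknown rank.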
def assertShapesEqual(self, a, b):
for a, b in zip(nest.flatten(a), nest.flatten(b)):
self.assertEqual(a.ndims, b.ndims)
if a.ndims is None:
continue
for c, d in zip(a.as_list(), b.as_list()):
self.assertEqual(c, d)
def testAsDenseShapes(self):
test_cases = (
{
"types": (),
"classes": (),
"expected": ()
},
{
"types": tensor_shape.TensorShape([]),
"classes": ops.Tensor,
"expected": tensor_shape.TensorShape([])
},
{
"types": tensor_shape.TensorShape([]),
"classes": sparse_tensor.SparseTensor,
"expected": tensor_shape.unknown_shape()
},
{
"types": (tensor_shape.TensorShape([])),
"classes": (ops.Tensor),
"expected": (tensor_shape.TensorShape([]))
},
{
"types": (tensor_shape.TensorShape([])),
"classes": (sparse_tensor.SparseTensor),
"expected": (tensor_shape.unknown_shape())
},
{
"types": (tensor_shape.TensorShape([]), ()),
"classes": (ops.Tensor, ()),
"expected": (tensor_shape.TensorShape([]), ())
},
{
"types": ((), tensor_shape.TensorShape([])),
"classes": ((), ops.Tensor),
"expected": ((), tensor_shape.TensorShape([]))
},
{
"types": (tensor_shape.TensorShape([]), ()),
"classes": (sparse_tensor.SparseTensor, ()),
"expected": (tensor_shape.unknown_shape(), ())
},
{
"types": ((), tensor_shape.TensorShape([])),
"classes": ((), sparse_tensor.SparseTensor),
"expected": ((), tensor_shape.unknown_shape())
},
{
"types": (tensor_shape.TensorShape([]), (),
tensor_shape.TensorShape([])),
"classes": (ops.Tensor, (), ops.Tensor),
"expected": (tensor_shape.TensorShape([]), (),
tensor_shape.TensorShape([]))
},
{
"types": (tensor_shape.TensorShape([]), (),
tensor_shape.TensorShape([])),
"classes":
(sparse_tensor.SparseTensor, (), sparse_tensor.SparseTensor),
"expected": (tensor_shape.unknown_shape(), (),
tensor_shape.unknown_shape())
},
{
"types": ((), tensor_shape.TensorShape([]), ()),
"classes": ((), ops.Tensor, ()),
"expected": ((), tensor_shape.TensorShape([]), ())
},
{
"types": ((), tensor_shape.TensorShape([]), ()),
"classes": ((), sparse_tensor.SparseTensor, ()),
"expected": ((), tensor_shape.unknown_shape(), ())
},
)
for test_case in test_cases:
self.assertShapesEqual(
sparse.as_dense_shapes(test_case["types"], test_case["classes"]),
test_case["expected"])
def testAsDenseTypes(self):
test_cases = (
{
"types": (),
"classes": (),
"expected": ()
},
{
"types": dtypes.int32,
"classes": ops.Tensor,
"expected": dtypes.int32
},
{
"types": dtypes.int32,
"classes": sparse_tensor.SparseTensor,
"expected": dtypes.variant
},
{
"types": (dtypes.int32),
"classes": (ops.Tensor),
"expected": (dtypes.int32)
},
{
"types": (dtypes.int32),
"classes": (sparse_tensor.SparseTensor),
"expected": (dtypes.variant)
},
{
"types": (dtypes.int32, ()),
"classes": (ops.Tensor, ()),
"expected": (dtypes.int32, ())
},
{
"types": ((), dtypes.int32),
"classes": ((), ops.Tensor),
"expected": ((), dtypes.int32)
},
{
"types": (dtypes.int32, ()),
"classes": (sparse_tensor.SparseTensor, ()),
"expected": (dtypes.variant, ())
},
{
"types": ((), dtypes.int32),
"classes": ((), sparse_tensor.SparseTensor),
"expected": ((), dtypes.variant)
},
{
"types": (dtypes.int32, (), dtypes.int32),
"classes": (ops.Tensor, (), ops.Tensor),
"expected": (dtypes.int32, (), dtypes.int32)
},
{
"types": (dtypes.int32, (), dtypes.int32),
"classes": (sparse_tensor.SparseTensor, (),
sparse_tensor.SparseTensor),
"expected": (dtypes.variant, (), dtypes.variant)
},
{
"types": ((), dtypes.int32, ()),
"classes": ((), ops.Tensor, ()),
"expected": ((), dtypes.int32, ())
},
{
"types": ((), dtypes.int32, ()),
"classes": ((), sparse_tensor.SparseTensor, ()),
"expected": ((), dtypes.variant, ())
},
)
for test_case in test_cases:
self.assertEqual(
sparse.as_dense_types(test_case["types"], test_case["classes"]),
test_case["expected"])
def testGetClasses(self):
s = sparse_tensor.SparseTensor(indices=[[0]], values=[1], dense_shape=[1])
d = ops.Tensor
t = sparse_tensor.SparseTensor
test_cases = (
{
"classes": (),
"expected": ()
},
{
"classes": s,
"expected": t
},
{
"classes": constant_op.constant([1]),
"expected": d
},
{
"classes": (s),
"expected": (t)
},
{
"classes": (constant_op.constant([1])),
"expected": (d)
},
{
"classes": (s, ()),
"expected": (t, ())
},
{
"classes": ((), s),
"expected": ((), t)
},
{
"classes": (constant_op.constant([1]), ()),
"expected": (d, ())
},
{
"classes": ((), constant_op.constant([1])),
"expected": ((), d)
},
{
"classes": (s, (), constant_op.constant([1])),
"expected": (t, (), d)
},
{
"classes": ((), s, ()),
"expected": ((), t, ())
},
{
"classes": ((), constant_op.constant([1]), ()),
"expected": ((), d, ())
},
)
for test_case in test_cases:
self.assertEqual(
sparse.get_classes(test_case["classes"]), test_case["expected"])
def assertSparseValuesEqual(self, a, b):
if not isinstance(a, sparse_tensor.SparseTensor):
self.assertFalse(isinstance(b, sparse_tensor.SparseTensor))
self.assertEqual(a, b)
return
self.assertTrue(isinstance(b, sparse_tensor.SparseTensor))
with self.cached_session():
self.assertAllEqual(a.eval().indices, self.evaluate(b).indices)
self.assertAllEqual(a.eval().values, self.evaluate(b).values)
self.assertAllEqual(a.eval().dense_shape, self.evaluate(b).dense_shape)
@test_util.run_deprecated_v1
def testSerializeDeserialize(self):
test_cases = (
(),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
((),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
)
for expected in test_cases:
classes = sparse.get_classes(expected)
shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
classes)
types = nest.map_structure(lambda _: dtypes.int32, classes)
actual = sparse.deserialize_sparse_tensors(
sparse.serialize_sparse_tensors(expected), types, shapes,
sparse.get_classes(expected))
nest.assert_same_structure(expected, actual)
for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
self.assertSparseValuesEqual(a, e)
@test_util.run_deprecated_v1
def testSerializeManyDeserialize(self):
test_cases = (
(),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
((),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
)
for expected in test_cases:
classes = sparse.get_classes(expected)
shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
classes)
types = nest.map_structure(lambda _: dtypes.int32, classes)
actual = sparse.deserialize_sparse_tensors(
sparse.serialize_many_sparse_tensors(expected), types, shapes,
sparse.get_classes(expected))
nest.assert_same_structure(expected, actual)
for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
self.assertSparseValuesEqual(a, e)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/util/sparse_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Verify that memory usage is minimal in eager mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
# memory_profiler might not be available in the OSS version of TensorFlow.
try:
import memory_profiler # pylint:disable=g-import-not-at-top
except ImportError:
memory_profiler = None
@test_util.run_all_in_graph_and_eager_modes
class MemoryCleanupTest(test_base.DatasetTestBase):
def assertNotIncreasingMemory(self,
f,
num_iters=100000,
increase_threshold_absolute_mb=10):
"""Assert memory usage doesn't increase beyond given threshold for f."""
with context.eager_mode():
# Warm up.
f()
# Wait for background threads to start up and take over memory.
# FIXME: The nature of this test leaves few other options. Maybe there
# is a better way to do this.
time.sleep(4)
initial = memory_profiler.memory_usage(-1)[0]
for _ in six.moves.range(num_iters):
f()
increase = memory_profiler.memory_usage(-1)[0] - initial
logging.info("Memory increase observed: %f MB" % increase)
assert increase < increase_threshold_absolute_mb, (
"Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
"Maximum allowed increase: %f") % (initial, increase,
increase_threshold_absolute_mb)
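  # The helper above relies on `memory_profiler.memory_usage(-1)`, which
  # samples the current process's memory usage (in megabytes), so the
  # `increase_threshold_absolute_mb` values below are absolute increases.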
@test_util.run_v1_only("b/121264236")
def testEagerMemoryUsageWithReset(self):
if not context.executing_eagerly():
self.skipTest("Only eager mode test")
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
def f():
self.evaluate(multi_device_iterator.get_next())
multi_device_iterator._eager_reset()
self.assertNotIncreasingMemory(
f, num_iters=100, increase_threshold_absolute_mb=350)
@test_util.run_v1_only("b/121264236")
def testEagerMemoryUsageWithRecreation(self):
if not context.executing_eagerly():
self.skipTest("Only eager mode test")
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
def f():
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
self.evaluate(multi_device_iterator.get_next())
del multi_device_iterator
# TODO(b/123316347): Reduce threshold once bug is fixed.
self.assertNotIncreasingMemory(
f, num_iters=100, increase_threshold_absolute_mb=500)
if __name__ == "__main__":
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/memory_cleanup_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class UnbatchTest(test_base.DatasetTestBase, parameterized.TestCase):
def testUnbatchWithUnknownRankInput(self):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]).unbatch()
self.assertDatasetProduces(dataset, range(4))
def testUnbatchScalarDataset(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = (dtypes.int32,) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])
def testUnbatchNestedDataset(self):
data = dataset_ops.Dataset.from_tensors(
[dataset_ops.Dataset.range(10) for _ in range(10)])
data = data.unbatch().flat_map(lambda x: x)
self.assertDatasetProduces(data, list(range(10)) * 10)
def testUnbatchDatasetWithStrings(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
def testUnbatchDatasetWithSparseTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
data = dataset_ops.Dataset.from_tensors(st)
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [
sparse_tensor.SparseTensorValue([[i]], [i], [10]) for i in range(10)
]
self.assertDatasetProduces(data, expected_output=expected_output)
def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
ragged_factory_ops.constant_value([[i]]))
for i in range(10)]
self.assertDatasetProduces(
data, expected_output=expected_output)
def testUnbatchDatasetWithRaggedTensor(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors(rt)
data = data.unbatch()
data = data.batch(5)
data = data.batch(2)
data = data.unbatch()
expected_output = [
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
]
self.assertDatasetProduces(
data, expected_output=expected_output)
def testUnbatchSingleElementTupleDataset(self):
data = tuple([(math_ops.range(10),) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32,),) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])
def testUnbatchMultiElementTupleDataset(self):
data = tuple([(math_ops.range(10 * i, 10 * i + 10),
array_ops.fill([10], "hi")) for i in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32, dtypes.string),) * 3
data = data.batch(2)
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data,
[((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])
def testUnbatchEmpty(self):
data = dataset_ops.Dataset.from_tensors(
(constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
constant_op.constant([], shape=[0, 4, 0])))
data = data.unbatch()
self.assertDatasetProduces(data, [])
def testUnbatchStaticShapeMismatch(self):
data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
np.arange(9)))
with self.assertRaises(ValueError):
data.unbatch()
  # Note: the dynamic shape mismatch case is a graph-specific test.
@test_util.run_deprecated_v1
def testSkipEagerUnbatchDynamicShapeMismatch(self):
ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
ph2 = array_ops.placeholder(dtypes.int32, shape=None)
data = dataset_ops.Dataset.from_tensors((ph1, ph2))
data = data.unbatch()
iterator = dataset_ops.make_initializable_iterator(data)
next_element = iterator.get_next()
with self.cached_session() as sess:
# Mismatch in the 0th dimension.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: np.arange(8).astype(np.int32)
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
# No 0th dimension (i.e. scalar value) for one component.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: 7
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
def testUnbatchDatasetWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1], [2], [3]], dtype=np.uint8), 2),
np.tile(np.array([[1], [2], [3], [256]], dtype=np.uint16), 2),
np.tile(np.array([[2], [3], [4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[3], [4], [5], [4294967296]], dtype=np.uint64), 2),
)
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(4)]
data = dataset_ops.Dataset.from_tensor_slices(components)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/unbatch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FileCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(FileCacheTest, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.cache_prefix = path.join(self.tmp_dir, "cache")
def tearDown(self):
super(FileCacheTest, self).tearDown()
if self.tmp_dir:
shutil.rmtree(self.tmp_dir, ignore_errors=True)
@combinations.generate(test_base.default_test_combinations())
def testCacheDatasetPassthrough(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
def dataset_fn(count=5, filename=None):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if filename:
return repeat_dataset.cache(filename)
else:
return repeat_dataset
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(dataset_fn()))
get_next = self.getNext(dataset_fn())
# First run without caching to collect the "ground truth".
elements = []
for _ in range(20):
elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the cached dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
cached_elements = []
for _ in range(20):
cached_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(elements, cached_elements)
# Re-initialize with an empty upstream (to throw errors.OutOfRangeError
# if we didn't use the cache).
get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
replayed_elements = []
for _ in range(20):
replayed_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(cached_elements, replayed_elements)
# Re-initialize with an empty upstream and a missing cache file (should
# throw errors.OutOfRangeError immediately).
get_next = self.getNext(
dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testConcurrentWriters(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
self.evaluate(get_next1()) # this should succeed
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(get_next2())
self.evaluate(get_next1()) # this should continue to succeed
@combinations.generate(test_base.default_test_combinations())
def testConcurrentReaders(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
elements = []
for _ in range(4):
elements.append(self.evaluate(get_next1()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
# Re-initialize
get_next1 = self.getNext(cache_dataset1, requires_initialization=True)
get_next2 = self.getNext(cache_dataset2, requires_initialization=True)
# Reading concurrently should succeed.
elements_itr1 = []
elements_itr2 = []
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
# Intentionally reversing the order
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
self.assertAllEqual(elements, elements_itr1)
self.assertAllEqual(elements, elements_itr2)
@combinations.generate(test_base.default_test_combinations())
def testReadingPastEndOfSequence(self):
dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
dataset = dataset.map(lambda a: a).batch(4).repeat(2)
expected_output = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] * 2
self.assertDatasetProduces(dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testCleaningUpCacheFiles(self):
def do_test(i):
dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
get_next = self.getNext(dataset)
for _ in range(i):
try:
self.evaluate(get_next())
except errors.OutOfRangeError:
break
if not context.executing_eagerly():
self.skipTest(
"Test requires eager mode for iterators to be deconstructed")
for i in [0, 3, 10, 12, 15]:
do_test(i)
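# MemoryCacheTest covers `Dataset.cache()` called without a filename argument,
# where elements are cached in memory rather than written to files on disk.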
class MemoryCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testCacheDatasetPassthrough(self):
with ops.device("cpu:0"):
repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
dataset = dataset_ops.Dataset.range(3).flat_map(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))
cached_dataset = dataset.cache().repeat(2)
uncached_dataset = dataset.repeat(2)
self.evaluate(repeat_count.initializer)
# Needs to be initializable to capture the variable.
cached_next = self.getNext(cached_dataset, requires_initialization=True)
uncached_next = self.getNext(
uncached_dataset, requires_initialization=True)
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
self.assertEqual(self.evaluate(uncached_next()), i)
self.evaluate(repeat_count.assign(0))
# The uncached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(uncached_next())
# The cached iterator replays from cache.
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
# The cached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(cached_next())
@combinations.generate(test_base.default_test_combinations())
def testEmptyCacheReading(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
cache_dataset = repeat_dataset.cache()
    # The cached dataset over an empty input should also produce no elements.
self.assertDatasetProduces(cache_dataset, expected_output=[])
@combinations.generate(test_base.default_test_combinations())
def testConcurrentReaders(self):
dataset_fn = lambda: dataset_ops.Dataset.range(5).cache()
d1 = dataset_fn().map(lambda x: x + 1)
d2 = dataset_fn().map(lambda x: x + 6)
get_next1 = self.getNext(d1)
self.assertEqual(1, self.evaluate(get_next1()))
self.assertEqual(2, self.evaluate(get_next1()))
self.assertEqual(3, self.evaluate(get_next1()))
get_next2 = self.getNext(d2)
self.assertEqual(6, self.evaluate(get_next2()))
self.assertEqual(7, self.evaluate(get_next2()))
self.assertEqual(4, self.evaluate(get_next1())) # interleave execution
self.assertEqual([8, 5],
[self.evaluate(get_next2()),
self.evaluate(get_next1())])
self.assertEqual(9, self.evaluate(get_next2()))
self.assertEqual(10, self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
@combinations.generate(test_base.default_test_combinations())
def testCacheTakeRepeat(self):
dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)
expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testCacheRepeatEpochs(self):
counter = variables.Variable(0)
self.evaluate(counter.initializer)
def increment_fn(x):
counter.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache().repeat(2)
get_next = self.getNext(dataset, requires_initialization=True)
# first epoch
for i in range(10):
self.assertEqual(i, self.evaluate(counter))
self.assertEqual(i, self.evaluate(get_next()))
# second epoch
for i in range(10):
self.assertEqual(10, self.evaluate(counter))
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testCacheIterationEpochs(self):
counter = variables.Variable(0)
self.evaluate(counter.initializer)
def increment_fn(x):
counter.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache()
# first epoch
i = 0
for elem in dataset:
self.assertEqual(i, self.evaluate(elem))
i += 1
self.assertEqual(i, self.evaluate(counter))
# second epoch
i = 0
for elem in dataset:
self.assertEqual(10, self.evaluate(counter))
self.assertEqual(i, self.evaluate(elem))
i += 1
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testCacheV2ResourceCapture(self):
def make_dataset():
ids = dataset_ops.Dataset.range(10)
ids = ids.cache()
def interleave_fn(dataset, _):
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.interleave(functools.partial(interleave_fn, ids))
return dataset
results = []
for elem in make_dataset():
results.append(elem.numpy())
self.assertAllEqual(results, range(10))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/cache_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.filter()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
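# Builds the parameterized test combinations used below: the current
# `Dataset.filter` API is exercised under both TF 1.x and TF 2.x in eager and
# graph modes, while the legacy `filter_with_legacy_function` API is only
# exercised under TF 1.x.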
def new_and_legacy_filter_fn_combinations():
def new_filter_fn(dataset, predicate):
return dataset.filter(predicate)
def legacy_filter_fn(dataset, predicate):
return dataset.filter_with_legacy_function(predicate)
return (combinations.combine(
tf_api_version=[1, 2],
mode=["eager", "graph"],
apply_filter=combinations.NamedObject("new_filter_fn", new_filter_fn)) +
combinations.combine(
tf_api_version=1,
mode=["eager", "graph"],
apply_filter=combinations.NamedObject("legacy_filter_fn",
legacy_filter_fn)))
class FilterTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testFilterDataset(self, apply_filter):
components = (np.arange(7, dtype=np.int64),
np.array([[1, 2, 3]], dtype=np.int64) *
np.arange(7, dtype=np.int64)[:, np.newaxis],
np.array(37.0, dtype=np.float64) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def do_test(count, modulus): # pylint: disable=missing-docstring
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count)
# pylint: disable=g-long-lambda
dataset = apply_filter(
dataset,
lambda x, _y, _z: math_ops.equal(math_ops.mod(x, modulus), 0))
# pylint: enable=g-long-lambda
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
get_next = self.getNext(dataset)
for _ in range(count):
for i in [x for x in range(7) if x**2 % modulus == 0]:
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
do_test(14, 2)
do_test(4, 18)
# Test an empty dataset.
do_test(0, 1)
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testFilterRange(self, apply_filter):
dataset = dataset_ops.Dataset.range(4)
dataset = apply_filter(dataset,
lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
self.assertDatasetProduces(dataset, expected_output=[0, 1, 3])
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testFilterDict(self, apply_filter):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x**2})
dataset = apply_filter(dataset, lambda d: math_ops.equal(d["bar"] % 2, 0))
dataset = dataset.map(lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset,
expected_output=[(i * 2 + i**2) for i in range(10) if not (i**2) % 2])
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testUseStepContainerInFilter(self, apply_filter):
input_data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
# Define a predicate that returns true for the first element of
# the sequence and not the second, and uses `tf.map_fn()`.
def _predicate(xs):
squared_xs = map_fn.map_fn(lambda x: x * x, xs)
summed = math_ops.reduce_sum(squared_xs)
return math_ops.equal(summed, 1 + 4 + 9)
dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6]])
dataset = apply_filter(dataset, _predicate)
self.assertDatasetProduces(dataset, expected_output=[input_data[0]])
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testSparse(self, apply_filter):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1])), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
dataset = apply_filter(dataset, _filter_fn)
dataset = dataset.map(lambda x, i: x)
self.assertDatasetProduces(
dataset, expected_output=[_map_fn(i * 2)[0] for i in range(5)])
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testShortCircuit(self, apply_filter):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10),
dataset_ops.Dataset.from_tensors(True).repeat(None)))
dataset = apply_filter(dataset, lambda x, y: y)
self.assertDatasetProduces(
dataset, expected_output=[(i, True) for i in range(10)])
@combinations.generate(new_and_legacy_filter_fn_combinations())
def testParallelFilters(self, apply_filter):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_filter(dataset, lambda x: math_ops.equal(x % 2, 0))
next_elements = [self.getNext(dataset) for _ in range(10)]
self.assertEqual([0 for _ in range(10)],
self.evaluate(
[next_element() for next_element in next_elements]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/filter_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.reduce()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
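# Editorial sketch (not part of the original tests): `Dataset.reduce()` folds
# all elements of a dataset into a single value by repeatedly applying
# `reduce_func(state, element)` to an accumulated state. A minimal
# illustration, assuming `tf` is the top-level TensorFlow module and `np` is
# numpy:
#
#   ds = tf.data.Dataset.range(5)
#   total = ds.reduce(np.int64(0), lambda state, x: state + x)
#   # total evaluates to 0 + 1 + 2 + 3 + 4 == 10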
@test_util.run_all_in_graph_and_eager_modes
class ReduceTest(test_base.DatasetTestBase, parameterized.TestCase):
def testSum(self):
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(np.int64(0), lambda x, y: x + y)
self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))
def testSumTuple(self):
def reduce_fn(state, value):
v1, v2 = value
return state + v1 + v2
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
ds = dataset_ops.Dataset.zip((ds, ds))
result = ds.reduce(constant_op.constant(0, dtype=dtypes.int64), reduce_fn)
self.assertEqual(((i + 1) * i), self.evaluate(result))
def testSumAndCount(self):
def reduce_fn(state, value):
s, c = state
return s + value, c + 1
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce((constant_op.constant(0, dtype=dtypes.int64),
constant_op.constant(0, dtype=dtypes.int64)),
reduce_fn)
s, c = self.evaluate(result)
self.assertEqual(((i + 1) * i) // 2, s)
self.assertEqual(i, c)
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerSquareUsingPlaceholder(self):
delta = array_ops.placeholder(dtype=dtypes.int64)
def reduce_fn(state, _):
return state + delta
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(np.int64(0), reduce_fn)
with self.cached_session() as sess:
square = sess.run(result, feed_dict={delta: i})
self.assertEqual(i * i, square)
def testSparse(self):
def reduce_fn(_, value):
return value
def make_sparse_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
for i in range(10):
ds = dataset_ops.Dataset.from_tensors(make_sparse_fn(i+1))
result = ds.reduce(make_sparse_fn(0), reduce_fn)
self.assertValuesEqual(make_sparse_fn(i + 1), self.evaluate(result))
def testNested(self):
def reduce_fn(state, value):
state["dense"] += value["dense"]
state["sparse"] = value["sparse"]
return state
def make_sparse_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def map_fn(i):
return {"dense": math_ops.cast(i, dtype=dtypes.int64),
"sparse": make_sparse_fn(math_ops.cast(i, dtype=dtypes.int64))}
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1).map(map_fn)
result = ds.reduce(map_fn(0), reduce_fn)
result = self.evaluate(result)
self.assertEqual(((i + 1) * i) // 2, result["dense"])
self.assertValuesEqual(make_sparse_fn(i), result["sparse"])
def testDatasetSideEffect(self):
counter_var = variables.Variable(0)
def increment_fn(x):
counter_var.assign_add(1)
return x
def dataset_fn():
return dataset_ops.Dataset.range(10).map(increment_fn)
def reduce_fn(state, value):
return state + value
@function.defun
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 10)
def testSideEffect(self):
counter_var = variables.Variable(0)
def dataset_fn():
return dataset_ops.Dataset.range(10)
def reduce_fn(state, value):
counter_var.assign_add(1)
return state + value
@function.defun
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 10)
def testAutomaticControlDependencies(self):
counter_var = variables.Variable(1)
def dataset_fn():
return dataset_ops.Dataset.range(1)
def reduce1_fn(state, value):
counter_var.assign(counter_var + 1)
return state + value
def reduce2_fn(state, value):
counter_var.assign(counter_var * 2)
return state + value
@function.defun
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce1_fn)
_ = dataset_fn().reduce(np.int64(0), reduce2_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 4)
def testStateOnGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
state = constant_op.constant(0, dtype=dtypes.int64)
def reduce_fn(state, value):
with ops.device("/gpu:0"):
return state + value
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(state, reduce_fn)
self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerCancellation(self):
ds = dataset_ops.Dataset.from_tensors(1).repeat()
result = ds.reduce(0, lambda x, y: x + y)
with self.cached_session() as sess:
      # The `result` op is guaranteed not to complete before it is cancelled,
      # because the dataset being reduced is infinite.
thread = self.checkedThread(self.assert_op_cancelled, args=(result,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
def testInvalidFunction(self):
ds = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(ds.reduce(0, lambda _, __: ()))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/reduce_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.flat_map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
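# Editorial sketch (not part of the original tests): `flat_map()` maps each
# element to a dataset and then flattens the resulting datasets into one.
# A minimal illustration, assuming `tf` is the top-level TensorFlow module:
#
#   ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
#   ds = ds.flat_map(lambda x: tf.data.Dataset.from_tensors(x).repeat(x))
#   # yields 1, 2, 2, 3, 3, 3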
@test_util.run_all_in_graph_and_eager_modes
class FlatMapTest(test_base.DatasetTestBase):
# pylint: disable=g-long-lambda
def testFlatMapDataset(self):
repeats = [1, 2, 3, 4, 5, 0, 1]
components = np.array(repeats, dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensors([x]).repeat(x))
expected_output = []
for i in repeats:
expected_output.extend([[i]] * i)
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testNestedFlatMapDataset(self):
repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
components = np.array(repeats, dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(
lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y))
)
expected_output = []
for row in repeats:
for i in row:
expected_output.extend([i] * i)
self.assertDatasetProduces(dataset, expected_output=expected_output)
  # Note: no eager-mode coverage; this is a session-specific test.
@test_util.run_deprecated_v1
def testSkipEagerSharedResourceNestedFlatMapDataset(self):
repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
components = np.array(repeats, dtype=np.int64)
iterator = (
dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(
lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y))),
shared_name="shared_flat_map_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
# Create two concurrent sessions that share the same iterator
# resource on the same server, and verify that a random
# interleaving of `Session.run(get_next)` calls on the two
# sessions yields the expected result.
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess1:
with session.Session(server.target) as sess2:
for _ in range(3):
sess = random.choice([sess1, sess2])
sess.run(init_op)
for row in repeats:
for i in row:
for _ in range(i):
sess = random.choice([sess1, sess2])
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess = random.choice([sess1, sess2])
sess.run(get_next)
def testMapDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x ** 2}).flat_map(
lambda d: dataset_ops.Dataset.from_tensors(
d["foo"]).repeat(d["bar"]))
get_next = self.getNext(dataset)
for i in range(10):
for _ in range(i**2):
self.assertEqual(i * 2, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
for j in range(2):
expected_output.append([i, 0] if j % 2 == 0 else [0, -i])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testTensorArray(self):
def _map_fn(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i)))
def _flat_map_fn(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return dataset_ops.Dataset.from_tensor_slices(x.stack())
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
for j in range(i):
expected_output.append(j)
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testRagged(self):
def _map_fn(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1], [-1]])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
ragged_conversion_ops.to_tensor(x))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
expected_output.append([i])
expected_output.append([-i])
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/flat_map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.list_files()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
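# Editorial sketch (not part of the original tests): `list_files()` builds a
# dataset of file paths matching one or more glob patterns. By default the
# matched paths are shuffled; pass `shuffle=False` for a deterministic order,
# or a fixed `seed` for a repeatable shuffle. Illustration with a hypothetical
# pattern, assuming `tf` is the top-level TensorFlow module:
#
#   ds = tf.data.Dataset.list_files('/tmp/data/*.txt', shuffle=False)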
@test_util.run_all_in_graph_and_eager_modes
class ListFilesTest(test_base.DatasetTestBase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _touchTempFiles(self, filenames):
for filename in filenames:
open(path.join(self.tmp_dir, filename), 'a').close()
  # Note: eager mode fails with the same assertion error as the initializer
  # does in graph mode.
@test_util.run_deprecated_v1
def testSkipEagerEmptyDirectory(self):
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(dataset, expected_output=[])
def testSimpleDirectory(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
def testSimpleDirectoryNotShuffled(self):
filenames = ['b', 'c', 'a']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=False)
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in sorted(filenames)
])
def testFixedSeedResultsInRepeatableOrder(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
def dataset_fn():
return dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=True, seed=37)
expected_filenames = [
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
]
all_actual_filenames = []
for _ in range(3):
actual_filenames = []
next_element = self.getNext(dataset_fn(), requires_initialization=True)
try:
while True:
actual_filenames.append(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
all_actual_filenames.append(actual_filenames)
    # Each run should produce the same set of filenames, though possibly in a
    # different order from `expected_filenames`.
self.assertItemsEqual(expected_filenames, all_actual_filenames[0])
# However, the different runs should produce filenames in the same order
# as each other.
self.assertEqual(all_actual_filenames[0], all_actual_filenames[1])
self.assertEqual(all_actual_filenames[0], all_actual_filenames[2])
  def testEmptyDirectoryInitializer(self):
def dataset_fn():
return dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset_fn(),
expected_error=(errors.InvalidArgumentError,
'No files matched pattern'),
requires_initialization=True)
def testSimpleDirectoryInitializer(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
def testFileSuffixes(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[1:-1]
],
assert_items_equal=True)
def testFileMiddles(self):
filenames = ['a.txt', 'b.py', 'c.pyc']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[1:]
],
assert_items_equal=True)
def testNoShuffle(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
# Repeat the list twice and ensure that the order is the same each time.
# NOTE(mrry): This depends on an implementation detail of `list_files()`,
# which is that the list of files is captured when the iterator is
    # initialized. If that were not the case, or if the iterator were
    # initialized more than once, the non-determinism of `tf.matching_files()`
    # could cause this test to fail. However, it serves as a useful confirmation
# that the `shuffle=False` argument is working as intended.
# TODO(b/73959787): Provide some ordering guarantees so that this test is
# more meaningful.
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=False).repeat(2)
next_element = self.getNext(dataset)
expected_filenames = []
actual_filenames = []
for filename in filenames * 2:
expected_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
self.assertItemsEqual(expected_filenames, actual_filenames)
self.assertEqual(actual_filenames[:len(filenames)],
actual_filenames[len(filenames):])
def testMultiplePatternsAsList(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
patterns = [path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']]
dataset = dataset_ops.Dataset.list_files(patterns)
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[:-1]
],
assert_items_equal=True)
def testMultiplePatternsAsTensor(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
[path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']])
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[:-1]
],
assert_items_equal=True)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/list_files_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.range()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
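# Editorial sketch (not part of the original tests): `Dataset.range()` mirrors
# the signatures of Python's built-in `range()`. A minimal illustration,
# assuming `tf` is the top-level TensorFlow module:
#
#   tf.data.Dataset.range(5)         # 0, 1, 2, 3, 4
#   tf.data.Dataset.range(2, 5)      # 2, 3, 4
#   tf.data.Dataset.range(2, 10, 2)  # 2, 4, 6, 8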
@test_util.run_all_in_graph_and_eager_modes
class RangeTest(test_base.DatasetTestBase):
def testStop(self):
dataset = dataset_ops.Dataset.range(5)
self.assertDatasetProduces(dataset, expected_output=range(5))
def testStartStop(self):
start, stop = 2, 5
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(2, 5))
def testStartStopStep(self):
start, stop, step = 2, 10, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, 2))
def testZeroStep(self):
start, stop, step = 2, 10, 0
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(start, stop, step)
self.evaluate(dataset._variant_tensor)
def testNegativeStep(self):
start, stop, step = 2, 10, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, -1))
def testStopLessThanStart(self):
start, stop = 10, 2
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(10, 2))
def testStopLessThanStartWithPositiveStep(self):
start, stop, step = 10, 2, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, 2))
def testStopLessThanStartWithNegativeStep(self):
start, stop, step = 10, 2, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, -1))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/range_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Iterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
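# Editorial sketch (not part of the original tests): in graph mode a dataset is
# typically consumed through an iterator. A minimal illustration, assuming `tf`
# is the top-level TensorFlow module:
#
#   iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
#   next_element = iterator.get_next()  # a tensor; run it once per element
#
# Initializable iterators additionally expose `iterator.initializer`, which
# must be run before `get_next()` and can be re-run to restart iteration.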
class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
@test_util.deprecated_graph_mode_only
def testNoGradients(self):
component = constant_op.constant([1.])
side = constant_op.constant(0.)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset_ops.make_one_shot_iterator(dataset).get_next()
self.assertIsNone(gradients_impl.gradients(value, component)[0])
self.assertIsNone(gradients_impl.gradients(value, side)[0])
self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
@test_util.deprecated_graph_mode_only
def testCapturingStateInOneShotRaisesException(self):
var = variables.Variable(37.0, name="myvar")
dataset = (
dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
.map(lambda x: x + var))
with self.assertRaisesRegexp(
ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
"datasets that capture stateful objects.+myvar"):
dataset_ops.make_one_shot_iterator(dataset)
@test_util.deprecated_graph_mode_only
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14))
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for j in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % j
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(num_threads, len(results))
self.assertEqual(num_threads - 1,
len([None for r in results if r is None]))
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
@test_util.deprecated_graph_mode_only
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(array_ops.gather([0], [4]))
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
sess.run(next_element)
with self.cached_session() as sess:
def consumer_thread():
with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
@test_util.deprecated_graph_mode_only
def testSimpleSharedResource(self):
components = (np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64))
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(
components).map(lambda x, y, z: (x, y, z)),
shared_name="shared_iterator")
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = iterator_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(components))
get_next = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(dataset_3), [None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(
dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(
dataset_ops.get_legacy_output_types(dataset_4),
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(
[None], dataset_ops.get_legacy_output_shapes(iterator).as_list())
with self.cached_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testReinitializableIteratorWithFunctions(self):
def g():
for i in range(10):
yield i
iterator = iterator_ops.Iterator.from_structure(dtypes.int64, [])
next_element = iterator.get_next()
with self.cached_session() as sess:
dataset_1 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_1))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
dataset_2 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_2))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int32),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int64),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
@test_util.deprecated_graph_mode_only
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset_3),
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
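  # Editorial sketch (not part of the original test): the "feedable iterator"
  # pattern above lets a single `get_next()` op switch between datasets at run
  # time by feeding different string handles, roughly:
  #
  #   handle_ph = tf.compat.v1.placeholder(tf.string, shape=[])
  #   feedable = tf.compat.v1.data.Iterator.from_string_handle(
  #       handle_ph, dataset.output_types, dataset.output_shapes)
  #   sess.run(feedable.get_next(), feed_dict={handle_ph: some_handle})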
@test_util.deprecated_graph_mode_only
def testIteratorStringHandleFuture(self):
with forward_compat.forward_compatibility_horizon(2018, 8, 4):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset_3),
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
@test_util.deprecated_graph_mode_only
def testIteratorStringHandleReuseTensorObject(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
one_shot_iterator = dataset_ops.make_one_shot_iterator(dataset)
initializable_iterator = dataset_ops.make_initializable_iterator(dataset)
structure_iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(dataset))
created_ops = len(ops.get_default_graph().get_operations())
self.assertIs(one_shot_iterator.string_handle(),
one_shot_iterator.string_handle())
self.assertIs(initializable_iterator.string_handle(),
initializable_iterator.string_handle())
self.assertIs(structure_iterator.string_handle(),
structure_iterator.string_handle())
# Assert that getting the (default) string handle creates no ops.
self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
# Specifying an explicit name will create a new op.
handle_with_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo", handle_with_name.op.name)
self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
handle_with_same_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo_1", handle_with_same_name.op.name)
self.assertIsNot(handle_with_name, handle_with_same_name)
@test_util.deprecated_graph_mode_only
def testIteratorStringHandleError(self):
dataset_int_scalar = (
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.cached_session() as sess:
handle_int_scalar = sess.run(dataset_ops.make_one_shot_iterator(
dataset_int_scalar).string_handle())
handle_float_vector = sess.run(dataset_ops.make_one_shot_iterator(
dataset_float_vector).string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
@test_util.deprecated_graph_mode_only
def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 3
with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.session(config=worker_config) as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [1])
# Fails when target is cpu:2 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
})
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
@test_util.deprecated_graph_mode_only
def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
s1 = server_lib.Server.create_local_server()
s2 = server_lib.Server.create_local_server()
s3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
workers = cluster_def.job.add()
workers.name = "worker"
workers.tasks[0] = s1.target[len("grpc://"):]
workers.tasks[1] = s2.target[len("grpc://"):]
client = cluster_def.job.add()
client.name = "client"
client.tasks[0] = s3.target[len("grpc://"):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
worker_devices = [
"/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
]
itr_handles = []
for device in worker_devices:
with ops.device(device):
src = dataset_ops.Dataset.from_tensor_slices([device])
itr = dataset_ops.make_one_shot_iterator(src)
itr_handles.append(itr.string_handle())
targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)
@function.Defun(dtypes.string)
def loading_func(h):
remote_itr = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(itr),
dataset_ops.get_legacy_output_shapes(itr))
return remote_itr.get_next()
def map_fn(target, handle):
return functional_ops.remote_call(
args=[handle], Tout=[dtypes.string], f=loading_func, target=target)
with ops.device("/job:client"):
client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
itr = dataset_ops.make_initializable_iterator(client_dataset)
n = itr.get_next()
with session.Session(s3.target, config=config) as sess:
sess.run(itr.initializer)
expected_values = worker_devices
for expected in expected_values:
self.assertEqual((compat.as_bytes(expected),), sess.run(n))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
@test_util.deprecated_graph_mode_only
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
def _encode_raw(byte_array):
return bytes(bytearray(byte_array))
@function.Defun(dtypes.uint8)
def _remote_fn(h):
handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
iterator_3_handle_uint8 = parsing_ops.decode_raw(
input_bytes=iterator_3_handle, out_type=dtypes.uint8)
remote_op = functional_ops.remote_call(
args=[iterator_3_handle_uint8],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.cached_session() as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [1])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
@test_util.deprecated_graph_mode_only
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.make_one_shot_iterator(dataset_ops.Dataset.range(10))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertIn(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
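  # Editorial note (not part of the original test): every call to
  # `iterator.get_next()` adds a new op to the graph, which is why TensorFlow
  # warns after many calls. The recommended pattern is to call it once and
  # reuse the resulting tensor:
  #
  #   next_element = iterator.get_next()
  #   for _ in range(100):
  #     sess.run(next_element)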
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
tensor_spec.TensorSpec([],
dtypes.float32), ops.Tensor, dtypes.float32, []),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32),
sparse_tensor.SparseTensor, dtypes.int32, [1]),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (tensor_spec.TensorSpec(
[1], dtypes.string), tensor_spec.TensorSpec([], dtypes.string))
}, {
"a": ops.Tensor,
"b": (ops.Tensor, ops.Tensor)
}, {
"a": dtypes.float32,
"b": (dtypes.string, dtypes.string)
}, {
"a": [],
"b": ([1], [])
}),
)
def testIteratorStructure(self, tf_value_fn, expected_element_structure,
expected_output_classes, expected_output_types,
expected_output_shapes):
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
def testIteratorGetNextName(self):
with ops.Graph().as_default():
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(37.0))
next_element = iterator.get_next(name="overridden_name")
self.assertEqual("overridden_name", next_element.op.name)
@parameterized.named_parameters(
("Async", context.ASYNC),
("Sync", context.SYNC),
)
def testIteratorEagerIteration(self, execution_mode):
with context.eager_mode(), context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for foo in iterator:
self.assertEqual(val, foo.numpy())
val += 1
@test_util.run_v2_only
def testIteratorV2Function(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for _ in range(10):
queue.enqueue(next(iterator))
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
@test_util.run_v2_only
def testIteratorV2FunctionError(self):
# In this test we verify that a function that raises an error ends up
# properly deallocating the iterator resource.
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
queue.enqueue(0)
def init_fn(n):
return n
def next_fn(_):
ds = dataset_ops.Dataset.range(0)
return next(iter(ds))
def finalize_fn(n):
queue.enqueue(0)
return n
@def_function.function
def fn():
dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn)
iterator = iter(dataset)
next(iterator)
with self.assertRaises(errors.OutOfRangeError):
fn()
self.assertEqual(queue.size().numpy(), 2)
@test_util.run_v2_only
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for elem in iterator:
counter += elem
return counter
dataset = dataset_ops.Dataset.range(5)
dataset2 = dataset_ops.Dataset.range(10)
for _ in range(10):
self.assertEqual(self.evaluate(f(iter(dataset))), 10)
self.assertEqual(self.evaluate(f(iter(dataset2))), 45)
self.assertEqual(trace_count[0], 1)
def assert_dataset_placement(self, host_dataset, host_iterator, host_tensor,
device_dataset, device_iterator, device_tensor):
self.assertTrue(
"cpu:0" in host_dataset._variant_tensor.device.lower() or
host_dataset._variant_tensor.device == ""
)
self.assertTrue(
"cpu:0" in host_iterator._iterator_resource.device.lower() or
host_iterator._iterator_resource.device == ""
)
self.assertTrue(
"cpu:0" in host_tensor.device.lower() or host_tensor.device == ""
)
self.assertIn("gpu:0", device_dataset._variant_tensor.device.lower())
self.assertIn("gpu:0", device_iterator._iterator_resource.device.lower())
self.assertIn("gpu:0", device_tensor.device.lower())
# @test_util.deprecated_graph_mode_only
# def testIteratorOnDeviceGraphModeOneShotIterator(self):
# if not test_util.is_gpu_available():
# self.skipTest("No GPU available")
#
# host_dataset = dataset_ops.Dataset.range(10)
# device_dataset = host_dataset.apply(
# prefetching_ops.prefetch_to_device("/gpu:0"))
#
# host_iterator = dataset_ops.make_one_shot_iterator(host_dataset)
# device_iterator = dataset_ops.make_one_shot_iterator(device_dataset)
#
# host_tensor = host_iterator.get_next()
# device_tensor = device_iterator.get_next()
#
# self.assert_dataset_placement(
# host_dataset, host_iterator, host_tensor,
# device_dataset, device_iterator, device_tensor
# )
#
# @test_util.deprecated_graph_mode_only
# def testIteratorOnDeviceGraphModeInitializableIterator(self):
# if not test_util.is_gpu_available():
# self.skipTest("No GPU available")
#
# host_dataset = dataset_ops.Dataset.range(10)
# device_dataset = host_dataset.apply(
# prefetching_ops.prefetch_to_device("/gpu:0"))
#
# host_iterator = dataset_ops.make_initializable_iterator(host_dataset)
# device_iterator = dataset_ops.make_initializable_iterator(device_dataset)
#
# host_tensor = host_iterator.get_next()
# device_tensor = device_iterator.get_next()
#
# self.assert_dataset_placement(
# host_dataset, host_iterator, host_tensor,
# device_dataset, device_iterator, device_tensor
# )
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/iterator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
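# Editorial sketch (not part of the original tests):
# `from_sparse_tensor_slices()` slices a `tf.SparseTensor` along its first
# dimension, producing one element per row of the original tensor. A minimal
# illustration, assuming `tf` is the top-level TensorFlow module:
#
#   st = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0],
#                        dense_shape=[2, 3])
#   ds = tf.data.Dataset.from_sparse_tensor_slices(st)  # two elements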
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class FromSparseTensorSlicesTest(test_base.DatasetTestBase):
def testFromSparseTensorSlices(self):
"""Test a dataset based on slices of a `tf.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
# Test with sparse tensor in the appropriate order.
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non-empty values.
empty_indices = np.empty((0, 4), dtype=np.int64)
non_empty_values = [1, 2, 3, 4]
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
non_empty_values,
empty_dense_shape)
# Running the initializer with this feed is expected to fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid2(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with malformed sparse input: `indices` has one empty row, which is
# inconsistent with the empty `values` and the rank-2 `dense_shape`.
empty_indices = [[]]
empty_values = []
dense_shape = [1, 1]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
dense_shape)
# Running the initializer with this feed is expected to fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shuffle()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ShuffleTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testShuffleDataset(self):
components = (
np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0])
)
def dataset_fn(count=5, buffer_size=None, seed=0):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if buffer_size:
shuffle_dataset = repeat_dataset.shuffle(buffer_size, seed)
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(shuffle_dataset))
return shuffle_dataset
else:
return repeat_dataset
# First run without shuffling to collect the "ground truth".
get_next = self.getNext(dataset_fn())
unshuffled_elements = []
for _ in range(20):
unshuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the shuffled dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
shuffled_elements = []
for _ in range(20):
shuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(sorted(unshuffled_elements), sorted(shuffled_elements))
# Assert that shuffling twice with the same seeds gives the same sequence.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
reshuffled_elements_same_seed = []
for _ in range(20):
reshuffled_elements_same_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(shuffled_elements, reshuffled_elements_same_seed)
# Assert that shuffling twice with a different seed gives a different
# permutation of the same elements.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=137))
reshuffled_elements_different_seed = []
for _ in range(20):
reshuffled_elements_different_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertNotEqual(shuffled_elements, reshuffled_elements_different_seed)
self.assertAllEqual(
sorted(shuffled_elements), sorted(reshuffled_elements_different_seed))
# Assert that the shuffled dataset has the same elements as the
# "ground truth" when the buffer size is smaller than the input
# dataset.
get_next = self.getNext(dataset_fn(buffer_size=2, seed=37))
reshuffled_elements_small_buffer = []
for _ in range(20):
reshuffled_elements_small_buffer.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(
sorted(unshuffled_elements), sorted(reshuffled_elements_small_buffer))
# Test the case of shuffling an empty dataset.
get_next = self.getNext(dataset_fn(count=0, buffer_size=100, seed=37))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
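# A minimal note on the behavior exercised above (illustration only):
# `shuffle(buffer_size, seed)` fills a buffer of `buffer_size` elements and
# samples uniformly from it, so the output is a permutation of the input.
# With a fixed seed, e.g.
#
#   dataset_ops.Dataset.range(10).shuffle(10, seed=37)
#
# the permutation is reproducible across re-creations of the dataset, which
# is what the same-seed/different-seed assertions above rely on.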
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSeedZero(self):
"""Test for same behavior when the seed is a Python or Tensor zero."""
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=0))
get_next = iterator.get_next()
elems = []
with self.cached_session() as sess:
for _ in range(10):
elems.append(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
seed_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=seed_placeholder))
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={seed_placeholder: 0})
for elem in elems:
self.assertEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testDefaultArguments(self):
components = [0, 1, 2, 3, 4]
dataset = dataset_ops.Dataset.from_tensor_slices(components).shuffle(
5).repeat()
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=[1, 2], mode="graph"),
combinations.combine(reshuffle=[True, False]),
combinations.combine(graph_seed=38, op_seed=None) +
combinations.combine(graph_seed=None, op_seed=42) +
combinations.combine(graph_seed=38, op_seed=42)))
def testShuffleSeed(self, reshuffle, graph_seed, op_seed):
results = []
for _ in range(2):
with ops.Graph().as_default() as g:
random_seed.set_random_seed(graph_seed)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=op_seed, reshuffle_each_iteration=reshuffle).repeat(3)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
run_results = []
with self.session(graph=g) as sess:
for _ in range(30):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertAllEqual(results[0], results[1])
# TODO(b/117581999): enable this test for eager-mode.
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=[1, 2], mode="graph"),
combinations.combine(
reshuffle=[True, False], initializable=[True, False])))
def testMultipleIterators(self, reshuffle, initializable):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(100).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(3)
if initializable:
iterators = [dataset_ops.make_initializable_iterator(dataset)
for _ in range(2)]
else:
iterators = [dataset_ops.make_one_shot_iterator(dataset)
for _ in range(2)]
results = []
with self.session(graph=g) as sess:
for iterator in iterators:
if initializable:
sess.run(iterator.initializer)
next_element = iterator.get_next()
run_results = []
for _ in range(300):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertNotEqual(results[0], results[1])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleRepeatEpochs(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle).repeat(2)
next_element = self.getNext(dataset)
first_epoch = []
for _ in range(10):
first_epoch.append(self.evaluate(next_element()))
second_epoch = []
for _ in range(10):
second_epoch.append(self.evaluate(next_element()))
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=2, mode="eager"),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleIterationEpochs(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle)
first_epoch = []
for elem in dataset:
first_epoch.append(elem.numpy())
second_epoch = []
for elem in dataset:
second_epoch.append(elem.numpy())
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2ResourceCapture(self):
def make_dataset():
ids = dataset_ops.Dataset.range(10)
ids = ids.shuffle(1)
def interleave_fn(dataset, _):
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.interleave(functools.partial(interleave_fn, ids))
return dataset
results = []
for elem in make_dataset():
results.append(elem.numpy())
self.assertAllEqual(results, range(10))
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=[1, 2], mode="eager"),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleSeparateTransformations(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10)
first_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
first_epoch.append(elem.numpy())
second_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
second_epoch.append(elem.numpy())
self.assertEqual(first_epoch != second_epoch, seed is None)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/shuffle_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.TextLineDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
try:
import psutil # pylint: disable=g-import-not-at-top
psutil_import_succeeded = True
except ImportError:
psutil_import_succeeded = False
@test_util.run_all_in_graph_and_eager_modes
class TextLineDatasetTest(test_base.DatasetTestBase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is the last record
# of the file; file 0 still gets a trailing newline so that both
# end-of-file variants (with and without a final newline) are exercised.
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
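# Illustrative note (not part of the original test): files written by
# `_createFiles` with a compression_type are read back by passing the same
# value to the reader, e.g.
#
#   readers.TextLineDataset(filenames, compression_type="GZIP")
#
# which yields one string tensor per line with the trailing newline (or
# CRLF) stripped.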
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
def dataset_fn(filenames, num_epochs, batch_size=None):
repeat_dataset = readers.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
# Basic test: read from file 0.
expected_output = [self._lineText(0, i) for i in range(5)]
self.assertDatasetProduces(
dataset_fn([test_filenames[0]], 1), expected_output=expected_output)
# Basic test: read from file 1.
self.assertDatasetProduces(
dataset_fn([test_filenames[1]], 1),
expected_output=[self._lineText(1, i) for i in range(5)])
# Basic test: read from both files.
expected_output = [self._lineText(0, i) for i in range(5)]
expected_output.extend([self._lineText(1, i) for i in range(5)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 1), expected_output=expected_output)
# Test repeated iteration through both files.
expected_output = [self._lineText(0, i) for i in range(5)]
expected_output.extend([self._lineText(1, i) for i in range(5)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 10), expected_output=expected_output * 10)
# Test batched and repeated iteration through both files.
self.assertDatasetProduces(
dataset_fn(test_filenames, 10, 5),
expected_output=[[self._lineText(0, i) for i in range(5)],
[self._lineText(1, i) for i in range(5)]] * 10)
def testTextLineDatasetParallelRead(self):
test_filenames = self._createFiles(10, 10)
files = dataset_ops.Dataset.from_tensor_slices(test_filenames).repeat(10)
expected_output = []
for j in range(10):
expected_output.extend([self._lineText(j, i) for i in range(10)])
dataset = readers.TextLineDataset(files, num_parallel_reads=4)
self.assertDatasetProduces(
dataset, expected_output=expected_output * 10, assert_items_equal=True)
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
def testTextLineDatasetBuffering(self):
test_filenames = self._createFiles(2, 5, crlf=True)
repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
expected_output = []
for j in range(2):
expected_output.extend([self._lineText(j, i) for i in range(5)])
self.assertDatasetProduces(repeat_dataset, expected_output=expected_output)
def testIteratorResourceCleanup(self):
filename = os.path.join(self.get_temp_dir(), "text.txt")
with open(filename, "wt") as f:
for i in range(3):
f.write("%d\n" % (i,))
with context.eager_mode():
first_iterator = iter(readers.TextLineDataset(filename))
self.assertEqual(b"0", next(first_iterator).numpy())
second_iterator = iter(readers.TextLineDataset(filename))
self.assertEqual(b"0", next(second_iterator).numpy())
# Eager kernel caching is based on op attributes, which includes the
# Dataset's output shape. Create a different kernel to test that they
# don't create resources with the same names.
different_kernel_iterator = iter(
readers.TextLineDataset(filename).repeat().batch(16))
self.assertEqual([16], next(different_kernel_iterator).shape)
# Remove our references to the Python Iterator objects, which (assuming no
# reference cycles) is enough to trigger DestroyResourceOp and close the
# partially-read files.
del first_iterator
del second_iterator
del different_kernel_iterator
if not psutil_import_succeeded:
self.skipTest(
"psutil is required to check that we've closed our files.")
open_files = psutil.Process().open_files()
self.assertNotIn(filename, [open_file.path for open_file in open_files])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/text_line_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_tensor_slices()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FromTensorSlicesTest(test_base.DatasetTestBase):
def testFromTensorSlices(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (
np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
results = self.evaluate(get_next())
def testFromTensorSlicesDataset(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensor_slices(dss)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
def testFromTensorSlicesDatasetInFunction(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensors(dss)
ds = ds.flat_map(dataset_ops.Dataset.from_tensor_slices)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
def testFromTensorSlicesSparse(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape[1:]) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
self.assertDatasetProduces(dataset, expected_output=expected)
def testFromTensorSlicesMixed(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (np.tile(np.array([[1], [2], [3]]), 20),
np.tile(np.array([[12], [13], [14]]), 22),
np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape[1:])
if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
self.assertValuesEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesWithDict(self):
components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["foo"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["bar"])
self.assertEqual((), dataset_ops.get_legacy_output_shapes(dataset)["foo"])
self.assertEqual((1,), dataset_ops.get_legacy_output_shapes(dataset)["bar"])
for i in range(3):
results = self.evaluate(get_next())
self.assertEqual(components["foo"][i], results["foo"])
self.assertEqual(components["bar"][i], results["bar"])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
expected = [(ragged_factory_ops.constant_value([[0]]),
ragged_factory_ops.constant_value([[3]])),
(ragged_factory_ops.constant_value([[1]]),
ragged_factory_ops.constant_value([[4]])),
(ragged_factory_ops.constant_value([[2]]),
ragged_factory_ops.constant_value([[5]]))]
self.assertDatasetProduces(dataset, expected_output=expected)
def testFromTensorSlicesMixedRagged(self):
components = (np.tile(np.array([[1], [2], [3]]),
20), np.tile(np.array([[12], [13], [14]]),
22), np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[0]
])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[1]
])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[2]
])),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
self.assertValuesEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1]], dtype=np.uint8), 2),
np.tile(np.array([[2], [256]], dtype=np.uint16), 2),
np.tile(np.array([[4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[8], [4294967296]], dtype=np.uint64), 2),
)
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(2)]
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(expected_types,
dataset_ops.get_legacy_output_types(dataset))
self.assertDatasetProduces(dataset, expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/from_tensor_slices_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _interleave(lists, cycle_length, block_length):
"""Reference implementation of interleave used for testing.
Args:
lists: a list of lists to interleave
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
Yields:
Elements of `lists` interleaved in the order determined by `cycle_length`
and `block_length`.
"""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
if cycle_length == dataset_ops.AUTOTUNE:
cycle_length = multiprocessing.cpu_count()
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)]
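# A worked example of the reference implementation above (an illustrative
# sketch, not used by the tests). With cycle_length=2 and block_length=1 the
# generator round-robins over the two open iterators one element at a time,
# mirroring the deterministic behavior that `Dataset.interleave` is expected
# to follow for the same inputs.
def _example_interleave_reference():
  """Hypothetical helper demonstrating `_interleave`; not called by tests."""
  result = list(_interleave([[3, 3, 3], [2, 2]], cycle_length=2,
                            block_length=1))
  assert result == [3, 2, 3, 2, 3]
  return result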
@test_util.run_all_in_graph_and_eager_modes
class InterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", [4, 5, 6], 1, 1, [
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5, 5, 5,
5, 6, 6, 6, 6, 6, 6
]),
("2", [4, 5, 6], 2, 1, [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6,
5, 6, 5, 6, 5, 6, 6
]),
("3", [4, 5, 6], 2, 3, [
4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6, 4, 5, 5, 5, 6,
6, 6, 5, 5, 6, 6, 6
]),
("4", [4, 5, 6], 7, 2, [
4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6,
6, 5, 6, 6, 5, 6, 6
]),
("5", [4, 0, 6], 2, 1,
[4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6]),
)
def testPythonImplementation(self, input_values, cycle_length, block_length,
expected_elements):
input_lists = _repeat(input_values, 2)
for expected, produced in zip(
expected_elements, _interleave(input_lists, cycle_length,
block_length)):
self.assertEqual(expected, produced)
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 1, 3, None),
("2", np.int64([4, 5, 6]), 1, 3, 1),
("3", np.int64([4, 5, 6]), 2, 1, None),
("4", np.int64([4, 5, 6]), 2, 1, 1),
("5", np.int64([4, 5, 6]), 2, 1, 2),
("6", np.int64([4, 5, 6]), 2, 3, None),
("7", np.int64([4, 5, 6]), 2, 3, 1),
("8", np.int64([4, 5, 6]), 2, 3, 2),
("9", np.int64([4, 5, 6]), 7, 2, None),
("10", np.int64([4, 5, 6]), 7, 2, 1),
("11", np.int64([4, 5, 6]), 7, 2, 3),
("12", np.int64([4, 5, 6]), 7, 2, 5),
("13", np.int64([4, 5, 6]), 7, 2, 7),
("14", np.int64([4, 5, 6]), dataset_ops.AUTOTUNE, 3, None),
("15", np.int64([4, 5, 6]), dataset_ops.AUTOTUNE, 3, 1),
("16", np.int64([]), 2, 3, None),
("17", np.int64([0, 0, 0]), 2, 3, None),
("18", np.int64([4, 0, 6]), 2, 3, None),
("19", np.int64([4, 0, 6]), 2, 3, 1),
("20", np.int64([4, 0, 6]), 2, 3, 2),
)
def testInterleaveDataset(self, input_values, cycle_length, block_length,
num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
expected_output = [
element for element in _interleave(
_repeat(input_values, count), cycle_length, block_length)
]
self.assertDatasetProduces(dataset, expected_output)
@parameterized.named_parameters(
("1", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, None),
("2", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, 1),
("3", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, None),
("4", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 1),
("5", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 2),
("6", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, None),
("7", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 1),
("8", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 2),
("9", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, None),
("10", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 1),
("11", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 3),
("12", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 5),
("13", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 7),
)
def testInterleaveDatasetError(self, input_values, cycle_length, block_length,
num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
lambda x: array_ops.check_numerics(x, "message")).interleave(
dataset_ops.Dataset.from_tensors, cycle_length, block_length,
num_parallel_calls)
get_next = self.getNext(dataset)
for value in input_values:
if np.isnan(value):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
else:
self.assertEqual(value, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testInterleaveSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1)
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 1, 3, 1),
("2", np.int64([4, 5, 6]), 2, 1, 1),
("3", np.int64([4, 5, 6]), 2, 1, 2),
("4", np.int64([4, 5, 6]), 2, 3, 1),
("5", np.int64([4, 5, 6]), 2, 3, 2),
("6", np.int64([4, 5, 6]), 7, 2, 1),
("7", np.int64([4, 5, 6]), 7, 2, 3),
("8", np.int64([4, 5, 6]), 7, 2, 5),
("9", np.int64([4, 5, 6]), 7, 2, 7),
("10", np.int64([4, 5, 6]), dataset_ops.AUTOTUNE, 3, 1),
("11", np.int64([4, 0, 6]), 2, 3, 1),
("12", np.int64([4, 0, 6]), 2, 3, 2),
)
def testSloppyInterleaveDataset(self, input_values, cycle_length,
block_length, num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset.with_options(options)
expected_output = [
element for element in _interleave(
_repeat(input_values, count), cycle_length, block_length)
]
get_next = self.getNext(dataset)
actual_output = []
for _ in range(len(expected_output)):
actual_output.append(self.evaluate(get_next()))
# Sloppy interleave may reorder elements, so compare sorted copies.
self.assertAllEqual(sorted(expected_output), sorted(actual_output))
def testInterleaveMap(self):
dataset = dataset_ops.Dataset.range(100)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
return dataset.map(lambda x: x + x)
dataset = dataset.interleave(interleave_fn, cycle_length=5)
dataset = dataset.interleave(interleave_fn, cycle_length=5)
self.assertDatasetProduces(dataset, [4 * x for x in range(100)])
def testParallelInterleaveCached(self):
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.cache(os.path.join(self.get_temp_dir(), "cache_dir"))
def interleave_fn(x):
return dataset_ops.Dataset.from_tensors(x)
dataset = dataset.interleave(
interleave_fn, cycle_length=2, num_parallel_calls=2)
self.assertDatasetProduces(dataset, list(range(5)))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/interleave_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing tf.data iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
# TODO(jsimsa): Add missing test combinations.
class CheckpointTest(test_base.DatasetTestBase, parameterized.TestCase):
def tearDown(self):
super(CheckpointTest, self).tearDown()
prefix = self._iterator_checkpoint_prefix()
pattern = prefix + "*"
files = gfile.Glob(pattern)
# Eagerly remove any leftover checkpoint files (a bare `map` would be lazy
# and a no-op under Python 3).
for checkpoint_file in files:
  gfile.Remove(checkpoint_file)
def _iterator_checkpoint_prefix(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(self, iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
self._iterator_checkpoint_prefix(),
parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(self, iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(self._iterator_checkpoint_prefix()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
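# The two helpers above sketch the low-level round trip used throughout this
# file: `serialize_iterator` turns the iterator state into a variant tensor,
# which is serialized with `serialize_tensor` and written to a file; restore
# reverses the path (`read_file` -> `parse_tensor(..., dtypes.variant)` ->
# `deserialize_iterator`). The tests below run save_op after consuming part
# of the input and restore_op on a fresh (or re-initialized) iterator.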
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="graph"))
def testSaveRestore(self):
def _build_graph(start, stop):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Saving and restoring in same session.
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="graph"))
def testInitThenRestore(self):
# Note: Calling init_op before restore_op is redundant. This test just makes
# sure we do not fail if restore is called on an already initialized
# iterator resource.
def _build_graph(start, stop):
dataset = dataset_ops.Dataset.range(start, stop)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="graph"))
def testMultipleSaves(self):
def _build_graph(start, stop):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
break_point1 = 5
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point1):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point1, break_point2):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point2, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="graph"))
def testSaveRestoreWithRepeat(self):
def _build_graph(start, stop, num_epochs):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop).repeat(num_epochs))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
num_epochs = 5
break_range = 5
break_epoch = 3
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(
start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(init_op)
sess.run(restore_op)
for _ in range(break_epoch - 1):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
for i in range(start, break_range):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_range, stop):
self.assertEqual(i, sess.run(get_next))
for _ in range(break_epoch, num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="graph"))
def testSaveRestoreExhaustedIterator(self):
def _build_graph(start, stop, num_epochs):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop).repeat(num_epochs))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
num_epochs = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(
start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(init_op)
sess.run(restore_op)
for _ in range(num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="eager"))
def testSaveRestoreOneShotIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(
math_ops.square).batch(2)
# TODO(b/138399725): Re-enable default optimizations.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = iter(dataset)
get_next = iterator.get_next
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual([1, 4], get_next())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([9, 16], get_next())
self.assertAllEqual([25, 36], get_next())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual([9, 16], get_next())
self.assertAllEqual([25, 36], get_next())
with self.assertRaises(errors.OutOfRangeError):
get_next()
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="eager"))
def testSaveRestoreMultipleIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.from_tensor_slices(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
# TODO(b/138399725): Re-enable default optimizations.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator_1 = iter(dataset)
get_next_1 = iterator_1.get_next
iterator_2 = iter(dataset)
get_next_2 = iterator_2.get_next
dataset_2 = dataset_ops.Dataset.range(10)
iterator_3 = iter(dataset_2)
get_next_3 = iterator_3.get_next
checkpoint = trackable_utils.Checkpoint(
iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
self.assertAllEqual([1, 4], get_next_1())
self.assertAllEqual(0, get_next_3())
self.assertAllEqual(1, get_next_3())
self.assertAllEqual(2, get_next_3())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([1, 4], get_next_2())
self.assertAllEqual([9, 16], get_next_2())
self.assertAllEqual(3, get_next_3())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual([9, 16], get_next_1())
self.assertAllEqual([1, 4], get_next_2())
self.assertAllEqual(3, get_next_3())
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="eager"))
def testRestoreExhaustedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.range(3)
iterator = iter(dataset)
get_next = iterator.get_next
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual(0, get_next())
self.assertAllEqual(1, get_next())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual(2, get_next())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual(2, get_next())
save_path = checkpoint.save(checkpoint_prefix)
checkpoint.restore(save_path).run_restore_ops()
with self.assertRaises(errors.OutOfRangeError):
get_next()
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode="eager"))
def testRestoreInReconstructedIteratorInitializable(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
get_next = iterator.get_next
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
for i in range(5):
checkpoint.restore(
checkpoint_management.latest_checkpoint(
checkpoint_directory)).initialize_or_restore()
for j in range(2):
self.assertEqual(i * 2 + j, self.evaluate(get_next()))
checkpoint.save(file_prefix=checkpoint_prefix)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/checkpoint_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.take()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class TakeTest(test_base.DatasetTestBase):
def testTakeTensorDataset(self):
components = (np.arange(10),)
def do_test(count):
dataset = dataset_ops.Dataset.from_tensor_slices(components).take(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
num_output = min(count, 10) if count != -1 else 10
self.assertDatasetProduces(
dataset, [tuple(components[0][i:i + 1]) for i in range(num_output)])
# Take fewer than input size
do_test(4)
# Take more than input size
do_test(25)
# Take all of input
do_test(-1)
# Take nothing
do_test(0)
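# A minimal sketch of the semantics exercised above (illustration only, not
# used by the test): `take(count)` yields at most `count` elements, and
# `count == -1` (or any count larger than the input) yields everything.
def _example_take_sketch():
  """Hypothetical helper; builds (but does not run) two take() pipelines."""
  first_four = dataset_ops.Dataset.range(10).take(4)    # -> 0, 1, 2, 3
  everything = dataset_ops.Dataset.range(10).take(-1)   # -> 0 .. 9
  return first_four, everything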
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/take_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testAsSerializedGraph(self):
dataset = dataset_ops.Dataset.range(10)
graph = graph_pb2.GraphDef().FromString(
self.evaluate(dataset._as_serialized_graph()))
self.assertTrue(any([node.op == "RangeDataset" for node in graph.node]))
def testAsSerializedGraphStateful(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda _: random_ops.random_uniform(()))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(dataset._as_serialized_graph())
@combinations.generate(test_base.default_test_combinations())
def testAsFunctionWithMap(self):
if not context.executing_eagerly():
self.skipTest("Only works executing eagerly")
with ops.device("CPU"):
original_dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
fn = original_dataset._trace_variant_creation()
variant = fn()
revived_dataset = dataset_ops._VariantDataset(
variant, original_dataset.element_spec)
self.assertDatasetProduces(revived_dataset, range(0, 10, 2))
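# Descriptive note (not part of the original test): `_trace_variant_creation`
# and `_VariantDataset` are private helpers. The test above traces the
# dataset into a function that returns its variant tensor and then rebuilds
# an equivalent dataset from that variant plus the original `element_spec`,
# checking that the round trip preserves the produced elements.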
@combinations.generate(test_base.default_test_combinations())
def testAsFunctionWithMapInFlatMap(self):
if not context.executing_eagerly():
self.skipTest("Only works executing eagerly")
with ops.device("CPU"):
original_dataset = dataset_ops.Dataset.range(5).flat_map(
lambda x: dataset_ops.Dataset.range(5).map(lambda x: x * 2))
fn = original_dataset._trace_variant_creation()
variant = fn()
revived_dataset = dataset_ops._VariantDataset(
variant, original_dataset.element_spec)
self.assertDatasetProduces(revived_dataset, list(original_dataset))
def checkNumInputs(self, dataset, num_inputs):
self.assertLen(dataset._inputs(), num_inputs)
@combinations.generate(test_base.default_test_combinations())
def testFixedLengthRecordInputs(self):
dataset = readers.FixedLengthRecordDataset("", 42)
self.checkNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorInputs(self):
def gen():
yield 42
dataset = dataset_ops.Dataset.from_generator(gen, dtypes.int32)
self.checkNumInputs(dataset, 1)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsInputs(self):
dataset = dataset_ops.Dataset.from_tensors([42])
self.checkNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testRangeInputs(self):
dataset = dataset_ops.Dataset.range(10)
self.checkNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testTextLineInputs(self):
dataset = readers.TextLineDataset("")
self.checkNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testTFRecordInputs(self):
dataset = readers.TFRecordDataset("")
self.checkNumInputs(dataset, 1)
@combinations.generate(
combinations.combine(tf_api_version=1, mode=["eager", "graph"]))
def testDatasetComplexSourceInputs(self):
dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices(
sparse_tensor.SparseTensor(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])))
self.assertEmpty(dataset_fn._inputs())
def checkUnaryInputs(self, dataset_fn):
input_dataset = dataset_ops.Dataset.range(0)
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
@combinations.generate(test_base.default_test_combinations())
def testBatchInputs(self):
self.checkUnaryInputs(lambda x: x.batch(10))
@combinations.generate(test_base.default_test_combinations())
def testCacheInputs(self):
self.checkUnaryInputs(lambda x: x.cache())
@combinations.generate(test_base.default_test_combinations())
def testFilterInputs(self):
self.checkUnaryInputs(lambda x: x.filter(lambda x: True))
@combinations.generate(test_base.default_test_combinations())
def testFlatMapInputs(self):
self.checkUnaryInputs(
lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)))
@combinations.generate(test_base.default_test_combinations())
def testMapInputs(self):
self.checkUnaryInputs(lambda x: x.map(lambda x: x))
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchInputs(self):
self.checkUnaryInputs(lambda x: x.padded_batch(10, []))
@combinations.generate(test_base.default_test_combinations())
def testParallelMapInputs(self):
self.checkUnaryInputs(lambda x: x.map(lambda x: x, num_parallel_calls=2))
@combinations.generate(test_base.default_test_combinations())
def testRepeatInputs(self):
self.checkUnaryInputs(lambda x: x.repeat())
@combinations.generate(test_base.default_test_combinations())
def testShuffleInputs(self):
self.checkUnaryInputs(lambda x: x.shuffle(10))
@combinations.generate(test_base.default_test_combinations())
def testSkipInputs(self):
self.checkUnaryInputs(lambda x: x.skip(1))
@combinations.generate(test_base.default_test_combinations())
def testTakeInputs(self):
self.checkUnaryInputs(lambda x: x.take(1))
@combinations.generate(test_base.default_test_combinations())
def testWindowInputs(self):
self.checkUnaryInputs(lambda x: x.window(10))
@combinations.generate(test_base.default_test_combinations())
def testUnaryTransformationInputsApply(self):
input_dataset = dataset_ops.Dataset.range(0)
dataset = input_dataset.apply(lambda dataset: dataset.cache())
self.assertEqual([input_dataset], dataset._inputs())
def checkInputsWithInterleaveFn(self, dataset_fn, interleave_parallelism):
input_dataset = dataset_ops.Dataset.range(0)
dataset = input_dataset.interleave(
lambda x: dataset_ops.Dataset.range(0),
cycle_length=2,
num_parallel_calls=interleave_parallelism)
self.assertEqual([input_dataset], dataset._inputs())
@combinations.generate(test_base.default_test_combinations())
def testParallelInterleaveInputs(self):
    self.checkInputsWithInterleaveFn(lambda: dataset_ops.Dataset.range(0), 2)
@combinations.generate(test_base.default_test_combinations())
def testInterleaveInputs(self):
    self.checkInputsWithInterleaveFn(
        lambda: dataset_ops.Dataset.range(0), None)
@combinations.generate(test_base.default_test_combinations())
def testNoWarnings(self):
with test.mock.patch.object(warnings, "warn") as mock_log:
dataset_ops.Dataset.range(0).interleave(
lambda x: dataset_ops.Dataset.range(0), cycle_length=2)
self.assertEmpty(mock_log.call_args_list)
def checkBinaryInputs(self, dataset_fn):
input1 = dataset_ops.Dataset.range(0)
input2 = dataset_ops.Dataset.range(1)
self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())
@combinations.generate(test_base.default_test_combinations())
def testConcatenateInputs(self):
self.checkBinaryInputs(lambda x, y: x.concatenate(y))
def checkVariadicInputs(self, dataset_fn, input_datasets):
self.assertEqual(
nest.flatten(input_datasets),
dataset_fn(input_datasets)._inputs())
@combinations.generate(test_base.default_test_combinations())
def testZipOneInputs(self):
input_datasets = dataset_ops.Dataset.range(0)
self.checkVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
@combinations.generate(test_base.default_test_combinations())
def testZipNestInputs(self):
input_datasets = (dataset_ops.Dataset.range(0),
(dataset_ops.Dataset.range(1),
dataset_ops.Dataset.range(2)))
self.checkVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
@combinations.generate(test_base.default_test_combinations())
def testZipTupleInputs(self):
input_datasets = (dataset_ops.Dataset.range(0),
dataset_ops.Dataset.range(1))
self.checkVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
@combinations.generate(test_base.default_test_combinations())
def testFunctions(self):
dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
self.assertLen(dataset._functions(), 1)
@combinations.generate(test_base.default_test_combinations())
def testCollectInputs(self):
ds1 = dataset_ops.Dataset.range(0)
ds2 = ds1.concatenate(ds1)
ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
inputs = []
queue = [ds3]
while queue:
ds = queue[0]
queue = queue[1:]
queue.extend(ds._inputs())
inputs.append(ds)
self.assertEqual(5, inputs.count(ds1))
self.assertEqual(2, inputs.count(ds2))
self.assertEqual(1, inputs.count(ds3))
def checkDatasetSpec(self, tf_value, expected_element_structure):
dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value)
dataset_structure = structure.type_spec_from_value(dataset)
self.assertIsInstance(dataset_structure, dataset_ops.DatasetSpec)
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset), expected_element_structure))
self.assertEqual([dtypes.variant],
structure.get_flat_tensor_types(dataset_structure))
self.assertEqual([tensor_shape.TensorShape([])],
structure.get_flat_tensor_shapes(dataset_structure))
# Assert that the `Dataset` survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_dataset = dataset_structure._from_tensor_list(
dataset_structure._to_tensor_list(dataset))
value = tf_value
if isinstance(value, dataset_ops.Dataset):
      self.assertDatasetsEqual(value,
                               round_trip_dataset.flat_map(lambda x: x))
elif isinstance(value, optional_ops.Optional):
self.assertDatasetProduces(
round_trip_dataset.map(lambda opt: opt.get_value()),
[self.evaluate(value.get_value())],
requires_initialization=True)
else:
self.assertDatasetProduces(
round_trip_dataset, [self.evaluate(tf_value)],
requires_initialization=True)
@combinations.generate(test_base.default_test_combinations())
def testTensorDatasetSpec(self):
self.checkDatasetSpec(
constant_op.constant(37.0), tensor_spec.TensorSpec([], dtypes.float32))
@combinations.generate(test_base.default_test_combinations())
def testSparseTensorDatasetSpec(self):
self.checkDatasetSpec(
sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32))
@combinations.generate(test_base.default_test_combinations())
def testNestDatasetSpec(self):
self.checkDatasetSpec(
{
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (
tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string),
)
})
@combinations.generate(test_base.default_test_combinations())
def testDatasetDatasetSpec(self):
self.checkDatasetSpec(
dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([1, 2, 3])),
dataset_ops.DatasetSpec(tensor_spec.TensorSpec([], dtypes.int32)))
@combinations.generate(test_base.default_test_combinations())
def testOptionalDatasetSpec(self):
self.checkDatasetSpec(
optional_ops.Optional.from_value(37.0),
optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32)))
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testSkipEagerSameGraphErrorOneShot(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
dataset = dataset.batch(2)
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testSkipEagerSameGraphErrorOneShotSimple(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with test.mock.patch.object(tf_logging, "warning") as mock_log:
_ = dataset_ops.make_one_shot_iterator(dataset)
self.assertRegexpMatches(
str(mock_log.call_args), "Please ensure that all datasets in the "
"pipeline are created in the same graph as the iterator.")
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testSkipEagerSameGraphErrorInitializable(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
dataset = dataset.batch(2)
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=[1, 2], mode="eager"),
combinations.combine(execution_mode=[context.ASYNC, context.SYNC])))
def testEagerIteration(self, execution_mode):
with context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
for foo in dataset:
self.assertEqual(val, foo.numpy())
val += 1
@combinations.generate(test_base.default_test_combinations())
def testDatasetAsFunctionArgument(self):
@def_function.function
def _uses_dataset(d):
accumulator = array_ops.zeros([], dtype=dtypes.int64)
for value in d:
accumulator += value
return accumulator
with ops.device("CPU"):
first_dataset = dataset_ops.Dataset.range(10)
self.assertEqual(45, self.evaluate(_uses_dataset(first_dataset)))
second_dataset = dataset_ops.Dataset.range(11)
self.assertEqual(55, self.evaluate(_uses_dataset(second_dataset)))
first_concrete = _uses_dataset.get_concrete_function(first_dataset)
# The dataset should not be a captured input
self.assertEmpty(first_concrete.graph.captures)
# The two datasets have the same structure and so should re-use a trace.
self.assertIs(first_concrete,
_uses_dataset.get_concrete_function(second_dataset))
# With a different structure we should use a different trace.
self.assertIsNot(
first_concrete,
_uses_dataset.get_concrete_function(
dataset_ops.Dataset.zip((first_dataset, second_dataset))))
@combinations.generate(test_base.default_test_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(ds):
trace_count[0] += 1
counter = np.int64(0)
for elem in ds:
counter += elem
return counter
dataset = dataset_ops.Dataset.range(5)
dataset2 = dataset_ops.Dataset.range(10)
for _ in range(10):
self.assertEqual(self.evaluate(f(dataset)), 10)
self.assertEqual(self.evaluate(f(dataset2)), 45)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
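# A minimal illustrative sketch (not part of the upstream test file): it spells
# out the breadth-first `_inputs()` traversal that `testCollectInputs` above
# relies on, using only `dataset_ops`, which this module already imports, and
# the private `_inputs()` hook the tests themselves exercise.
def _collect_pipeline_datasets(root_dataset):
  """Returns every dataset reachable from `root_dataset` via `_inputs()`."""
  visited = []
  queue = [root_dataset]
  while queue:
    ds = queue.pop(0)
    visited.append(ds)
    queue.extend(ds._inputs())  # pylint: disable=protected-access
  return visited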
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.prefetch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class PrefetchTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.parameters((-1), (0), (5))
def testBufferSize(self, buffer_size):
dataset = dataset_ops.Dataset.range(10).prefetch(buffer_size=buffer_size)
self.assertDatasetProduces(dataset, expected_output=range(10))
@parameterized.parameters((-2), (-42))
def testInvalidBufferSize(self, buffer_size):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).prefetch(buffer_size=buffer_size)
self.evaluate(dataset._variant_tensor)
@parameterized.parameters(*[(buffer_size, slack_period)
for buffer_size in (-1, None, 0, 5)
for slack_period in (1, 8)])
def testPrefetchWithSlack(self, buffer_size, slack_period):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset_ops.PrefetchDataset(
dataset, buffer_size, slack_period=slack_period)
self.assertDatasetProduces(dataset, expected_output=range(100))
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerPrefetchCancellation(self):
def map_py_fn(x):
while x > -1:
x = x * 1
return x
dataset = dataset_ops.Dataset.range(10).map(map_py_fn).prefetch(3)
get_next = self.getNext(dataset)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.5)
sess.close()
thread.join()
if __name__ == "__main__":
test.main()
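# A minimal illustrative sketch (not part of the upstream test file):
# `prefetch(buffer_size)` decouples the time elements are produced from the
# time they are consumed. A `buffer_size` of -1 lets the runtime tune the
# buffer size, which is one of the cases `testBufferSize` above exercises.
def _prefetch_pipeline_sketch():
  # Elements 0..9 are produced into a background buffer of up to 2 elements
  # while the consumer is still processing earlier ones.
  return dataset_ops.Dataset.range(10).prefetch(buffer_size=2)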
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/prefetch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.enumerate()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class EnumerateTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testEnumerate(self):
components = (["a", "b"], [1, 2], [37.0, 38])
start = constant_op.constant(20, dtype=dtypes.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).enumerate(
start)
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset)[0])
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual((), dataset_output_shapes[0])
self.assertEqual([tensor_shape.TensorShape([])] * 3,
[shape for shape in dataset_output_shapes[1]])
self.assertDatasetProduces(dataset, [(20, (b"a", 1, 37.0)),
(21, (b"b", 2, 38.0))])
if __name__ == "__main__":
test.main()
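# A minimal illustrative sketch (not part of the upstream test file) of the
# behavior checked above: `enumerate(start)` pairs every element with an int64
# counter that begins at `start`.
def _enumerate_sketch():
  dataset = dataset_ops.Dataset.from_tensor_slices([10, 20, 30]).enumerate(5)
  # Produces (5, 10), (6, 20), (7, 30).
  return dataset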
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/enumerate_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.concatenate()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConcatenateTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testConcatenateDataset(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 15),
np.array([37.0, 38.0, 39.0, 40.0]))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
dataset_ops.get_legacy_output_shapes(concatenated),
(tensor_shape.TensorShape([20]), tensor_shape.TensorShape([15]),
tensor_shape.TensorShape([])))
get_next = self.getNext(concatenated)
for i in range(9):
result = self.evaluate(get_next())
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testConcatenateDatasetDifferentShape(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
[ts.as_list()
for ts in nest.flatten(
dataset_ops.get_legacy_output_shapes(concatenated))],
[[20], [None]])
get_next = self.getNext(concatenated)
for i in range(9):
result = self.evaluate(get_next())
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testConcatenateDatasetDifferentStructure(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
@combinations.generate(test_base.default_test_combinations())
def testConcatenateDatasetDifferentKeys(self):
input_components = {
"foo": np.array([[1], [2], [3], [4]]),
"bar": np.array([[12], [13], [14], [15]])
}
to_concatenate_components = {
"foo": np.array([[1], [2], [3], [4]]),
"baz": np.array([[5], [6], [7], [8]])
}
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
@combinations.generate(test_base.default_test_combinations())
def testConcatenateDatasetDifferentType(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1.0], [2.0], [3.0], [4.0]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
if __name__ == "__main__":
test.main()
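# A minimal illustrative sketch (not part of the upstream test file):
# `concatenate` appends one dataset after another. Element dtypes and nesting
# structure must match exactly, while per-component shapes that differ are
# relaxed to a common shape with unknown dimensions, as the tests above verify.
def _concatenate_sketch():
  first = dataset_ops.Dataset.range(3)        # 0, 1, 2
  second = dataset_ops.Dataset.range(10, 12)  # 10, 11
  return first.concatenate(second)            # yields 0, 1, 2, 10, 11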
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/concatenate_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.FixedLengthRecordDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class FixedLengthRecordDatasetTest(test_base.DatasetTestBase):
def setUp(self):
super(FixedLengthRecordDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self, compression_type=None):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
contents = []
contents.append(b"H" * self._header_bytes)
for j in range(self._num_records):
contents.append(self._record(i, j))
contents.append(b"F" * self._footer_bytes)
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
def _testFixedLengthRecordDataset(self, compression_type=None):
test_filenames = self._createFiles(compression_type=compression_type)
def dataset_fn(filenames, num_epochs, batch_size=None):
repeat_dataset = readers.FixedLengthRecordDataset(
filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
compression_type=compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
# Basic test: read from file 0.
self.assertDatasetProduces(
dataset_fn([test_filenames[0]], 1),
expected_output=[
self._record(0, i) for i in range(self._num_records)
])
# Basic test: read from file 1.
self.assertDatasetProduces(
dataset_fn([test_filenames[1]], 1),
expected_output=[
self._record(1, i) for i in range(self._num_records)
])
# Basic test: read from both files.
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 1), expected_output=expected_output)
# Test repeated iteration through both files.
get_next = self.getNext(dataset_fn(test_filenames, 10))
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test batched and repeated iteration through both files.
get_next = self.getNext(dataset_fn(test_filenames, 10, self._num_records))
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFixedLengthRecordDatasetNoCompression(self):
self._testFixedLengthRecordDataset()
def testFixedLengthRecordDatasetGzipCompression(self):
self._testFixedLengthRecordDataset(compression_type="GZIP")
def testFixedLengthRecordDatasetZlibCompression(self):
self._testFixedLengthRecordDataset(compression_type="ZLIB")
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testFixedLengthRecordDatasetParallelRead(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10,
num_parallel_reads=4)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output,
assert_items_equal=True)
def testFixedLengthRecordDatasetWrongSize(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes + 1, # Incorrect record length.
self._header_bytes,
self._footer_bytes,
buffer_size=10)
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
r"Excluding the header \(5 bytes\) and footer \(2 bytes\), input "
r"file \".*fixed_length_record.0.txt\" has body length 21 bytes, "
r"which is not an exact multiple of the record length \(4 bytes\).")
)
if __name__ == "__main__":
test.main()
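# A minimal illustrative sketch (not part of the upstream test file) of the
# byte layout `FixedLengthRecordDataset` assumes: an optional header, then N
# records of exactly `record_bytes` bytes, then an optional footer. The body
# length must be an exact multiple of `record_bytes`, which is why
# `testFixedLengthRecordDatasetWrongSize` above fails with
# `InvalidArgumentError`.
def _fixed_length_record_sketch(filenames):
  return readers.FixedLengthRecordDataset(
      filenames,
      record_bytes=3,   # each emitted element is exactly 3 bytes
      header_bytes=5,   # skipped once at the start of every file
      footer_bytes=2)   # ignored at the end of every file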
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/fixed_length_record_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shard()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class ShardTest(test_base.DatasetTestBase):
def testSimpleCase(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[2, 7])
def testNestedData(self):
dataset_a = dataset_ops.Dataset.range(10)
dataset_b = dataset_ops.Dataset.range(10, 0, -1)
dataset = dataset_ops.Dataset.zip((dataset_a, dataset_b)).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[(2, 8), (7, 3)])
def testOffsetZero(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 0)
self.assertDatasetProduces(dataset, expected_output=[0, 5])
def testOffsetGreaterNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(5, 7)
self.evaluate(self.getNext(dataset)())
def testNegativeOffset(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(5, -3)
self.evaluate(self.getNext(dataset)())
def testNegativeNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(-3, 1)
self.evaluate(self.getNext(dataset)())
def testZeroNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(0, 1)
self.evaluate(self.getNext(dataset)())
def testIteratorEndsBeforeFirstElem(self):
dataset = dataset_ops.Dataset.range(1).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[])
def testLargerWorkerPool(self):
dataset = dataset_ops.Dataset.range(10).shard(7, 5)
self.assertDatasetProduces(dataset, expected_output=[5])
def testIndexEqualsNumShards(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 4)
self.assertDatasetProduces(dataset, expected_output=[4, 9])
def testIndexEqualsNumShards2(self):
dataset = dataset_ops.Dataset.range(10).shard(4, 3)
self.assertDatasetProduces(dataset, expected_output=[3, 7])
def testNumShardsLargerThanDataset(self):
dataset = dataset_ops.Dataset.range(10).shard(20, 5)
self.assertDatasetProduces(dataset, expected_output=[5])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/shard_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Optional`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class OptionalTest(test_base.DatasetTestBase, parameterized.TestCase):
def testFromValue(self):
opt = optional_ops.Optional.from_value(constant_op.constant(37.0))
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual(37.0, self.evaluate(opt.get_value()))
def testFromStructuredValue(self):
opt = optional_ops.Optional.from_value({
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
})
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual({
"a": 37.0,
"b": ([b"Foo"], b"Bar")
}, self.evaluate(opt.get_value()))
def testFromSparseTensor(self):
st_0 = sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0], dtype=np.int64),
dense_shape=np.array([1]))
st_1 = sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=np.array([2, 2]))
opt = optional_ops.Optional.from_value((st_0, st_1))
self.assertTrue(self.evaluate(opt.has_value()))
val_0, val_1 = opt.get_value()
for expected, actual in [(st_0, val_0), (st_1, val_1)]:
self.assertAllEqual(expected.indices, self.evaluate(actual.indices))
self.assertAllEqual(expected.values, self.evaluate(actual.values))
self.assertAllEqual(expected.dense_shape,
self.evaluate(actual.dense_shape))
def testFromNone(self):
value_structure = tensor_spec.TensorSpec([], dtypes.float32)
opt = optional_ops.Optional.none_from_structure(value_structure)
self.assertTrue(opt.value_structure.is_compatible_with(value_structure))
self.assertFalse(
opt.value_structure.is_compatible_with(
tensor_spec.TensorSpec([1], dtypes.float32)))
self.assertFalse(
opt.value_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int32)))
self.assertFalse(self.evaluate(opt.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(opt.get_value())
def testAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
# With value
opt1 = optional_ops.Optional.from_value((1.0, 2.0))
opt2 = optional_ops.Optional.from_value((3.0, 4.0))
add_tensor = math_ops.add_n([opt1._variant_tensor,
opt2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt1.value_structure)
self.assertAllEqual(self.evaluate(add_opt.get_value()), (4.0, 6.0))
# Without value
opt_none1 = optional_ops.Optional.none_from_structure(
opt1.value_structure)
opt_none2 = optional_ops.Optional.none_from_structure(
opt2.value_structure)
add_tensor = math_ops.add_n([opt_none1._variant_tensor,
opt_none2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor,
opt_none1.value_structure)
self.assertFalse(self.evaluate(add_opt.has_value()))
def testNestedAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value([1, 2.0])
opt2 = optional_ops.Optional.from_value([3, 4.0])
opt3 = optional_ops.Optional.from_value((5.0, opt1._variant_tensor))
opt4 = optional_ops.Optional.from_value((6.0, opt2._variant_tensor))
add_tensor = math_ops.add_n([opt3._variant_tensor,
opt4._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt3.value_structure)
self.assertEqual(self.evaluate(add_opt.get_value()[0]), 11.0)
inner_add_opt = optional_ops._OptionalImpl(add_opt.get_value()[1],
opt1.value_structure)
self.assertAllEqual(inner_add_opt.get_value(), [4, 6.0])
def testZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
# With value
opt = optional_ops.Optional.from_value((1.0, 2.0))
zeros_tensor = array_ops.zeros_like(opt._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt.value_structure)
self.assertAllEqual(self.evaluate(zeros_opt.get_value()),
(0.0, 0.0))
# Without value
opt_none = optional_ops.Optional.none_from_structure(
opt.value_structure)
zeros_tensor = array_ops.zeros_like(opt_none._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt_none.value_structure)
self.assertFalse(self.evaluate(zeros_opt.has_value()))
def testNestedZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value(1.0)
opt2 = optional_ops.Optional.from_value(opt1._variant_tensor)
zeros_tensor = array_ops.zeros_like(opt2._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt2.value_structure)
inner_zeros_opt = optional_ops._OptionalImpl(zeros_opt.get_value(),
opt1.value_structure)
self.assertEqual(self.evaluate(inner_zeros_opt.get_value()), 0.0)
def testCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
tensor_spec.TensorSpec([], dtypes.float32))
with ops.device("/gpu:0"):
gpu_optional_with_value = optional_ops._OptionalImpl(
array_ops.identity(optional_with_value._variant_tensor),
optional_with_value.value_structure)
gpu_optional_none = optional_ops._OptionalImpl(
array_ops.identity(optional_none._variant_tensor),
optional_none.value_structure)
gpu_optional_with_value_has_value = gpu_optional_with_value.has_value()
gpu_optional_with_value_values = gpu_optional_with_value.get_value()
gpu_optional_none_has_value = gpu_optional_none.has_value()
self.assertTrue(self.evaluate(gpu_optional_with_value_has_value))
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(gpu_optional_with_value_values))
self.assertFalse(self.evaluate(gpu_optional_none_has_value))
def testNestedCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
tensor_spec.TensorSpec([], dtypes.float32))
nested_optional = optional_ops.Optional.from_value(
(optional_with_value._variant_tensor, optional_none._variant_tensor,
1.0))
with ops.device("/gpu:0"):
gpu_nested_optional = optional_ops._OptionalImpl(
array_ops.identity(nested_optional._variant_tensor),
nested_optional.value_structure)
gpu_nested_optional_has_value = gpu_nested_optional.has_value()
gpu_nested_optional_values = gpu_nested_optional.get_value()
self.assertTrue(self.evaluate(gpu_nested_optional_has_value))
inner_with_value = optional_ops._OptionalImpl(
gpu_nested_optional_values[0], optional_with_value.value_structure)
inner_none = optional_ops._OptionalImpl(
gpu_nested_optional_values[1], optional_none.value_structure)
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(inner_with_value.get_value()))
self.assertFalse(self.evaluate(inner_none.has_value()))
self.assertEqual(1.0, self.evaluate(gpu_nested_optional_values[2]))
def _assertElementValueEqual(self, expected, actual):
if isinstance(expected, dict):
self.assertItemsEqual(list(expected.keys()), list(actual.keys()))
for k in expected.keys():
self._assertElementValueEqual(expected[k], actual[k])
elif isinstance(expected, sparse_tensor.SparseTensorValue):
self.assertAllEqual(expected.indices, actual.indices)
self.assertAllEqual(expected.values, actual.values)
self.assertAllEqual(expected.dense_shape, actual.dense_shape)
else:
self.assertAllEqual(expected, actual)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
tensor_spec.TensorSpec([], dtypes.float32)),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0, 1]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[10, 10]),
sparse_tensor.SparseTensorSpec([10, 10], dtypes.int32)),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (
tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string),
)
}),
("Optional", lambda: optional_ops.Optional.from_value(37.0),
optional_ops.OptionalSpec(
tensor_spec.TensorSpec([], dtypes.float32))),
)
def testOptionalSpec(self, tf_value_fn, expected_value_structure):
tf_value = tf_value_fn()
opt = optional_ops.Optional.from_value(tf_value)
self.assertTrue(
structure.are_compatible(opt.value_structure, expected_value_structure))
opt_structure = structure.type_spec_from_value(opt)
self.assertIsInstance(opt_structure, optional_ops.OptionalSpec)
self.assertTrue(structure.are_compatible(opt_structure, opt_structure))
self.assertTrue(
structure.are_compatible(opt_structure._value_structure,
expected_value_structure))
self.assertEqual([dtypes.variant],
structure.get_flat_tensor_types(opt_structure))
self.assertEqual([tensor_shape.TensorShape([])],
structure.get_flat_tensor_shapes(opt_structure))
    # An `OptionalSpec` is never compatible with a non-optional value.
non_optional_structure = structure.type_spec_from_value(
constant_op.constant(42.0))
self.assertFalse(opt_structure.is_compatible_with(non_optional_structure))
# Assert that the optional survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_opt = opt_structure._from_tensor_list(
opt_structure._to_tensor_list(opt))
if isinstance(tf_value, optional_ops.Optional):
self._assertElementValueEqual(
self.evaluate(tf_value.get_value()),
self.evaluate(round_trip_opt.get_value().get_value()))
else:
self._assertElementValueEqual(
self.evaluate(tf_value),
self.evaluate(round_trip_opt.get_value()))
@parameterized.named_parameters(
("Tensor", np.array([1, 2, 3], dtype=np.int32),
lambda: constant_op.constant([4, 5, 6], dtype=dtypes.int32), True),
("SparseTensor", sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32), dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]], values=[37.0, 42.0], dense_shape=[2, 2]),
False),
("Nest", {"a": np.array([1, 2, 3], dtype=np.int32),
"b": sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=[2, 2])},
lambda: {"a": constant_op.constant([4, 5, 6], dtype=dtypes.int32),
"b": sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]], values=[37.0, 42.0],
dense_shape=[2, 2])}, False),
)
def testIteratorGetNextAsOptional(self, np_value, tf_value_fn,
works_on_gpu):
if not works_on_gpu and test.is_gpu_available():
self.skipTest("Test case not yet supported on GPU.")
ds = dataset_ops.Dataset.from_tensors(np_value).repeat(3)
if context.executing_eagerly():
iterator = dataset_ops.make_one_shot_iterator(ds)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(structure.are_compatible(
next_elem.value_structure,
structure.type_spec_from_value(tf_value_fn())))
self.assertTrue(next_elem.has_value())
self._assertElementValueEqual(np_value, next_elem.get_value())
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertFalse(self.evaluate(next_elem.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_elem.get_value())
else:
iterator = dataset_ops.make_initializable_iterator(ds)
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(structure.are_compatible(
next_elem.value_structure,
structure.type_spec_from_value(tf_value_fn())))
# Before initializing the iterator, evaluating the optional fails with
# a FailedPreconditionError. This is only relevant in graph mode.
elem_has_value_t = next_elem.has_value()
elem_value_t = next_elem.get_value()
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_has_value_t)
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_value_t)
# Now we initialize the iterator.
self.evaluate(iterator.initializer)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
elem_has_value, elem_value = self.evaluate(
[elem_has_value_t, elem_value_t])
self.assertTrue(elem_has_value)
self._assertElementValueEqual(np_value, elem_value)
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
self.assertFalse(self.evaluate(elem_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_value_t)
def testFunctionBoundaries(self):
@def_function.function
def get_optional():
x = constant_op.constant(1.0)
opt = optional_ops.Optional.from_value(x)
# TODO(skyewm): support returning Optionals from functions?
return opt._variant_tensor
# TODO(skyewm): support Optional arguments?
@def_function.function
def consume_optional(opt_tensor):
value_structure = tensor_spec.TensorSpec([], dtypes.float32)
opt = optional_ops._OptionalImpl(opt_tensor, value_structure)
return opt.get_value()
opt_tensor = get_optional()
val = consume_optional(opt_tensor)
self.assertEqual(self.evaluate(val), 1.0)
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(opt):
trace_count[0] += 1
return opt.get_value()
opt1 = optional_ops.Optional.from_value(constant_op.constant(37.0))
opt2 = optional_ops.Optional.from_value(constant_op.constant(42.0))
for _ in range(10):
self.assertEqual(self.evaluate(f(opt1)), 37.0)
self.assertEqual(self.evaluate(f(opt2)), 42.0)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
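# A minimal illustrative sketch (not part of the upstream test file) of the
# `Optional` contract these tests exercise: an optional either holds a value
# with a known structure or holds nothing, and callers should check
# `has_value()` before calling `get_value()`.
def _optional_sketch():
  present = optional_ops.Optional.from_value(constant_op.constant(37.0))
  absent = optional_ops.Optional.none_from_structure(
      tensor_spec.TensorSpec([], dtypes.float32))
  # `present.has_value()` evaluates to True and `absent.has_value()` to False;
  # evaluating `absent.get_value()` would raise `InvalidArgumentError`.
  return present.has_value(), absent.has_value()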
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/optional_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
def _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):
  """Produces a dataset and events that control the order of its elements.
  Args:
    num_elements: the number of input elements
    num_parallel_calls: the degree of map parallelism
  Returns:
    A dataset whose elements are the squares of the inputs, and a dict of
    threading events (one per element) that can be used to control the order
    in which the output elements are produced.
"""
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in range(num_elements)}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.range(num_elements).map(
map_fn, num_parallel_calls).with_options(options)
return dataset, coordination_events
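# A small illustrative helper sketch (not part of the upstream test file): with
# `experimental_deterministic=False`, releasing a later element's event before
# an earlier one allows the parallel map to emit results out of input order,
# which is how the sloppy-ordering tests in this file observe sloppiness.
def _release_events_out_of_order(coordination_events, earlier, later):
  coordination_events[later].set()    # unblock the later element first
  coordination_events[earlier].set()  # then unblock the earlier element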
# TODO(jsimsa): Add tests for `map_with_legacy_function`.
@test_util.run_all_in_graph_and_eager_modes
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
  def testMapDataset(self):
    """Tests a dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(self._buildMapDataset(components, 14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerMapDatasetMultithreaded(self):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(self._buildMapDataset(components, 18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=num_parallel_calls).prefetch(
output_buffer_size).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
  def testParallelMapDataset(self):
    """Tests a dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
def do_test(num_parallel_calls, output_buffer_size):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._buildParallelMapDataset(components, 14, num_parallel_calls,
output_buffer_size))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
    for num_parallel_calls_val, output_buffer_size_val in [(1, 1), (1, 2),
                                                           (2, 2), (2, 4),
                                                           (8, 8), (8, 16)]:
      do_test(num_parallel_calls_val, output_buffer_size_val)
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerParallelMapDatasetMultithreaded(self):
def do_test(num_parallel_calls, output_buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._buildParallelMapDataset(components, 18, num_parallel_calls,
output_buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureIterator(self):
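# Each invocation of the map function advances the captured iterator by one
# element, so element i of the outer range dataset is multiplied by the i-th
# element of the captured range iterator, producing i * i.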
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = input_sentences.map(lambda x: string_ops.string_split([x]).values
).map(table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@test_util.run_v1_only("b/123904513")
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@test_util.run_v1_only("b/120545219")
def testSkipEagerCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testSeededStatefulOperatorIsProperlyStateful(self):
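# A seeded random op is still stateful: it should produce different values
# across the 10 invocations within one iterator, while the full sequence is
# reproducible when a fresh iterator over the same dataset is created.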
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
10).map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
# Randomness is repeatable given the same seed.
self.assertAllClose(random_values, random_values_2)
def testStatefulMapKeepsStateAcrossIterators(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: random_ops.random_uniform((), seed=11)).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
def testStatefulOperationInShortCircuit(self):
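# The map function returns its input unchanged, a shape the short-circuit
# optimization can recognize; the assertions below check that the side effect
# on `counter_var` still runs once per element.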
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
def testMapDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x**2}).map(
lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(count):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
def testUseStepContainerInMap(self):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
def testCaseAndCondInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(
row).map(lambda x: control_map_fn(x, num))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), divide),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseAndCondInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
# pylint: enable=g-long-lambda
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testNestedListMapDataset(self):
dataset = dataset_ops.Dataset.from_tensors(
[0, 1, 2]).repeat(10).map(lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
def do_test(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
# Simple test that prefetch yields the expected values in the
# expected order.
for i in range(100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in [1, 10, 100, 1000]:
do_test(buffer_size)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
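# With a prefetch buffer of size `buffer_size`, the producer stays roughly
# `buffer_size` elements ahead of the consumer, so the invocation that sets
# the event is expected only after consuming about
# `set_event_during_invocation - buffer_size + 1` elements.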
def do_test_ev(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in range(1, set_event_during_invocation):
do_test_ev(buffer_size)
def testReturnList(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10).map(_sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
def testTensorArray(self):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10).map(_tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
def testTensorArrayChain(self):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10).map(_tensor_array).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
def testRagged(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5).map(_ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
def testRaggedChain(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10).map(_ragged).map(_concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
@test_util.run_v1_only("b/123904513")
def testParallelMapOutOfRangeError(self):
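# In this legacy graph-mode path, a StopIteration raised inside the py_func
# is translated into end-of-sequence, so the dataset yields elements 0..99
# and then raises OutOfRangeError even though the input has 105 elements.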
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105).map(
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConstantOutput(self):
dataset = dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
@test_util.run_v1_only("map_with_legacy_function v1 only")
def testWarnOnLookupTableLegacyFunction(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map_with_legacy_function(
collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
# The map function doesn't use a seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(
2).map(lambda x: random_ops.random_shuffle(x, seed=37))
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
def testNestedDatasetMap(self):
# TODO(b/110122868): When iterators can yield a `tf.data.Dataset`, remove
# the `get_single_element()` call.
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]).map(
dataset_ops.Dataset.from_tensor_slices).map(
lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type
# attr.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "BrokenConst"))
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Map", lambda dataset, func:
dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),
("ParallelMap", lambda dataset, func:
dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,
use_inter_op_parallelism=False)),
)
def testNoInterOpParallelism(self, make_dataset_fn):
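# With `use_inter_op_parallelism=False`, all ops of a single map invocation
# run on one thread, so the ten py_func calls should report the same thread
# id.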
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = make_dataset_fn(dataset, _map_fn)
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
# pylint: enable=g-long-lambda
@parameterized.named_parameters(
("SequentialIdentity", None, lambda x: x, None),
("SequentialReplicate", None, lambda x: (x, x), None),
("SequentialSwap", (None, None), lambda x, y: (y, x), None),
("SequentialProject", (None, None), lambda x, y: x, None),
("ParallelIdentity", None, lambda x: x, 10),
("ParallelReplicate", None, lambda x: (x, x), 10),
("ParallelSwap", (None, None), lambda x, y: (y, x), 10),
("ParallelProject", (None, None), lambda x, y: x, 10),
)
def testShortCircuit(self, structure, map_fn, num_parallel_calls):
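# These map functions only forward, duplicate, reorder, or drop their inputs,
# so the map implementation may short-circuit them instead of invoking the
# function; the result must still match applying `map_fn` directly.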
dataset = self.structuredDataset(structure).repeat().map(
map_fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = map_fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = map_fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 10),
)
def testShortCircuitCapturedInput(self, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat().map(
lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@parameterized.named_parameters(
("1", 1, 1),
("2", 10, 1),
("3", 10, 10),
("4", 100, 1),
("5", 100, 10),
("6", 100, 100),
)
def testSloppyInterleaveInOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 10, 10),
("2", 100, 10),
("3", 100, 100),
)
def testSloppyInterleaveOutOfOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
elements = [x for x in range(num_elements)]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
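# Releasing the coordination events out of order makes elements finish out of
# order; in sloppy (non-deterministic) mode the iterator yields them in
# completion order, which is what the assertions below expect.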
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("Map", None),
("ParallelMap", 12),
)
def testPreserveCardinality(self, num_parallel_calls):
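# Because `map` preserves the cardinality of its one-element input, the
# StopIteration raised by the py_func surfaces as an InvalidArgumentError
# instead of silently ending the dataset.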
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.DatasetV2.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# NOTE: collection test is specific to graph mode only, no eager coverage.
@test_util.run_v1_only("graph specific test")
def testSkipEagerCollectionCopy(self):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
dataset.map(func)
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 12),
)
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerMapCancellation(self, num_parallel_calls):
# Checks that cancellation (here, closing the session) is threaded through to
# the map transformation.
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
def fn(_):
return queue.dequeue()
dataset = dataset_ops.Dataset.range(1).map(
fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset, requires_initialization=True)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.2)
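# Closing the session cancels the pending `queue.dequeue()` inside the map
# function; the helper thread then observes the cancellation via
# `assert_op_cancelled`.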
sess.close()
thread.join()
# TODO(shivaniagarwal): separate out `map` and `map_with_legacy_function` tests
# as the latter does not work in v2.
@test_util.run_all_in_graph_and_eager_modes
class MapWithCapturedVariableTests(test_base.DatasetTestBase,
parameterized.TestCase):
# TODO(b/126553094): `map` doesn't work with a variable defined inside the
# function in eager mode; Graph tensors may leak out of the function-building
# context because the variables are created in an init_scope.
@test_util.run_v1_only("b/126553094")
def testSkipEagerCreateVariableInsideFunctionWithGetter(self):
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return counter_var.assign_add(1)
# NOTE: In the legacy function, the resource is captured by value by the
# variable getter.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
with self.assertRaisesWithPredicateMatch(
AttributeError, "'Tensor' object has no attribute 'assign_add'"):
dataset.map_with_legacy_function(func)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertEqual(i + 1, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("MapLegacyFunction",
lambda dataset, func: dataset.map_with_legacy_function(func)),
("Map", lambda dataset, func: dataset.map(func)),
)
@test_util.run_v1_only("map_with_legacy_function is only available in v1.")
def testCaptureVariable(self, transformation_function):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = transformation_function(
dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
# NOTE: no need to explicitly initialize variables in eager mode.
@parameterized.named_parameters(
("MapLegacyFunction",
lambda dataset, func: dataset.map_with_legacy_function(func)),
("Map", lambda dataset, func: dataset.map(func)),
)
@test_util.run_v1_only("this test is meant to run in graph mode only.")
def testSkipEagerCaptureUninitializedVariableError(self,
transformation_function):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = transformation_function(
dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@parameterized.named_parameters(
("MapLegacyFunction",
lambda dataset, func: dataset.map_with_legacy_function(func)),
("Map", lambda dataset, func: dataset.map(func)),
)
@test_util.run_v1_only("b/121264236")
def testSkipEagerCaptureConstantsWithConflictingDevices(
self, transformation_function):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = transformation_function(dataset, func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only("b/121264236")
def testSkipEagerRefVariablesWithMultipleDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variables.VariableV1(5.0)
return math_ops.add(a, b)
# NOTE: Use the legacy function implementation, as the eager function
# implementation converts RefVariables to ResourceVariables.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = dataset.map_with_legacy_function(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only("b/121264236")
def testSkipEagerResourceVariablesWithMultipleDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
with ops.device("/device:CPU:0"):
a_var = variable_scope.get_variable(
"a", (), dtypes.int32, use_resource=True)
a_var = math_ops.add(a_var, 1)
with ops.device("/device:CPU:1"):
b_var = variable_scope.get_variable(
"b", (), dtypes.int32, use_resource=True)
return math_ops.add(a_var, b_var)
g_1 = ops.Graph()
with self.session(config=config, graph=g_1):
# The MapDataset node ends up with two ResourceVariable inputs, one on
# device CPU:0 and the other on device CPU:1.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
g_2 = ops.Graph()
with self.session(config=config, graph=g_2):
# In the legacy Defun, the variable is captured by value, hence there is no
# colocation error.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = dataset.map_with_legacy_function(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_tensors()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FromTensorsTest(test_base.DatasetTestBase):
def testFromTensors(self):
"""Test a dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
[c.shape for c in components],
nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)))
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsDataset(self):
"""Test a dataset that represents a dataset."""
dataset = dataset_ops.Dataset.from_tensors(dataset_ops.Dataset.range(10))
dataset = dataset.flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=range(10))
def testFromTensorsTensorArray(self):
"""Test a dataset that represents a TensorArray."""
components = (
tensor_array_ops.TensorArray(dtypes.float32, element_shape=(), size=2)
.unstack([1.0, 2.0]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(
dataset, expected_output=[[1.0, 2.0]], requires_initialization=True)
def testFromTensorsSparse(self):
"""Test a dataset that represents a single tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsMixed(self):
"""Test an dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape)
if sparse_tensor.is_sparse(c) else c.shape for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsMixedRagged(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(dataset, expected_output=[components])
# pylint: disable=g-long-lambda,unnecessary-lambda
def testNestedStructure(self):
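# Verifies that the legacy output types and shapes are preserved through
# shuffle/repeat/filter/take and are correctly re-nested by map, flat_map,
# and batch.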
components = (np.array([1, 2, 3], dtype=np.int64),
(np.array([4., 5.]), np.array([6., 7.])),
np.array([8, 9, 10], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.shuffle(10, 10)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.repeat(-1)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.filter(lambda x, y, z: True)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.take(5)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
self.assertEqual(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual((([3], [3]), ([2], [2])),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),
(y[0], y[1])))
)
self.assertEqual(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual((([3], [3]), ([2], [2])),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.batch(32)
self.assertEqual(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual((([None, 3], [None, 3]), ([None, 2], [None, 2])),
nest.pack_sequence_as(dataset_output_shapes, [
s.as_list()
for s in nest.flatten(dataset_output_shapes)
]))
# Define a separate set of components with matching leading
# dimension for the from-slices constructor.
components_for_slices = (np.array([1, 2, 3], dtype=np.int64),
(np.array([4., 5., 6.]), np.array([7., 8., 9.])),
np.array([10, 11, 12], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
self.assertEqual((dtypes.int64,
(dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([], ([], []), []),
dataset_ops.get_legacy_output_shapes(dataset))
# TODO(b/117581999): more specific shapes in eager mode.
@test_util.run_deprecated_v1
def testSkipEagerNestedStructure(self):
components = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5.]),
np.array([6., 7.])),
np.array([8, 9, 10], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensors(components)
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(
((x[0], x[1]), (y[0], y[1])))).batch(32)
get_next = self.getNext(dataset)
(w, x), (y, z) = get_next()
self.assertEqual(dtypes.int64, w.dtype)
self.assertEqual(dtypes.int64, x.dtype)
self.assertEqual(dtypes.float64, y.dtype)
self.assertEqual(dtypes.float64, z.dtype)
self.assertEqual([None, 3], w.shape.as_list())
self.assertEqual([None, 3], x.shape.as_list())
self.assertEqual([None, 2], y.shape.as_list())
self.assertEqual([None, 2], z.shape.as_list())
get_next = self.getNext(dataset)
(w, x), (y, z) = get_next()
self.assertEqual(dtypes.int64, w.dtype)
self.assertEqual(dtypes.int64, x.dtype)
self.assertEqual(dtypes.float64, y.dtype)
self.assertEqual(dtypes.float64, z.dtype)
self.assertEqual([None, 3], w.shape.as_list())
self.assertEqual([None, 3], x.shape.as_list())
self.assertEqual([None, 2], y.shape.as_list())
self.assertEqual([None, 2], z.shape.as_list())
def testNestedDict(self):
components = {"a": {"aa": 1, "ab": [2.0, 2.0]}, "b": [3, 3, 3]}
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["a"]["aa"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["a"]["ab"])
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["b"])
self.assertEqual([],
dataset_ops.get_legacy_output_shapes(dataset)["a"]["aa"])
self.assertEqual([2],
dataset_ops.get_legacy_output_shapes(dataset)["a"]["ab"])
self.assertEqual([3],
dataset_ops.get_legacy_output_shapes(dataset)["b"])
def testNonSequenceNestedStructure(self):
components = np.array([1, 2, 3], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.filter(
lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.map(lambda x: array_ops.stack([x, x]))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([2, 3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int64, get_next().dtype)
self.assertEqual([3], get_next().shape)
# TODO(b/121264236): needs mechanism for multiple device in eager mode.
def testSkipEagerSplitPipeline(self):
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
dataset = dataset_ops.Dataset.from_tensors(0)
# Define a pipeline that attempts to use variables on two
# different devices.
#
# Initialize the variables before creating the iterator, to avoid the
# placement algorithm overriding the DT_RESOURCE colocation constraints.
with ops.device("/cpu:0"):
var_0 = resource_variable_ops.ResourceVariable(initial_value=1)
dataset = dataset.map(lambda x: x + var_0.read_value())
sess.run(var_0.initializer)
with ops.device("/cpu:1"):
var_1 = resource_variable_ops.ResourceVariable(initial_value=1)
dataset = dataset.map(lambda x: x + var_1.read_value())
sess.run(var_1.initializer)
iterator = dataset_ops.make_initializable_iterator(dataset)
sess.run(iterator.initializer)
self.assertEqual(sess.run(iterator.get_next()), 2)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/from_tensors_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Iterator` using distributed sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import lookup as lookup_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class IteratorClusterTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testRemoteIteratorWithoutRemoteCallFail(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
with ops.device("/job:worker/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_it = iterator_ops.Iterator.from_string_handle(
iterator_3_handle, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
get_next_op = remote_it.get_next()
with session.Session(worker[0].target) as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next_op)
def _testRemoteIteratorHelper(self, device0, device1, target):
with ops.device(device1):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
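# The string handle lets another device use this iterator through
# `remote_call`, provided the call targets the device that owns the iterator
# resource.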
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device(device0):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with session.Session(target) as sess:
elem = sess.run(remote_op, feed_dict={target_placeholder: device1})
self.assertEqual(elem, [1])
# Fails when target is cpu:0 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(remote_op, feed_dict={target_placeholder: device0})
elem = sess.run(iterator_3.get_next())
self.assertEqual(elem, [2])
elem = sess.run(remote_op, feed_dict={target_placeholder: device1})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(remote_op, feed_dict={target_placeholder: device1})
@test_util.run_v1_only("b/120545219")
def testRemoteIteratorUsingRemoteCallOp(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
self._testRemoteIteratorHelper("/job:worker/replica:0/task:0/cpu:0",
"/job:worker/replica:0/task:0/cpu:1",
worker[0].target)
@test_util.run_v1_only("b/120545219")
def testRemoteIteratorUsingRemoteCallOpCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
self._testRemoteIteratorHelper("/job:worker/replica:0/task:0/cpu:0",
"/job:worker/replica:0/task:1/cpu:0",
workers[0].target)
@test_util.run_v1_only("b/120545219")
def testCaptureHashTableInSharedIterator(self):
worker, _ = test_util.create_local_cluster(1, 1)
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values),
default_val,
shared_name="shared_table")
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (
input_sentences.map(lambda x: string_ops.string_split([x]).values).map(
table.lookup)
.make_initializable_iterator(shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(worker[0].target) as sess:
sess.run(table.initializer)
sess.run(init_op)
self.assertAllEqual([0, 0, -1, 1, 2], sess.run(get_next))
with session.Session(worker[0].target) as sess:
self.assertAllEqual([2, 0], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.run_v1_only("b/120545219")
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(None) -> PrefetchDataset(10000).
worker, _ = test_util.create_local_cluster(1, 1)
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(None).prefetch(10000))
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(worker[0].target) as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/iterator_cluster_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Options`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class OptionsTest(test_base.DatasetTestBase):
def testOptionsDefault(self):
ds = dataset_ops.Dataset.range(0)
self.assertEqual(dataset_ops.Options(), ds.options())
def testOptionsOnce(self):
options = dataset_ops.Options()
ds = dataset_ops.Dataset.range(0).with_options(options).cache()
self.assertEqual(options, ds.options())
def testOptionsTwiceSame(self):
options = dataset_ops.Options()
options.experimental_optimization.autotune = True
ds = dataset_ops.Dataset.range(0).with_options(options).with_options(
options)
self.assertEqual(options, ds.options())
def testOptionsTwiceDifferent(self):
options1 = dataset_ops.Options()
options1.experimental_optimization.autotune = True
options2 = dataset_ops.Options()
options2.experimental_deterministic = False
ds = dataset_ops.Dataset.range(0).with_options(options1).with_options(
options2)
self.assertTrue(ds.options().experimental_optimization.autotune)
# Explicitly check that flag is False since assertFalse allows None
self.assertIs(ds.options().experimental_deterministic, False)
def testOptionsTwiceDifferentError(self):
options1 = dataset_ops.Options()
options1.experimental_optimization.autotune = True
options2 = dataset_ops.Options()
options2.experimental_optimization.autotune = False
with self.assertRaisesRegexp(ValueError,
"Cannot merge incompatible values"):
dataset_ops.Dataset.range(0).with_options(options1).with_options(options2)
def testOptionsMergeOptionsFromMultipleInputs(self):
options1 = dataset_ops.Options()
options1.experimental_optimization.autotune = True
options2 = dataset_ops.Options()
options2.experimental_deterministic = True
ds = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(0).with_options(options1),
dataset_ops.Dataset.range(0).with_options(options2)))
self.assertTrue(ds.options().experimental_optimization.autotune)
self.assertTrue(ds.options().experimental_deterministic)
def testOptionsHaveDefaults(self):
options1 = dataset_ops.Options()
options2 = dataset_ops.Options()
self.assertIsNot(options1.experimental_optimization,
options2.experimental_optimization)
self.assertIsNot(options1.experimental_stats,
options2.experimental_stats)
self.assertIsNot(options1.experimental_threading,
options2.experimental_threading)
self.assertEqual(options1.experimental_optimization,
                 optimization_options.OptimizationOptions())
self.assertEqual(options1.experimental_stats,
                 stats_options.StatsOptions())
self.assertEqual(options1.experimental_threading,
                 threading_options.ThreadingOptions())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/options_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.zip()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ZipTest(test_base.DatasetTestBase):
def testZipDataset(self):
def dataset_fn(components):
datasets = tuple([
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
])
return dataset_ops.Dataset.zip(datasets)
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
get_next = self.getNext(dataset_fn(equal_length_components))
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(equal_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
variable_length_components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
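# `Dataset.zip()` stops at the shortest input, so only two elements are
# produced from these variable-length components.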
get_next = self.getNext(dataset_fn(variable_length_components))
for i in range(2):
results = self.evaluate(get_next())
for component, result_component in zip(variable_length_components,
results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testNestedZipDataset(self):
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in equal_length_components
]
dataset = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset),
(tensor_shape.TensorShape([20]),
(tensor_shape.TensorShape([22]), tensor_shape.TensorShape([]))))
get_next = self.getNext(dataset)
for i in range(4):
result1, (result2, result3) = self.evaluate(get_next())
self.assertAllEqual(equal_length_components[0][i], result1)
self.assertAllEqual(equal_length_components[1][i], result2)
self.assertAllEqual(equal_length_components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/data/kernel_tests/zip_test.py
|