partition | func_name | docstring | path | code | repo | language | url | sha
|---|---|---|---|---|---|---|---|---|
train | get_dataset_feature_statistics | Calculate statistics for the specified split. | tensorflow_datasets/core/dataset_info.py | def get_dataset_feature_statistics(builder, split):
"""Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics()
# Make this to the best of our abilities.
schema = schema_pb2.Schema()
dataset = builder.as_dataset(split=split)
# Just computing the number of examples for now.
statistics.num_examples = 0
# Feature dictionaries.
feature_to_num_examples = collections.defaultdict(int)
feature_to_min = {}
feature_to_max = {}
np_dataset = dataset_utils.as_numpy(dataset)
for example in utils.tqdm(np_dataset, unit=" examples", leave=False):
statistics.num_examples += 1
assert isinstance(example, dict)
feature_names = sorted(example.keys())
for feature_name in feature_names:
# Update the number of examples this feature appears in.
feature_to_num_examples[feature_name] += 1
feature_np = example[feature_name]
# For compatibility in graph and eager mode, we can get PODs here and
# everything may not be neatly wrapped up in numpy's ndarray.
feature_dtype = type(feature_np)
if isinstance(feature_np, np.ndarray):
# If we have an empty array, then don't proceed further with computing
# statistics on it.
if feature_np.size == 0:
continue
feature_dtype = feature_np.dtype.type
feature_min, feature_max = None, None
is_numeric = (np.issubdtype(feature_dtype, np.number) or
feature_dtype == np.bool_)
if is_numeric:
feature_min = np.min(feature_np)
feature_max = np.max(feature_np)
# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add
# logic for that.
# Set or update the min, max.
if is_numeric:
if ((feature_name not in feature_to_min) or
(feature_to_min[feature_name] > feature_min)):
feature_to_min[feature_name] = feature_min
if ((feature_name not in feature_to_max) or
(feature_to_max[feature_name] < feature_max)):
feature_to_max[feature_name] = feature_max
# Start here, we've processed all examples.
output_shapes_dict = dataset.output_shapes
output_types_dict = dataset.output_types
for feature_name in sorted(feature_to_num_examples.keys()):
# Try to fill in the schema.
feature = schema.feature.add()
feature.name = feature_name
# TODO(afrozm): Make this work with nested structures, currently the Schema
# proto has no support for it.
maybe_feature_shape = output_shapes_dict[feature_name]
if not isinstance(maybe_feature_shape, tf.TensorShape):
logging.error(
"Statistics generation doesn't work for nested structures yet")
continue
for dim in maybe_feature_shape.as_list():
# We denote `None`s as -1 in the shape proto.
feature.shape.dim.add().size = dim if dim else -1
feature_type = output_types_dict[feature_name]
feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES)
common_statistics = statistics_pb2.CommonStatistics()
common_statistics.num_non_missing = feature_to_num_examples[feature_name]
common_statistics.num_missing = (
statistics.num_examples - common_statistics.num_non_missing)
feature_name_statistics = statistics.features.add()
feature_name_statistics.name = feature_name
# TODO(afrozm): This can be skipped, since type information was added to
# the Schema.
feature_name_statistics.type = _SCHEMA_TYPE_MAP.get(
feature.type, statistics_pb2.FeatureNameStatistics.BYTES)
if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT:
numeric_statistics = statistics_pb2.NumericStatistics()
numeric_statistics.min = feature_to_min[feature_name]
numeric_statistics.max = feature_to_max[feature_name]
numeric_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.num_stats.CopyFrom(numeric_statistics)
else:
# Let's shove it into BytesStatistics for now.
bytes_statistics = statistics_pb2.BytesStatistics()
bytes_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics)
return statistics, schema | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L443-L556 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | read_from_json | Read JSON-formatted proto into DatasetInfo proto. | tensorflow_datasets/core/dataset_info.py | def read_from_json(json_filename):
"""Read JSON-formatted proto into DatasetInfo proto."""
with tf.io.gfile.GFile(json_filename) as f:
dataset_info_json_str = f.read()
# Parse it back into a proto.
parsed_proto = json_format.Parse(dataset_info_json_str,
dataset_info_pb2.DatasetInfo())
return parsed_proto | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L559-L566 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
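A minimal sketch of reading back a previously written metadata file with the helper above; the path is hypothetical and stands in for a `dataset_info.json` produced by an earlier run:

```python
from tensorflow_datasets.core import dataset_info

# Hypothetical location of a metadata file written by download_and_prepare.
json_path = "/tmp/tensorflow_datasets/mnist/1.0.0/dataset_info.json"
info_proto = dataset_info.read_from_json(json_path)

# The parsed proto carries the serialized DatasetInfo fields.
print(info_proto.name, info_proto.version)
print(info_proto.splits)
```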
train | DatasetInfo.full_name | Full canonical name: (<dataset_name>/<config_name>/<version>). | tensorflow_datasets/core/dataset_info.py | def full_name(self):
"""Full canonical name: (<dataset_name>/<config_name>/<version>)."""
names = [self._builder.name]
if self._builder.builder_config:
names.append(self._builder.builder_config.name)
names.append(str(self.version))
return posixpath.join(*names) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L150-L156 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DatasetInfo.update_splits_if_different | Overwrite the splits if they are different from the current ones.
* If splits aren't already defined or different (ex: different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split | tensorflow_datasets/core/dataset_info.py | def update_splits_if_different(self, split_dict):
"""Overwrite the splits if they are different from the current ones.
* If splits aren't already defined or different (ex: different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split
"""
assert isinstance(split_dict, splits_lib.SplitDict)
# If splits are already defined and identical, then we do not update
if self._splits and splits_lib.check_splits_equals(
self._splits, split_dict):
return
self._set_splits(split_dict) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L197-L217 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DatasetInfo._set_splits | Split setter (private method). | tensorflow_datasets/core/dataset_info.py | def _set_splits(self, split_dict):
"""Split setter (private method)."""
# Update the dictionary representation.
# Use from/to proto for a clean copy
self._splits = split_dict.copy()
# Update the proto
del self.as_proto.splits[:] # Clear previous
for split_info in split_dict.to_proto():
self.as_proto.splits.add().CopyFrom(split_info) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L219-L228 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DatasetInfo._compute_dynamic_properties | Update from the DatasetBuilder. | tensorflow_datasets/core/dataset_info.py | def _compute_dynamic_properties(self, builder):
"""Update from the DatasetBuilder."""
# Fill other things by going over the dataset.
splits = self.splits
for split_info in utils.tqdm(
splits.values(), desc="Computing statistics...", unit=" split"):
try:
split_name = split_info.name
# Fill DatasetFeatureStatistics.
dataset_feature_statistics, schema = get_dataset_feature_statistics(
builder, split_name)
# Add the statistics to this split.
split_info.statistics.CopyFrom(dataset_feature_statistics)
# Set the schema at the top-level since this is independent of the
# split.
self.as_proto.schema.CopyFrom(schema)
except tf.errors.InvalidArgumentError:
# This means there is no such split, even though it was specified in the
# info, the least we can do is to log this.
logging.error(("%s's info() property specifies split %s, but it "
"doesn't seem to have been generated. Please ensure "
"that the data was downloaded for this split and re-run "
"download_and_prepare."), self.name, split_name)
raise
# Set splits to trigger proto update in setter
self._set_splits(splits) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L249-L278 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DatasetInfo.write_to_directory | Write `DatasetInfo` as JSON to `dataset_info_dir`. | tensorflow_datasets/core/dataset_info.py | def write_to_directory(self, dataset_info_dir):
"""Write `DatasetInfo` as JSON to `dataset_info_dir`."""
# Save the metadata from the features (vocabulary, labels,...)
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redistribution_info.license:
with tf.io.gfile.GFile(self._license_filename(dataset_info_dir),
"w") as f:
f.write(self.redistribution_info.license)
with tf.io.gfile.GFile(self._dataset_info_filename(dataset_info_dir),
"w") as f:
f.write(self.as_json) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L284-L297 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
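Together with `read_from_json` above, this gives a simple metadata round trip through disk. A sketch, assuming a prepared builder and the default `dataset_info.json` filename:

```python
import os
import tempfile
import tensorflow_datasets as tfds
from tensorflow_datasets.core import dataset_info

builder = tfds.builder("mnist")   # illustrative; any prepared builder works
builder.download_and_prepare()

out_dir = tempfile.mkdtemp()
builder.info.write_to_directory(out_dir)  # writes the JSON plus feature metadata

# Assumes the default metadata filename used by TFDS.
restored = dataset_info.read_from_json(os.path.join(out_dir, "dataset_info.json"))
print(restored.name, restored.version)
```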
train | DatasetInfo.read_from_directory | Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version. | tensorflow_datasets/core/dataset_info.py | def read_from_directory(self, dataset_info_dir):
"""Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
if not dataset_info_dir:
raise ValueError(
"Calling read_from_directory with undefined dataset_info_dir.")
json_filename = self._dataset_info_filename(dataset_info_dir)
# Load the metadata from disk
parsed_proto = read_from_json(json_filename)
# Update splits
self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))
# Restore the feature metadata (vocabulary, labels names,...)
if self.features:
self.features.load_metadata(dataset_info_dir)
# Update fields which are not defined in the code. This means that
# the code will overwrite fields which are present in
# dataset_info.json.
for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
field_value = getattr(self._info_proto, field_name)
field_value_restored = getattr(parsed_proto, field_name)
try:
is_defined = self._info_proto.HasField(field_name)
except ValueError:
is_defined = bool(field_value)
try:
is_defined_in_restored = parsed_proto.HasField(field_name)
except ValueError:
is_defined_in_restored = bool(field_value_restored)
# If field is defined in code, we ignore the value
if is_defined:
if field_value != field_value_restored:
logging.info(
"Field info.%s from disk and from code do not match. Keeping "
"the one from code.", field_name)
continue
# If the field is also not defined in JSON file, we do nothing
if not is_defined_in_restored:
continue
# Otherwise, we restore the dataset_info.json value
if field.type == field.TYPE_MESSAGE:
field_value.MergeFrom(field_value_restored)
else:
setattr(self._info_proto, field_name, field_value_restored)
if self._builder._version != self.version: # pylint: disable=protected-access
raise AssertionError(
"The constructed DatasetInfo instance and the restored proto version "
"do not match. Builder version: {}. Proto version: {}".format(
self._builder._version, self.version)) # pylint: disable=protected-access
# Mark as fully initialized.
self._fully_initialized = True | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L299-L367 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DatasetInfo.initialize_from_bucket | Initialize DatasetInfo from GCS bucket info files. | tensorflow_datasets/core/dataset_info.py | def initialize_from_bucket(self):
"""Initialize DatasetInfo from GCS bucket info files."""
# In order to support Colab, we use the HTTP GCS API to access the metadata
# files. They are copied locally and then loaded.
tmp_dir = tempfile.mkdtemp("tfds")
data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
if not data_files:
return
logging.info("Loading info from GCS for %s", self.full_name)
for fname in data_files:
out_fname = os.path.join(tmp_dir, os.path.basename(fname))
gcs_utils.download_gcs_file(fname, out_fname)
self.read_from_directory(tmp_dir) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L369-L381 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | CycleGAN._split_generators | Returns SplitGenerators. | tensorflow_datasets/image/cycle_gan.py | def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
url = _DL_URLS[self.builder_config.name]
data_dirs = dl_manager.download_and_extract(url)
path_to_dataset = os.path.join(data_dirs, tf.io.gfile.listdir(data_dirs)[0])
train_a_path = os.path.join(path_to_dataset, "trainA")
train_b_path = os.path.join(path_to_dataset, "trainB")
test_a_path = os.path.join(path_to_dataset, "testA")
test_b_path = os.path.join(path_to_dataset, "testB")
return [
tfds.core.SplitGenerator(
name="trainA",
num_shards=10,
gen_kwargs={
"path": train_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="trainB",
num_shards=10,
gen_kwargs={
"path": train_b_path,
"label": "B",
}),
tfds.core.SplitGenerator(
name="testA",
num_shards=1,
gen_kwargs={
"path": test_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="testB",
num_shards=1,
gen_kwargs={
"path": test_b_path,
"label": "B",
}),
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cycle_gan.py#L108-L149 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | _map_promise | Map the function into each element and resolve the promise. | tensorflow_datasets/core/download/download_manager.py | def _map_promise(map_fn, all_inputs):
"""Map the function into each element and resolve the promise."""
all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L392-L396 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager._handle_download_result | Store dled file to definitive place, write INFO file, return path. | tensorflow_datasets/core/download/download_manager.py | def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
"""Store dled file to definitive place, write INFO file, return path."""
fnames = tf.io.gfile.listdir(tmp_dir_path)
if len(fnames) > 1:
raise AssertionError('More than one file in %s.' % tmp_dir_path)
original_fname = fnames[0]
tmp_path = os.path.join(tmp_dir_path, original_fname)
self._recorded_sizes_checksums[resource.url] = (dl_size, sha256)
if self._register_checksums:
self._record_sizes_checksums()
elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None):
raise NonMatchingChecksumError(resource.url, tmp_path)
download_path = self._get_final_dl_path(resource.url, sha256)
resource_lib.write_info_file(resource, download_path, self._dataset_name,
original_fname)
# Unconditionally overwrite because either file doesn't exist or
# FORCE_DOWNLOAD=true
tf.io.gfile.rename(tmp_path, download_path, overwrite=True)
tf.io.gfile.rmtree(tmp_dir_path)
return download_path | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L196-L215 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager._download | Download resource, returns Promise->path to downloaded file. | tensorflow_datasets/core/download/download_manager.py | def _download(self, resource):
"""Download resource, returns Promise->path to downloaded file."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
url = resource.url
if url in self._sizes_checksums:
expected_sha256 = self._sizes_checksums[url][1]
download_path = self._get_final_dl_path(url, expected_sha256)
if not self._force_download and resource.exists_locally(download_path):
logging.info('URL %s already downloaded: reusing %s.',
url, download_path)
self._recorded_sizes_checksums[url] = self._sizes_checksums[url]
return promise.Promise.resolve(download_path)
# There is a slight difference between downloader and extractor here:
# the extractor manages its own temp directory, while the DownloadManager
# manages the temp directory of downloader.
download_dir_path = os.path.join(
self._download_dir,
'%s.tmp.%s' % (resource_lib.get_dl_dirname(url), uuid.uuid4().hex))
tf.io.gfile.makedirs(download_dir_path)
logging.info('Downloading %s into %s...', url, download_dir_path)
def callback(val):
checksum, dl_size = val
return self._handle_download_result(
resource, download_dir_path, checksum, dl_size)
return self._downloader.download(url, download_dir_path).then(callback) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L221-L247 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager._extract | Extract a single archive, returns Promise->path to extraction result. | tensorflow_datasets/core/download/download_manager.py | def _extract(self, resource):
"""Extract a single archive, returns Promise->path to extraction result."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
path = resource.path
extract_method = resource.extract_method
if extract_method == resource_lib.ExtractMethod.NO_EXTRACT:
logging.info('Skipping extraction for %s (method=NO_EXTRACT).', path)
return promise.Promise.resolve(path)
method_name = resource_lib.ExtractMethod(extract_method).name
extract_path = os.path.join(self._extract_dir,
'%s.%s' % (method_name, os.path.basename(path)))
if not self._force_extraction and tf.io.gfile.exists(extract_path):
logging.info('Reusing extraction of %s at %s.', path, extract_path)
return promise.Promise.resolve(extract_path)
return self._extractor.extract(path, extract_method, extract_path) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L251-L266 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager._download_extract | Download-extract `Resource` or url, returns Promise->path. | tensorflow_datasets/core/download/download_manager.py | def _download_extract(self, resource):
"""Download-extract `Resource` or url, returns Promise->path."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
def callback(path):
resource.path = path
return self._extract(resource)
return self._download(resource).then(callback) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L270-L277 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager.download_kaggle_data | Download data for a given Kaggle competition. | tensorflow_datasets/core/download/download_manager.py | def download_kaggle_data(self, competition_name):
"""Download data for a given Kaggle competition."""
with self._downloader.tqdm():
kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
urls = kaggle_downloader.competition_urls
files = kaggle_downloader.competition_files
return _map_promise(self._download,
dict((f, u) for (f, u) in zip(files, urls))) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L279-L286 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager.download | Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls. | tensorflow_datasets/core/download/download_manager.py | def download(self, url_or_urls):
"""Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
return _map_promise(self._download, url_or_urls) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L288-L301 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager.iter_archive | Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj). | tensorflow_datasets/core/download/download_manager.py | def iter_archive(self, resource):
"""Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
"""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
return extractor.iter_archive(resource.path, resource.extract_method) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L303-L317 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
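The warning about reading in order matters because `iter_archive` streams the archive rather than extracting it. A sketch of the usual consumption pattern; the feature handling and file filter are illustrative:

```python
def generate_examples(archive):
    """`archive` is what dl_manager.iter_archive(path) yields: (name, file_obj) pairs."""
    for fname, fobj in archive:
        # Read each file as it is yielded; jumping back to an earlier member is slow.
        if not fname.endswith(".jpg"):
            continue
        yield fname, fobj.read()

# Typical call site inside a builder's _generate_examples:
#   for name, image_bytes in generate_examples(dl_manager.iter_archive(archive_path)):
#       ...
```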
train | DownloadManager.extract | Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method is deduced
from downloaded file name.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths. | tensorflow_datasets/core/download/download_manager.py | def extract(self, path_or_paths):
"""Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method is deduced
from downloaded file name.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
"""
# Add progress bar to follow the download state
with self._extractor.tqdm():
return _map_promise(self._extract, path_or_paths) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L319-L335 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | DownloadManager.download_and_extract | Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s). | tensorflow_datasets/core/download/download_manager.py | def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
with self._extractor.tqdm():
return _map_promise(self._download_extract, url_or_urls) | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L337-L359 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
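This is the entry point most builders call from `_split_generators`, as the CycleGAN and CIFAR-10-C rows above do. A sketch of the common pattern; the URLs, directory layout, and class body are illustrative:

```python
import os
import tensorflow_datasets as tfds

class MyDataset(tfds.core.GeneratorBasedBuilder):
  # VERSION, _info() and _generate_examples() omitted in this sketch.

  def _split_generators(self, dl_manager):
    # A dict of URLs comes back as a dict of extracted paths.
    paths = dl_manager.download_and_extract({
        "train": "https://example.com/train.zip",  # illustrative URLs
        "test": "https://example.com/test.zip",
    })
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            num_shards=10,
            gen_kwargs={"data_dir": os.path.join(paths["train"], "images")}),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            num_shards=1,
            gen_kwargs={"data_dir": os.path.join(paths["test"], "images")}),
    ]
```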
train | DownloadManager.manual_dir | Returns the directory containing the manually extracted data. | tensorflow_datasets/core/download/download_manager.py | def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L362-L368 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | _make_builder_configs | Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to
the 15 corruption types and 5 severities.
Returns:
A list of 75 Cifar10CorruptedConfig objects. | tensorflow_datasets/image/cifar10_corrupted.py | def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to
the 15 corruption types and 5 severities.
Returns:
A list of 75 Cifar10CorruptedConfig objects.
"""
config_list = []
for corruption in _CORRUPTIONS:
for severity in range(1, 6):
config_list.append(
Cifar10CorruptedConfig(
name=corruption + '_' + str(severity),
version='0.0.1',
description='Corruption method: ' + corruption +
', severity level: ' + str(severity),
corruption_type=corruption,
severity=severity,
))
return config_list | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L93-L114 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | Cifar10Corrupted._split_generators | Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split. | tensorflow_datasets/image/cifar10_corrupted.py | def _split_generators(self, dl_manager):
"""Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
"""
path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L138-L153 | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train | Cifar10Corrupted._generate_examples | Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label. | tensorflow_datasets/image/cifar10_corrupted.py | def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_config.corruption
severity = self.builder_config.severity
images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
labels_file = os.path.join(data_dir, _LABELS_FILENAME)
with tf.io.gfile.GFile(labels_file, mode='rb') as f:
labels = np.load(f)
num_images = labels.shape[0] // 5
# Labels are stacked 5 times so we can just read the first iteration
labels = labels[:num_images]
with tf.io.gfile.GFile(images_file, mode='rb') as f:
images = np.load(f)
# Slice images corresponding to correct severity level
images = images[(severity - 1) * num_images:severity * num_images]
for image, label in zip(images, labels):
yield {
'image': image,
'label': label,
} | def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_config.corruption
severity = self.builder_config.severity
images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
labels_file = os.path.join(data_dir, _LABELS_FILENAME)
with tf.io.gfile.GFile(labels_file, mode='rb') as f:
labels = np.load(f)
num_images = labels.shape[0] // 5
# Labels are stacked 5 times so we can just read the first iteration
labels = labels[:num_images]
with tf.io.gfile.GFile(images_file, mode='rb') as f:
images = np.load(f)
# Slice images corresponding to correct severity level
images = images[(severity - 1) * num_images:severity * num_images]
for image, label in zip(images, labels):
yield {
'image': image,
'label': label,
} | [
"Generate",
"corrupted",
"Cifar10",
"test",
"data",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L155-L189 | [
"def",
"_generate_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"corruption",
"=",
"self",
".",
"builder_config",
".",
"corruption",
"severity",
"=",
"self",
".",
"builder_config",
".",
"severity",
"images_file",
"=",
"os",
".",
"path",
".",
"join",
"("... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | document_single_builder | Doc string for a single builder, with or without configs. | tensorflow_datasets/scripts/document_datasets.py | def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
mod_file = mod_file[:-1]
description_prefix = ""
if builder.builder_configs:
# Dataset with configs; document each one
config_docs = []
for config in builder.BUILDER_CONFIGS:
builder = tfds.builder(builder.name, config=config)
info = builder.info
# TODO(rsepassi): document the actual config object
config_doc = SINGLE_CONFIG_ENTRY.format(
builder_name=builder.name,
config_name=config.name,
description=config.description,
version=config.version,
feature_information=make_feature_information(info),
size=tfds.units.size_str(info.size_in_bytes),
)
config_docs.append(config_doc)
out_str = DATASET_WITH_CONFIGS_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
config_names="\n".join([
CONFIG_BULLET.format(name=config.name,
description=config.description,
version=config.version,
size=tfds.units.size_str(tfds.builder(
builder.name, config=config)
.info.size_in_bytes))
for config in builder.BUILDER_CONFIGS]),
config_cls="%s.%s" % (tfds_mod_name(mod_name),
type(builder.builder_config).__name__),
configs="\n".join(config_docs),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
statistics_information=make_statistics_information(info),
description=builder.info.description,
description_prefix=description_prefix,
)
else:
info = builder.info
out_str = DATASET_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
description=info.description,
description_prefix=description_prefix,
version=info.version,
feature_information=make_feature_information(info),
statistics_information=make_statistics_information(info),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
size=tfds.units.size_str(info.size_in_bytes),
)
out_str = schema_org(builder) + "\n" + out_str
return out_str | def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
mod_file = mod_file[:-1]
description_prefix = ""
if builder.builder_configs:
# Dataset with configs; document each one
config_docs = []
for config in builder.BUILDER_CONFIGS:
builder = tfds.builder(builder.name, config=config)
info = builder.info
# TODO(rsepassi): document the actual config object
config_doc = SINGLE_CONFIG_ENTRY.format(
builder_name=builder.name,
config_name=config.name,
description=config.description,
version=config.version,
feature_information=make_feature_information(info),
size=tfds.units.size_str(info.size_in_bytes),
)
config_docs.append(config_doc)
out_str = DATASET_WITH_CONFIGS_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
config_names="\n".join([
CONFIG_BULLET.format(name=config.name,
description=config.description,
version=config.version,
size=tfds.units.size_str(tfds.builder(
builder.name, config=config)
.info.size_in_bytes))
for config in builder.BUILDER_CONFIGS]),
config_cls="%s.%s" % (tfds_mod_name(mod_name),
type(builder.builder_config).__name__),
configs="\n".join(config_docs),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
statistics_information=make_statistics_information(info),
description=builder.info.description,
description_prefix=description_prefix,
)
else:
info = builder.info
out_str = DATASET_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
description=info.description,
description_prefix=description_prefix,
version=info.version,
feature_information=make_feature_information(info),
statistics_information=make_statistics_information(info),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
size=tfds.units.size_str(info.size_in_bytes),
)
out_str = schema_org(builder) + "\n" + out_str
return out_str | [
"Doc",
"string",
"for",
"a",
"single",
"builder",
"with",
"or",
"without",
"configs",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L196-L265 | [
"def",
"document_single_builder",
"(",
"builder",
")",
":",
"mod_name",
"=",
"builder",
".",
"__class__",
".",
"__module__",
"cls_name",
"=",
"builder",
".",
"__class__",
".",
"__name__",
"mod_file",
"=",
"sys",
".",
"modules",
"[",
"mod_name",
"]",
".",
"__... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | make_module_to_builder_dict | Get all builders organized by module in nested dicts. | tensorflow_datasets/scripts/document_datasets.py | def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list)))
# pylint: enable=g-long-lambda
if datasets:
builders = [tfds.builder(name) for name in datasets]
else:
builders = [
tfds.builder(name)
for name in tfds.list_builders()
if name not in BUILDER_BLACKLIST
] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")]
for builder in builders:
mod_name = builder.__class__.__module__
modules = mod_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder | def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list)))
# pylint: enable=g-long-lambda
if datasets:
builders = [tfds.builder(name) for name in datasets]
else:
builders = [
tfds.builder(name)
for name in tfds.list_builders()
if name not in BUILDER_BLACKLIST
] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")]
for builder in builders:
mod_name = builder.__class__.__module__
modules = mod_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder | [
"Get",
"all",
"builders",
"organized",
"by",
"module",
"in",
"nested",
"dicts",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L275-L305 | [
"def",
"make_module_to_builder_dict",
"(",
"datasets",
"=",
"None",
")",
":",
"# pylint: disable=g-long-lambda",
"# dict to hold tfds->image->mnist->[builders]",
"module_to_builder",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"collections",
".",
"defaultdict"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _pprint_features_dict | Pretty-print tfds.features.FeaturesDict. | tensorflow_datasets/scripts/document_datasets.py | def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
type(features_dict).__name__,
)
lines = [first_line]
for k in sorted(list(features_dict.keys())):
v = features_dict[k]
if isinstance(v, tfds.features.FeaturesDict):
v_str = _pprint_features_dict(v, indent + 4, False)
else:
v_str = str(v)
lines.append("%s'%s': %s," % (indent_str, k, v_str))
lines.append("%s})" % first_last_indent_str)
return "\n".join(lines) | def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
type(features_dict).__name__,
)
lines = [first_line]
for k in sorted(list(features_dict.keys())):
v = features_dict[k]
if isinstance(v, tfds.features.FeaturesDict):
v_str = _pprint_features_dict(v, indent + 4, False)
else:
v_str = str(v)
lines.append("%s'%s': %s," % (indent_str, k, v_str))
lines.append("%s})" % first_last_indent_str)
return "\n".join(lines) | [
"Pretty",
"-",
"print",
"tfds",
".",
"features",
".",
"FeaturesDict",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L308-L325 | [
"def",
"_pprint_features_dict",
"(",
"features_dict",
",",
"indent",
"=",
"0",
",",
"add_prefix",
"=",
"True",
")",
":",
"first_last_indent_str",
"=",
"\" \"",
"*",
"indent",
"indent_str",
"=",
"\" \"",
"*",
"(",
"indent",
"+",
"4",
")",
"first_line",
"=",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | make_statistics_information | Make statistics information table. | tensorflow_datasets/scripts/document_datasets.py | def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for split_name, split_info in info.splits.items():
stats.append((split_info.num_examples, split_name.upper()))
# Sort reverse on number of examples.
stats.sort(reverse=True)
stats = "\n".join([
"{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats
])
return STATISTICS_TABLE.format(split_statistics=stats) | def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for split_name, split_info in info.splits.items():
stats.append((split_info.num_examples, split_name.upper()))
# Sort reverse on number of examples.
stats.sort(reverse=True)
stats = "\n".join([
"{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats
])
return STATISTICS_TABLE.format(split_statistics=stats) | [
"Make",
"statistics",
"information",
"table",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L337-L351 | [
"def",
"make_statistics_information",
"(",
"info",
")",
":",
"if",
"not",
"info",
".",
"splits",
".",
"total_num_examples",
":",
"# That means that we have yet to calculate the statistics for this.",
"return",
"\"None computed\"",
"stats",
"=",
"[",
"(",
"info",
".",
"s... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | dataset_docs_str | Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format). | tensorflow_datasets/scripts/document_datasets.py | def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
"""
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc | def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
"""
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc | [
"Create",
"dataset",
"documentation",
"string",
"for",
"given",
"datasets",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L354-L383 | [
"def",
"dataset_docs_str",
"(",
"datasets",
"=",
"None",
")",
":",
"module_to_builder",
"=",
"make_module_to_builder_dict",
"(",
"datasets",
")",
"sections",
"=",
"sorted",
"(",
"list",
"(",
"module_to_builder",
".",
"keys",
"(",
")",
")",
")",
"section_tocs",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | schema_org | Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata | tensorflow_datasets/scripts/document_datasets.py | def schema_org(builder):
# pylint: disable=line-too-long
"""Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
"""
# pylint: enable=line-too-long
properties = [
(lambda x: x.name, SCHEMA_ORG_NAME),
(lambda x: x.description, SCHEMA_ORG_DESC),
(lambda x: x.name, SCHEMA_ORG_URL),
(lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS)
]
info = builder.info
out_str = SCHEMA_ORG_PRE
for extractor, template in properties:
val = extractor(info)
if val:
# We are using cgi module instead of html due to Python 2 compatibility
out_str += template.format(val=cgi.escape(val, quote=True).strip())
out_str += SCHEMA_ORG_POST
return out_str | def schema_org(builder):
# pylint: disable=line-too-long
"""Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
"""
# pylint: enable=line-too-long
properties = [
(lambda x: x.name, SCHEMA_ORG_NAME),
(lambda x: x.description, SCHEMA_ORG_DESC),
(lambda x: x.name, SCHEMA_ORG_URL),
(lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS)
]
info = builder.info
out_str = SCHEMA_ORG_PRE
for extractor, template in properties:
val = extractor(info)
if val:
# We are using cgi module instead of html due to Python 2 compatibility
out_str += template.format(val=cgi.escape(val, quote=True).strip())
out_str += SCHEMA_ORG_POST
return out_str | [
"Builds",
"schema",
".",
"org",
"microdata",
"for",
"DatasetSearch",
"from",
"DatasetBuilder",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L414-L449 | [
"def",
"schema_org",
"(",
"builder",
")",
":",
"# pylint: disable=line-too-long",
"# pylint: enable=line-too-long",
"properties",
"=",
"[",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
",",
"SCHEMA_ORG_NAME",
")",
",",
"(",
"lambda",
"x",
":",
"x",
".",
"descrip... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | disk | Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel. | tensorflow_datasets/image/corruptions.py | def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur) | def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur) | [
"Generating",
"a",
"Gaussian",
"blurring",
"kernel",
"with",
"disk",
"shape",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L46-L70 | [
"def",
"disk",
"(",
"radius",
",",
"alias_blur",
"=",
"0.1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"if",
"radius",
"<=",
"8",
":",
"length",
"=",
"np",
".",
"arange",
"(",
"-",
"8",
",",
"8",
"+",
"1",
")",
"ksize",
"=",
"(",
"3"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | clipped_zoom | Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping. | tensorflow_datasets/image/corruptions.py | def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w] | def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w] | [
"Zoom",
"image",
"with",
"clipping",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L73-L101 | [
"def",
"clipped_zoom",
"(",
"img",
",",
"zoom_factor",
")",
":",
"h",
"=",
"img",
".",
"shape",
"[",
"0",
"]",
"ch",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"h",
"/",
"float",
"(",
"zoom_factor",
")",
")",
")",
"top_h",
"=",
"(",
"h",
"-",
"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | plasma_fractal | Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255]. | tensorflow_datasets/image/corruptions.py | def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize //
2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max() | def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0,255].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize //
2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize //
2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize //
2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max() | [
"Generate",
"a",
"heightmap",
"using",
"diamond",
"-",
"square",
"algorithm",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L104-L159 | [
"def",
"plasma_fractal",
"(",
"mapsize",
"=",
"512",
",",
"wibbledecay",
"=",
"3",
")",
":",
"if",
"mapsize",
"&",
"(",
"mapsize",
"-",
"1",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'mapsize must be a power of two.'",
")",
"maparray",
"=",
"np",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
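Two properties of plasma_fractal worth noting: mapsize must be a power of two (anything else raises ValueError), and the final normalization actually returns values in [0, 1] rather than [0, 255]. A small check, again assuming the module path from this row is importable:
from tensorflow_datasets.image.corruptions import plasma_fractal  # path taken from this row

heightmap = plasma_fractal(mapsize=256, wibbledecay=3)
print(heightmap.shape, heightmap.min(), heightmap.max())  # (256, 256) 0.0 1.0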
train | gaussian_noise | Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise. | tensorflow_datasets/image/corruptions.py | def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip) | def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip) | [
"Gaussian",
"noise",
"corruption",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L167-L180 | [
"def",
"gaussian_noise",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
".08",
",",
".12",
",",
"0.18",
",",
"0.26",
",",
"0.38",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
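The corruption itself is plain additive Gaussian noise in [0, 1] space followed by clipping and rescaling; a pure-NumPy sketch of the same formula at severity 1 (scale 0.08):
import numpy as np

x = np.random.randint(0, 256, size=(32, 32, 3)) / 255.  # uint8-style image mapped to [0, 1]
noisy = np.clip(x + np.random.normal(size=x.shape, scale=0.08), 0, 1) * 255
print(noisy.min() >= 0, noisy.max() <= 255)  # True True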
train | shot_noise | Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise. | tensorflow_datasets/image/corruptions.py | def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip) | def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip) | [
"Shot",
"noise",
"corruption",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L183-L196 | [
"def",
"shot_noise",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"60",
",",
"25",
",",
"12",
",",
"5",
",",
"3",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"x_clip",
"=... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | impulse_noise | Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise. | tensorflow_datasets/image/corruptions.py | def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | [
"Impulse",
"noise",
"corruption",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L199-L213 | [
"def",
"impulse_noise",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
".03",
",",
".06",
",",
".09",
",",
"0.17",
",",
"0.27",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"skimag... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | defocus_blur | Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur. | tensorflow_datasets/image/corruptions.py | def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip) | def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip) | [
"Defocus",
"blurring",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L216-L236 | [
"def",
"defocus_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"(",
"3",
",",
"0.1",
")",
",",
"(",
"4",
",",
"0.5",
")",
",",
"(",
"6",
",",
"0.5",
")",
",",
"(",
"8",
",",
"0.5",
")",
",",
"(",
"10",
",",
"0.5",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
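The defocus blur is one 2-D kernel applied to each channel independently; a sketch of that per-channel filtering, assuming opencv-python is installed and that disk() is importable from the module path used throughout these rows:
import cv2
import numpy as np
from tensorflow_datasets.image.corruptions import disk  # kernel helper from the earlier row

x = np.random.rand(32, 32, 3).astype(np.float32)
kernel = disk(radius=3, alias_blur=0.1)
blurred = np.stack([cv2.filter2D(x[:, :, d], -1, kernel) for d in range(3)], axis=-1)
print(blurred.shape)  # (32, 32, 3)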
train | frosted_glass_blur | Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur. | tensorflow_datasets/image/corruptions.py | def frosted_glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4,
2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip) | def frosted_glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4,
2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip) | [
"Frosted",
"glass",
"blurring",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L239-L270 | [
"def",
"frosted_glass_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"# sigma, max_delta, iterations",
"c",
"=",
"[",
"(",
"0.7",
",",
"1",
",",
"2",
")",
",",
"(",
"0.9",
",",
"2",
",",
"1",
")",
",",
"(",
"1",
",",
"2",
",",
"3",
")",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | zoom_blur | Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur. | tensorflow_datasets/image/corruptions.py | def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | [
"Zoom",
"blurring",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L273-L298 | [
"def",
"zoom_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"np",
".",
"arange",
"(",
"1",
",",
"1.11",
",",
"0.01",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.16",
",",
"0.01",
")",
",",
"np",
".",
"arange",
"("... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | fog | Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog. | tensorflow_datasets/image/corruptions.py | def fog(x, severity=1):
"""Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
"""
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
mapsize = 512
shape = x.shape
max_length = max(shape[0], shape[1])
if max_length > mapsize:
mapsize = 2**int(np.ceil(np.log2(float(max_length))))
tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
tmp = tmp[:x.shape[0], :x.shape[1]]
tmp = tmp[..., np.newaxis]
x += c[0] * tmp
x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return around_and_astype(x_clip) | def fog(x, severity=1):
"""Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
"""
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
mapsize = 512
shape = x.shape
max_length = max(shape[0], shape[1])
if max_length > mapsize:
mapsize = 2**int(np.ceil(np.log2(float(max_length))))
tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
tmp = tmp[:x.shape[0], :x.shape[1]]
tmp = tmp[..., np.newaxis]
x += c[0] * tmp
x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return around_and_astype(x_clip) | [
"Fog",
"corruption",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L301-L326 | [
"def",
"fog",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"(",
"1.5",
",",
"2",
")",
",",
"(",
"2.",
",",
"2",
")",
",",
"(",
"2.5",
",",
"1.7",
")",
",",
"(",
"2.5",
",",
"1.5",
")",
",",
"(",
"3.",
",",
"1.4",
")",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | brightness | Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness. | tensorflow_datasets/image/corruptions.py | def brightness(x, severity=1):
"""Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
"""
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | def brightness(x, severity=1):
"""Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
"""
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | [
"Change",
"brightness",
"of",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L329-L346 | [
"def",
"brightness",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
".1",
",",
".2",
",",
".3",
",",
".4",
",",
".5",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"x",
"=",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | contrast | Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast. | tensorflow_datasets/image/corruptions.py | def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip) | def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip) | [
"Change",
"contrast",
"of",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L349-L364 | [
"def",
"contrast",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"0.4",
",",
".3",
",",
".2",
",",
".1",
",",
".05",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
"means",
"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | elastic | Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform. | tensorflow_datasets/image/corruptions.py | def elastic(x, severity=1):
"""Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
"""
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01,
244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(
z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip) | def elastic(x, severity=1):
"""Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
"""
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01,
244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(
z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip) | [
"Conduct",
"elastic",
"transform",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L367-L425 | [
"def",
"elastic",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"(",
"244",
"*",
"2",
",",
"244",
"*",
"0.7",
",",
"244",
"*",
"0.1",
")",
",",
"(",
"244",
"*",
"2",
",",
"244",
"*",
"0.08",
",",
"244",
"*",
"0.2",
")",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | pixelate | Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption. | tensorflow_datasets/image/corruptions.py | def pixelate(x, severity=1):
"""Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
"""
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x) | def pixelate(x, severity=1):
"""Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
"""
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x) | [
"Pixelate",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L428-L447 | [
"def",
"pixelate",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"0.6",
",",
"0.5",
",",
"0.4",
",",
"0.3",
",",
"0.25",
"]",
"[",
"severity",
"-",
"1",
"]",
"shape",
"=",
"x",
".",
"shape",
"x",
"=",
"tfds",
".",
"core",
".... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | jpeg_compression | Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression. | tensorflow_datasets/image/corruptions.py | def jpeg_compression(x, severity=1):
"""Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
"""
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
return np.asarray(x) | def jpeg_compression(x, severity=1):
"""Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
"""
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
return np.asarray(x) | [
"Conduct",
"jpeg",
"compression",
"to",
"images",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L450-L466 | [
"def",
"jpeg_compression",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"25",
",",
"18",
",",
"15",
",",
"10",
",",
"7",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"PIL_Image",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | temporary_assignment | Temporarily assign obj.attr to value. | tensorflow_datasets/core/utils/py_utils.py | def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
yield
setattr(obj, attr, original) | def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
yield
setattr(obj, attr, original) | [
"Temporarily",
"assign",
"obj",
".",
"attr",
"to",
"value",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L55-L60 | [
"def",
"temporary_assignment",
"(",
"obj",
",",
"attr",
",",
"value",
")",
":",
"original",
"=",
"getattr",
"(",
"obj",
",",
"attr",
",",
"None",
")",
"setattr",
"(",
"obj",
",",
"attr",
",",
"value",
")",
"yield",
"setattr",
"(",
"obj",
",",
"attr",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | zip_dict | Iterate over items of dictionaries grouped by their keys. | tensorflow_datasets/core/utils/py_utils.py | def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)): # set merge all keys
# Will raise KeyError if the dict don't have the same keys
yield key, tuple(d[key] for d in dicts) | def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)): # set merge all keys
# Will raise KeyError if the dict don't have the same keys
yield key, tuple(d[key] for d in dicts) | [
"Iterate",
"over",
"items",
"of",
"dictionaries",
"grouped",
"by",
"their",
"keys",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L63-L67 | [
"def",
"zip_dict",
"(",
"*",
"dicts",
")",
":",
"for",
"key",
"in",
"set",
"(",
"itertools",
".",
"chain",
"(",
"*",
"dicts",
")",
")",
":",
"# set merge all keys",
"# Will raise KeyError if the dict don't have the same keys",
"yield",
"key",
",",
"tuple",
"(",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
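A short, self-contained demonstration of the `zip_dict` pattern above; the split and shard numbers are made up for illustration:

```python
# Sketch: iterating two dicts key-by-key with zip_dict.
import itertools


def zip_dict(*dicts):
    for key in set(itertools.chain(*dicts)):     # merge all keys
        yield key, tuple(d[key] for d in dicts)  # KeyError if keys differ


num_examples = {"train": 60000, "test": 10000}
num_shards = {"train": 10, "test": 1}
for split, (examples, shards) in zip_dict(num_examples, num_shards):
    print(split, examples, shards)
```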
train | map_nested | Apply a function recursively to each element of a nested data struct. | tensorflow_datasets/core/utils/py_utils.py | def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct) | def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct) | [
"Apply",
"a",
"function",
"recursively",
"to",
"each",
"element",
"of",
"a",
"nested",
"data",
"struct",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L122-L143 | [
"def",
"map_nested",
"(",
"function",
",",
"data_struct",
",",
"dict_only",
"=",
"False",
",",
"map_tuple",
"=",
"False",
")",
":",
"# Could add support for more exotic data_struct, like OrderedDict",
"if",
"isinstance",
"(",
"data_struct",
",",
"dict",
")",
":",
"r... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | zip_nested | Zip data struct together and return a data struct with the same shape. | tensorflow_datasets/core/utils/py_utils.py | def zip_nested(arg0, *args, **kwargs):
"""Zip data struct together and return a data struct with the same shape."""
# Python 2 do not support kwargs only arguments
dict_only = kwargs.pop("dict_only", False)
assert not kwargs
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(arg0, dict):
return {
k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)
}
elif not dict_only:
if isinstance(arg0, list):
return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]
# Singleton
return (arg0,) + args | def zip_nested(arg0, *args, **kwargs):
"""Zip data struct together and return a data struct with the same shape."""
# Python 2 do not support kwargs only arguments
dict_only = kwargs.pop("dict_only", False)
assert not kwargs
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(arg0, dict):
return {
k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)
}
elif not dict_only:
if isinstance(arg0, list):
return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]
# Singleton
return (arg0,) + args | [
"Zip",
"data",
"struct",
"together",
"and",
"return",
"a",
"data",
"struct",
"with",
"the",
"same",
"shape",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L146-L161 | [
"def",
"zip_nested",
"(",
"arg0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Python 2 do not support kwargs only arguments",
"dict_only",
"=",
"kwargs",
".",
"pop",
"(",
"\"dict_only\"",
",",
"False",
")",
"assert",
"not",
"kwargs",
"# Could add sup... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | as_proto_cls | Simulate proto inheritance.
By default, protobuf do not support direct inheritance, so this decorator
simulates inheritance to the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProto(proto_field=123)
a = A()
a.CopyFrom(p) # a is like a proto object
assert a.proto_field == 123
a.custom_method() # But has additional methods
```
Args:
proto_cls: The protobuf class to inherit from
Returns:
decorated_cls: The decorated class | tensorflow_datasets/core/utils/py_utils.py | def as_proto_cls(proto_cls):
"""Simulate proto inheritance.
By default, protobuf do not support direct inheritance, so this decorator
simulates inheritance to the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProto(proto_field=123)
a = A()
a.CopyFrom(p) # a is like a proto object
assert a.proto_field == 123
a.custom_method() # But has additional methods
```
Args:
proto_cls: The protobuf class to inherit from
Returns:
decorated_cls: The decorated class
"""
def decorator(cls):
"""Decorator applied to the class."""
class ProtoCls(object):
"""Base class simulating the protobuf."""
def __init__(self, *args, **kwargs):
super(ProtoCls, self).__setattr__(
"_ProtoCls__proto",
proto_cls(*args, **kwargs),
)
def __getattr__(self, attr_name):
return getattr(self.__proto, attr_name)
def __setattr__(self, attr_name, new_value):
try:
return setattr(self.__proto, attr_name, new_value)
except AttributeError:
return super(ProtoCls, self).__setattr__(attr_name, new_value)
def __eq__(self, other):
return self.__proto, other.get_proto()
def get_proto(self):
return self.__proto
def __repr__(self):
return "<{cls_name}\n{proto_repr}\n>".format(
cls_name=cls.__name__, proto_repr=repr(self.__proto))
decorator_cls = type(cls.__name__, (cls, ProtoCls), {
"__doc__": cls.__doc__,
})
return decorator_cls
return decorator | def as_proto_cls(proto_cls):
"""Simulate proto inheritance.
By default, protobuf do not support direct inheritance, so this decorator
simulates inheritance to the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProto(proto_field=123)
a = A()
a.CopyFrom(p) # a is like a proto object
assert a.proto_field == 123
a.custom_method() # But has additional methods
```
Args:
proto_cls: The protobuf class to inherit from
Returns:
decorated_cls: The decorated class
"""
def decorator(cls):
"""Decorator applied to the class."""
class ProtoCls(object):
"""Base class simulating the protobuf."""
def __init__(self, *args, **kwargs):
super(ProtoCls, self).__setattr__(
"_ProtoCls__proto",
proto_cls(*args, **kwargs),
)
def __getattr__(self, attr_name):
return getattr(self.__proto, attr_name)
def __setattr__(self, attr_name, new_value):
try:
return setattr(self.__proto, attr_name, new_value)
except AttributeError:
return super(ProtoCls, self).__setattr__(attr_name, new_value)
def __eq__(self, other):
return self.__proto, other.get_proto()
def get_proto(self):
return self.__proto
def __repr__(self):
return "<{cls_name}\n{proto_repr}\n>".format(
cls_name=cls.__name__, proto_repr=repr(self.__proto))
decorator_cls = type(cls.__name__, (cls, ProtoCls), {
"__doc__": cls.__doc__,
})
return decorator_cls
return decorator | [
"Simulate",
"proto",
"inheritance",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L164-L229 | [
"def",
"as_proto_cls",
"(",
"proto_cls",
")",
":",
"def",
"decorator",
"(",
"cls",
")",
":",
"\"\"\"Decorator applied to the class.\"\"\"",
"class",
"ProtoCls",
"(",
"object",
")",
":",
"\"\"\"Base class simulating the protobuf.\"\"\"",
"def",
"__init__",
"(",
"self",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
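A condensed, self-contained copy of `as_proto_cls` (docstrings, `__eq__`, and `__repr__` trimmed), exercised with a plain Python class standing in for a generated protobuf message; `FakeProto` and `MyInfo` are invented for the example, while real usage passes an actual proto class:

```python
# Sketch: simulating proto inheritance with as_proto_cls.
def as_proto_cls(proto_cls):
    def decorator(cls):
        class ProtoCls(object):
            def __init__(self, *args, **kwargs):
                super(ProtoCls, self).__setattr__(
                    "_ProtoCls__proto", proto_cls(*args, **kwargs))

            def __getattr__(self, attr_name):
                return getattr(self.__proto, attr_name)

            def __setattr__(self, attr_name, new_value):
                try:
                    return setattr(self.__proto, attr_name, new_value)
                except AttributeError:
                    return super(ProtoCls, self).__setattr__(attr_name, new_value)

            def get_proto(self):
                return self.__proto

        return type(cls.__name__, (cls, ProtoCls), {"__doc__": cls.__doc__})
    return decorator


class FakeProto(object):          # stand-in for a protobuf message class
    def __init__(self, proto_field=0):
        self.proto_field = proto_field


@as_proto_cls(FakeProto)
class MyInfo(object):
    def custom_method(self):
        return self.proto_field * 10


info = MyInfo(proto_field=12)
print(info.proto_field)              # 12, read through the wrapped message
print(info.custom_method())          # 120, extra method from the subclass
info.proto_field = 7                 # forwarded to the underlying FakeProto
print(info.get_proto().proto_field)  # 7
```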
train | tfds_dir | Path to tensorflow_datasets directory. | tensorflow_datasets/core/utils/py_utils.py | def tfds_dir():
"""Path to tensorflow_datasets directory."""
return os.path.dirname(os.path.dirname(os.path.dirname(__file__))) | def tfds_dir():
"""Path to tensorflow_datasets directory."""
return os.path.dirname(os.path.dirname(os.path.dirname(__file__))) | [
"Path",
"to",
"tensorflow_datasets",
"directory",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L232-L234 | [
"def",
"tfds_dir",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
")"
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | atomic_write | Writes to path atomically, by writing to temp file and renaming it. | tensorflow_datasets/core/utils/py_utils.py | def atomic_write(path, mode):
"""Writes to path atomically, by writing to temp file and renaming it."""
tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
with tf.io.gfile.GFile(tmp_path, mode) as file_:
yield file_
tf.io.gfile.rename(tmp_path, path, overwrite=True) | def atomic_write(path, mode):
"""Writes to path atomically, by writing to temp file and renaming it."""
tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
with tf.io.gfile.GFile(tmp_path, mode) as file_:
yield file_
tf.io.gfile.rename(tmp_path, path, overwrite=True) | [
"Writes",
"to",
"path",
"atomically",
"by",
"writing",
"to",
"temp",
"file",
"and",
"renaming",
"it",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L238-L243 | [
"def",
"atomic_write",
"(",
"path",
",",
"mode",
")",
":",
"tmp_path",
"=",
"\"%s%s_%s\"",
"%",
"(",
"path",
",",
"constants",
".",
"INCOMPLETE_SUFFIX",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | read_checksum_digest | Given a hash constructor, returns checksum digest and size of file. | tensorflow_datasets/core/utils/py_utils.py | def read_checksum_digest(path, checksum_cls=hashlib.sha256):
"""Given a hash constructor, returns checksum digest and size of file."""
checksum = checksum_cls()
size = 0
with tf.io.gfile.GFile(path, "rb") as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
size += len(block)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), size | def read_checksum_digest(path, checksum_cls=hashlib.sha256):
"""Given a hash constructor, returns checksum digest and size of file."""
checksum = checksum_cls()
size = 0
with tf.io.gfile.GFile(path, "rb") as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
size += len(block)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), size | [
"Given",
"a",
"hash",
"constructor",
"returns",
"checksum",
"digest",
"and",
"size",
"of",
"file",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L262-L273 | [
"def",
"read_checksum_digest",
"(",
"path",
",",
"checksum_cls",
"=",
"hashlib",
".",
"sha256",
")",
":",
"checksum",
"=",
"checksum_cls",
"(",
")",
"size",
"=",
"0",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"\"rb\"",
")",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | reraise | Reraise an exception with an additional message. | tensorflow_datasets/core/utils/py_utils.py | def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback) | def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback) | [
"Reraise",
"an",
"exception",
"with",
"an",
"additional",
"message",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L276-L280 | [
"def",
"reraise",
"(",
"additional_msg",
")",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"str",
"(",
"exc_value",
")",
"+",
"\"\\n\"",
"+",
"additional_msg",
"six",
".",
"reraise",
"(",
"ex... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | rgetattr | Get attr that handles dots in attr name. | tensorflow_datasets/core/utils/py_utils.py | def rgetattr(obj, attr, *args):
"""Get attr that handles dots in attr name."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split(".")) | def rgetattr(obj, attr, *args):
"""Get attr that handles dots in attr name."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split(".")) | [
"Get",
"attr",
"that",
"handles",
"dots",
"in",
"attr",
"name",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L283-L287 | [
"def",
"rgetattr",
"(",
"obj",
",",
"attr",
",",
"*",
"args",
")",
":",
"def",
"_getattr",
"(",
"obj",
",",
"attr",
")",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
",",
"*",
"args",
")",
"return",
"functools",
".",
"reduce",
"(",
"_getattr"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | CelebAHq._split_generators | Returns SplitGenerators. | tensorflow_datasets/image/celebahq.py | def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
image_tar_file = os.path.join(dl_manager.manual_dir,
self.builder_config.file_name)
if not tf.io.gfile.exists(image_tar_file):
# The current celebahq generation code depends on a concrete version of
# pillow library and cannot be easily ported into tfds.
msg = "You must download the dataset files manually and place them in: "
msg += dl_manager.manual_dir
msg += " as .tar files. See testing/test_data/fake_examples/celeb_a_hq "
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=50,
gen_kwargs={"archive": dl_manager.iter_archive(image_tar_file)},
)
] | def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
image_tar_file = os.path.join(dl_manager.manual_dir,
self.builder_config.file_name)
if not tf.io.gfile.exists(image_tar_file):
# The current celebahq generation code depends on a concrete version of
# pillow library and cannot be easily ported into tfds.
msg = "You must download the dataset files manually and place them in: "
msg += dl_manager.manual_dir
msg += " as .tar files. See testing/test_data/fake_examples/celeb_a_hq "
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=50,
gen_kwargs={"archive": dl_manager.iter_archive(image_tar_file)},
)
] | [
"Returns",
"SplitGenerators",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celebahq.py#L107-L124 | [
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"image_tar_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_manager",
".",
"manual_dir",
",",
"self",
".",
"builder_config",
".",
"file_name",
")",
"if",
"not",
"tf",
".",
"io",
".... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | TedHrlrTranslate._generate_examples | This function returns the examples in the raw (text) form. | tensorflow_datasets/translate/ted_hrlr.py | def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(source_file) as f:
source_sentences = f.read().split("\n")
with tf.io.gfile.GFile(target_file) as f:
target_sentences = f.read().split("\n")
assert len(target_sentences) == len(
source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (len(
source_sentences), len(target_sentences), source_file, target_file)
source, target = self.builder_config.language_pair
for l1, l2 in zip(source_sentences, target_sentences):
result = {source: l1, target: l2}
# Make sure that both translations are non-empty.
if all(result.values()):
yield result | def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(source_file) as f:
source_sentences = f.read().split("\n")
with tf.io.gfile.GFile(target_file) as f:
target_sentences = f.read().split("\n")
assert len(target_sentences) == len(
source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (len(
source_sentences), len(target_sentences), source_file, target_file)
source, target = self.builder_config.language_pair
for l1, l2 in zip(source_sentences, target_sentences):
result = {source: l1, target: l2}
# Make sure that both translations are non-empty.
if all(result.values()):
yield result | [
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/ted_hrlr.py#L160-L176 | [
"def",
"_generate_examples",
"(",
"self",
",",
"source_file",
",",
"target_file",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"source_file",
")",
"as",
"f",
":",
"source_sentences",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Xnli._generate_examples | This function returns the examples in the raw (text) form. | tensorflow_datasets/text/xnli.py | def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with tf.io.gfile.GFile(filepath) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row['pairID']].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row['language']: row['sentence1'] for row in rows}
hypothesis = {row['language']: row['sentence2'] for row in rows}
yield {
'premise': premise,
'hypothesis': hypothesis,
'label': rows[0]['gold_label'],
} | def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with tf.io.gfile.GFile(filepath) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row['pairID']].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row['language']: row['sentence1'] for row in rows}
hypothesis = {row['language']: row['sentence2'] for row in rows}
yield {
'premise': premise,
'hypothesis': hypothesis,
'label': rows[0]['gold_label'],
} | [
"This",
"function",
"returns",
"the",
"examples",
"in",
"the",
"raw",
"(",
"text",
")",
"form",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/xnli.py#L107-L123 | [
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"rows_per_pair_id",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
")",
"as",
"f",
":",
"reader",
"=",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
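The pairID-grouping pattern from the XNLI record, run on a tiny in-memory TSV instead of the real data file; the sentences and labels below are made up:

```python
# Sketch: group parallel rows by pairID, then build one multilingual example.
import collections
import csv
import io

TSV = (
    "language\tpairID\tsentence1\tsentence2\tgold_label\n"
    "en\t1\tA man is eating.\tA person eats.\tentailment\n"
    "fr\t1\tUn homme mange.\tUne personne mange.\tentailment\n"
)

rows_per_pair_id = collections.defaultdict(list)
reader = csv.DictReader(io.StringIO(TSV), delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
    rows_per_pair_id[row["pairID"]].append(row)

for rows in rows_per_pair_id.values():
    print({
        "premise": {r["language"]: r["sentence1"] for r in rows},
        "hypothesis": {r["language"]: r["sentence2"] for r in rows},
        "label": rows[0]["gold_label"],
    })
```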
train | Voc2007._generate_example | Yields examples. | tensorflow_datasets/image/voc.py | def _generate_example(self, data_path, image_id):
"""Yields examples."""
image_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/JPEGImages", "{}.jpg".format(image_id))
annon_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/Annotations", "{}.xml".format(image_id))
def _get_example_objects():
"""Function to get all the objects from the annotation XML file."""
with tf.io.gfile.GFile(annon_filepath, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
size = root.find("size")
width = float(size.find("width").text)
height = float(size.find("height").text)
for obj in root.findall("object"):
# Get object's label name.
label = obj.find("name").text.lower()
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = (obj.find("truncated").text == "1")
is_difficult = (obj.find("difficult").text == "1")
bndbox = obj.find("bndbox")
xmax = float(bndbox.find("xmax").text)
xmin = float(bndbox.find("xmin").text)
ymax = float(bndbox.find("ymax").text)
ymin = float(bndbox.find("ymin").text)
yield {
"label": label,
"pose": pose,
"bbox": tfds.features.BBox(
ymin / height, xmin / width, ymax / height, xmax / width),
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
objects = list(_get_example_objects())
# Use set() to remove duplicates
labels = sorted(set(obj["label"] for obj in objects))
labels_no_difficult = sorted(set(
obj["label"] for obj in objects if obj["is_difficult"] == 0
))
return {
"image": image_filepath,
"image/filename": image_id + ".jpg",
"objects": objects,
"labels": labels,
"labels_no_difficult": labels_no_difficult,
} | def _generate_example(self, data_path, image_id):
"""Yields examples."""
image_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/JPEGImages", "{}.jpg".format(image_id))
annon_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/Annotations", "{}.xml".format(image_id))
def _get_example_objects():
"""Function to get all the objects from the annotation XML file."""
with tf.io.gfile.GFile(annon_filepath, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
size = root.find("size")
width = float(size.find("width").text)
height = float(size.find("height").text)
for obj in root.findall("object"):
# Get object's label name.
label = obj.find("name").text.lower()
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = (obj.find("truncated").text == "1")
is_difficult = (obj.find("difficult").text == "1")
bndbox = obj.find("bndbox")
xmax = float(bndbox.find("xmax").text)
xmin = float(bndbox.find("xmin").text)
ymax = float(bndbox.find("ymax").text)
ymin = float(bndbox.find("ymin").text)
yield {
"label": label,
"pose": pose,
"bbox": tfds.features.BBox(
ymin / height, xmin / width, ymax / height, xmax / width),
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
objects = list(_get_example_objects())
# Use set() to remove duplicates
labels = sorted(set(obj["label"] for obj in objects))
labels_no_difficult = sorted(set(
obj["label"] for obj in objects if obj["is_difficult"] == 0
))
return {
"image": image_filepath,
"image/filename": image_id + ".jpg",
"objects": objects,
"labels": labels,
"labels_no_difficult": labels_no_difficult,
} | [
"Yields",
"examples",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/voc.py#L137-L186 | [
"def",
"_generate_example",
"(",
"self",
",",
"data_path",
",",
"image_id",
")",
":",
"image_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"\"VOCdevkit/VOC2007/JPEGImages\"",
",",
"\"{}.jpg\"",
".",
"format",
"(",
"image_id",
")",
")",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image.set_encoding_format | Update the encoding format. | tensorflow_datasets/core/features/image_feature.py | def set_encoding_format(self, encoding_format):
"""Update the encoding format."""
supported = ENCODE_FN.keys()
if encoding_format not in supported:
raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format | def set_encoding_format(self, encoding_format):
"""Update the encoding format."""
supported = ENCODE_FN.keys()
if encoding_format not in supported:
raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format | [
"Update",
"the",
"encoding",
"format",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L97-L102 | [
"def",
"set_encoding_format",
"(",
"self",
",",
"encoding_format",
")",
":",
"supported",
"=",
"ENCODE_FN",
".",
"keys",
"(",
")",
"if",
"encoding_format",
"not",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"'`encoding_format` must be one of %s.'",
"%",
"s... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image.set_shape | Update the shape. | tensorflow_datasets/core/features/image_feature.py | def set_shape(self, shape):
"""Update the shape."""
channels = shape[-1]
acceptable_channels = ACCEPTABLE_CHANNELS[self._encoding_format]
if channels not in acceptable_channels:
raise ValueError('Acceptable `channels` for %s: %s (was %s)' % (
self._encoding_format, acceptable_channels, channels))
self._shape = tuple(shape) | def set_shape(self, shape):
"""Update the shape."""
channels = shape[-1]
acceptable_channels = ACCEPTABLE_CHANNELS[self._encoding_format]
if channels not in acceptable_channels:
raise ValueError('Acceptable `channels` for %s: %s (was %s)' % (
self._encoding_format, acceptable_channels, channels))
self._shape = tuple(shape) | [
"Update",
"the",
"shape",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L104-L111 | [
"def",
"set_shape",
"(",
"self",
",",
"shape",
")",
":",
"channels",
"=",
"shape",
"[",
"-",
"1",
"]",
"acceptable_channels",
"=",
"ACCEPTABLE_CHANNELS",
"[",
"self",
".",
"_encoding_format",
"]",
"if",
"channels",
"not",
"in",
"acceptable_channels",
":",
"r... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image._encode_image | Returns np_image encoded as jpeg or png. | tensorflow_datasets/core/features/image_feature.py | def _encode_image(self, np_image):
"""Returns np_image encoded as jpeg or png."""
if np_image.dtype != np.uint8:
raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
utils.assert_shape_match(np_image.shape, self._shape)
return self._runner.run(ENCODE_FN[self._encoding_format], np_image) | def _encode_image(self, np_image):
"""Returns np_image encoded as jpeg or png."""
if np_image.dtype != np.uint8:
raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
utils.assert_shape_match(np_image.shape, self._shape)
return self._runner.run(ENCODE_FN[self._encoding_format], np_image) | [
"Returns",
"np_image",
"encoded",
"as",
"jpeg",
"or",
"png",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L128-L133 | [
"def",
"_encode_image",
"(",
"self",
",",
"np_image",
")",
":",
"if",
"np_image",
".",
"dtype",
"!=",
"np",
".",
"uint8",
":",
"raise",
"ValueError",
"(",
"'Image should be uint8. Detected: %s.'",
"%",
"np_image",
".",
"dtype",
")",
"utils",
".",
"assert_shape... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image.encode_example | Convert the given image into a dict convertible to tf example. | tensorflow_datasets/core/features/image_feature.py | def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
encoded_image = image_f.read()
else:
encoded_image = image_or_path_or_fobj.read()
return encoded_image | def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
encoded_image = image_f.read()
else:
encoded_image = image_or_path_or_fobj.read()
return encoded_image | [
"Convert",
"the",
"given",
"image",
"into",
"a",
"dict",
"convertible",
"to",
"tf",
"example",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L135-L144 | [
"def",
"encode_example",
"(",
"self",
",",
"image_or_path_or_fobj",
")",
":",
"if",
"isinstance",
"(",
"image_or_path_or_fobj",
",",
"np",
".",
"ndarray",
")",
":",
"encoded_image",
"=",
"self",
".",
"_encode_image",
"(",
"image_or_path_or_fobj",
")",
"elif",
"i... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image.decode_example | Reconstruct the image from the tf example. | tensorflow_datasets/core/features/image_feature.py | def decode_example(self, example):
"""Reconstruct the image from the tf example."""
img = tf.image.decode_image(
example, channels=self._shape[-1], dtype=tf.uint8)
img.set_shape(self._shape)
return img | def decode_example(self, example):
"""Reconstruct the image from the tf example."""
img = tf.image.decode_image(
example, channels=self._shape[-1], dtype=tf.uint8)
img.set_shape(self._shape)
return img | [
"Reconstruct",
"the",
"image",
"from",
"the",
"tf",
"example",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L146-L151 | [
"def",
"decode_example",
"(",
"self",
",",
"example",
")",
":",
"img",
"=",
"tf",
".",
"image",
".",
"decode_image",
"(",
"example",
",",
"channels",
"=",
"self",
".",
"_shape",
"[",
"-",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"img",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image.save_metadata | See base class for details. | tensorflow_datasets/core/features/image_feature.py | def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
filepath = _get_metadata_filepath(data_dir, feature_name)
with tf.io.gfile.GFile(filepath, 'w') as f:
json.dump({
'shape': [-1 if d is None else d for d in self._shape],
'encoding_format': self._encoding_format,
}, f, sort_keys=True) | def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
filepath = _get_metadata_filepath(data_dir, feature_name)
with tf.io.gfile.GFile(filepath, 'w') as f:
json.dump({
'shape': [-1 if d is None else d for d in self._shape],
'encoding_format': self._encoding_format,
}, f, sort_keys=True) | [
"See",
"base",
"class",
"for",
"details",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L153-L160 | [
"def",
"save_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"filepath",
"=",
"_get_metadata_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Image.load_metadata | See base class for details. | tensorflow_datasets/core/features/image_feature.py | def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data = json.load(f)
self.set_encoding_format(info_data['encoding_format'])
self.set_shape([None if d == -1 else d for d in info_data['shape']]) | def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data = json.load(f)
self.set_encoding_format(info_data['encoding_format'])
self.set_shape([None if d == -1 else d for d in info_data['shape']]) | [
"See",
"base",
"class",
"for",
"details",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L162-L170 | [
"def",
"load_metadata",
"(",
"self",
",",
"data_dir",
",",
"feature_name",
"=",
"None",
")",
":",
"# Restore names if defined",
"filepath",
"=",
"_get_metadata_filepath",
"(",
"data_dir",
",",
"feature_name",
")",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exi... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _create_moving_sequence | Create a moving image sequence from the given image a left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
out_h = in_h + pad_h, out_w = in_w + out_w | tensorflow_datasets/video/moving_sequence.py | def _create_moving_sequence(image, pad_lefts, total_padding):
"""Create a moving image sequence from the given image a left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
out_h = in_h + pad_h, out_w = in_w + out_w
"""
with tf.name_scope("moving_sequence"):
def get_padded_image(args):
pad_left, = args
pad_right = total_padding - pad_left
padding = tf.stack([pad_left, pad_right], axis=-1)
z = tf.zeros((1, 2), dtype=pad_left.dtype)
padding = tf.concat([padding, z], axis=0)
return tf.pad(image, padding)
padded_images = tf.map_fn(
get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False,
back_prop=False)
return padded_images | def _create_moving_sequence(image, pad_lefts, total_padding):
"""Create a moving image sequence from the given image a left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
out_h = in_h + pad_h, out_w = in_w + out_w
"""
with tf.name_scope("moving_sequence"):
def get_padded_image(args):
pad_left, = args
pad_right = total_padding - pad_left
padding = tf.stack([pad_left, pad_right], axis=-1)
z = tf.zeros((1, 2), dtype=pad_left.dtype)
padding = tf.concat([padding, z], axis=0)
return tf.pad(image, padding)
padded_images = tf.map_fn(
get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False,
back_prop=False)
return padded_images | [
"Create",
"a",
"moving",
"image",
"sequence",
"from",
"the",
"given",
"image",
"a",
"left",
"padding",
"values",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_sequence.py#L27-L53 | [
"def",
"_create_moving_sequence",
"(",
"image",
",",
"pad_lefts",
",",
"total_padding",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"moving_sequence\"",
")",
":",
"def",
"get_padded_image",
"(",
"args",
")",
":",
"pad_left",
",",
"=",
"args",
"pad_right"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _get_linear_trajectory | Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor. | tensorflow_datasets/video/moving_sequence.py | def _get_linear_trajectory(x0, velocity, t):
"""Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor.
"""
x0 = tf.convert_to_tensor(x0)
velocity = tf.convert_to_tensor(velocity)
t = tf.convert_to_tensor(t)
if x0.shape.ndims != 1:
raise ValueError("x0 must be a rank 1 tensor")
if velocity.shape.ndims != 1:
raise ValueError("velocity must be a rank 1 tensor")
if t.shape.ndims != 1:
raise ValueError("t must be a rank 1 tensor")
x0 = tf.expand_dims(x0, axis=0)
velocity = tf.expand_dims(velocity, axis=0)
dx = velocity * tf.expand_dims(t, axis=-1)
linear_trajectories = x0 + dx
assert linear_trajectories.shape.ndims == 2, \
"linear_trajectories should be a rank 2 tensor"
return linear_trajectories | def _get_linear_trajectory(x0, velocity, t):
"""Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor.
"""
x0 = tf.convert_to_tensor(x0)
velocity = tf.convert_to_tensor(velocity)
t = tf.convert_to_tensor(t)
if x0.shape.ndims != 1:
raise ValueError("x0 must be a rank 1 tensor")
if velocity.shape.ndims != 1:
raise ValueError("velocity must be a rank 1 tensor")
if t.shape.ndims != 1:
raise ValueError("t must be a rank 1 tensor")
x0 = tf.expand_dims(x0, axis=0)
velocity = tf.expand_dims(velocity, axis=0)
dx = velocity * tf.expand_dims(t, axis=-1)
linear_trajectories = x0 + dx
assert linear_trajectories.shape.ndims == 2, \
"linear_trajectories should be a rank 2 tensor"
return linear_trajectories | [
"Construct",
"a",
"linear",
"trajectory",
"from",
"x0",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_sequence.py#L56-L82 | [
"def",
"_get_linear_trajectory",
"(",
"x0",
",",
"velocity",
",",
"t",
")",
":",
"x0",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"x0",
")",
"velocity",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"velocity",
")",
"t",
"=",
"tf",
".",
"convert_to_tensor",
"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | image_as_moving_sequence | Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
    # don't remove `anim =` as linter may suggest
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
      otherwise the randomly generated value.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated. | tensorflow_datasets/video/moving_sequence.py | def image_as_moving_sequence(
image, sequence_length=20, output_size=(64, 64), velocity=0.1,
start_position=None):
"""Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
    # don't remove `anim =` as linter may suggest
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
      otherwise the randomly generated value.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated.
"""
ndims = 2
image = tf.convert_to_tensor(image)
if image.shape.ndims != 3:
raise ValueError("image must be rank 3, got %s" % str(image))
output_size = tf.TensorShape(output_size)
if len(output_size) != ndims:
raise ValueError("output_size must have exactly %d elements, got %s"
% (ndims, output_size))
image_shape = tf.shape(image)
if start_position is None:
start_position = tf.random.uniform((ndims,), dtype=tf.float32)
elif start_position.shape != (ndims,):
raise ValueError("start_positions must (%d,)" % ndims)
velocity = tf.convert_to_tensor(velocity, dtype=tf.float32)
if velocity.shape.ndims == 0:
velocity = _get_random_unit_vector(ndims, tf.float32) * velocity
elif velocity.shape.ndims != 1:
raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity)
t = tf.range(sequence_length, dtype=tf.float32)
trajectory = _get_linear_trajectory(start_position, velocity, t)
trajectory = _bounce_to_bbox(trajectory)
total_padding = output_size - image_shape[:2]
if not tf.executing_eagerly():
cond = tf.compat.v1.assert_greater(total_padding, -1)
with tf.control_dependencies([cond]):
total_padding = tf.identity(total_padding)
sequence_pad_lefts = tf.cast(
tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32)
sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding)
sequence.set_shape(
[sequence_length] + output_size.as_list() + [image.shape[-1]])
return MovingSequence(
image_sequence=sequence,
trajectory=trajectory,
start_position=start_position,
velocity=velocity) | def image_as_moving_sequence(
image, sequence_length=20, output_size=(64, 64), velocity=0.1,
start_position=None):
"""Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
    # don't remove `anim =` as linter may suggest
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
      otherwise the randomly generated value.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated.
"""
ndims = 2
image = tf.convert_to_tensor(image)
if image.shape.ndims != 3:
raise ValueError("image must be rank 3, got %s" % str(image))
output_size = tf.TensorShape(output_size)
if len(output_size) != ndims:
raise ValueError("output_size must have exactly %d elements, got %s"
% (ndims, output_size))
image_shape = tf.shape(image)
if start_position is None:
start_position = tf.random.uniform((ndims,), dtype=tf.float32)
elif start_position.shape != (ndims,):
raise ValueError("start_positions must (%d,)" % ndims)
velocity = tf.convert_to_tensor(velocity, dtype=tf.float32)
if velocity.shape.ndims == 0:
velocity = _get_random_unit_vector(ndims, tf.float32) * velocity
elif velocity.shape.ndims != 1:
raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity)
t = tf.range(sequence_length, dtype=tf.float32)
trajectory = _get_linear_trajectory(start_position, velocity, t)
trajectory = _bounce_to_bbox(trajectory)
total_padding = output_size - image_shape[:2]
if not tf.executing_eagerly():
cond = tf.compat.v1.assert_greater(total_padding, -1)
with tf.control_dependencies([cond]):
total_padding = tf.identity(total_padding)
sequence_pad_lefts = tf.cast(
tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32)
sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding)
sequence.set_shape(
[sequence_length] + output_size.as_list() + [image.shape[-1]])
return MovingSequence(
image_sequence=sequence,
trajectory=trajectory,
start_position=start_position,
velocity=velocity) | [
"Turn",
"simple",
"static",
"images",
"into",
"sequences",
"of",
"the",
"originals",
"bouncing",
"around",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_sequence.py#L115-L234 | [
"def",
"image_as_moving_sequence",
"(",
"image",
",",
"sequence_length",
"=",
"20",
",",
"output_size",
"=",
"(",
"64",
",",
"64",
")",
",",
"velocity",
"=",
"0.1",
",",
"start_position",
"=",
"None",
")",
":",
"ndims",
"=",
"2",
"image",
"=",
"tf",
".... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Nsynth._split_generators | Returns splits. | tensorflow_datasets/audio/nsynth.py | def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord" % split for split in _SPLITS
}
dl_urls["instrument_labels"] = (_BASE_DOWNLOAD_PATH +
"instrument_labels.txt")
dl_paths = dl_manager.download_and_extract(dl_urls)
instrument_labels = tf.io.gfile.GFile(dl_paths["instrument_labels"],
"r").read().strip().split("\n")
self.info.features["instrument"]["label"].names = instrument_labels
return [
tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={"path": dl_paths[split]}) for split in _SPLITS
] | def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord" % split for split in _SPLITS
}
dl_urls["instrument_labels"] = (_BASE_DOWNLOAD_PATH +
"instrument_labels.txt")
dl_paths = dl_manager.download_and_extract(dl_urls)
instrument_labels = tf.io.gfile.GFile(dl_paths["instrument_labels"],
"r").read().strip().split("\n")
self.info.features["instrument"]["label"].names = instrument_labels
return [
tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={"path": dl_paths[split]}) for split in _SPLITS
] | [
"Returns",
"splits",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/audio/nsynth.py#L117-L135 | [
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"dl_urls",
"=",
"{",
"split",
":",
"_BASE_DOWNLOAD_PATH",
"+",
"\"%s.tfrecord\"",
"%",
"split",
"for",
"split",
"in",
"_SPLITS",
"}",
"dl_urls",
"[",
"\"instrument_labels\"",
"]",
"=",
"(",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _str_to_version | Return the tuple (major, minor, patch) version extracted from the str. | tensorflow_datasets/core/utils/version.py | def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(
v if v == "*" else int(v)
for v in [res.group("major"), res.group("minor"), res.group("patch")]) | def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(
v if v == "*" else int(v)
for v in [res.group("major"), res.group("minor"), res.group("patch")]) | [
"Return",
"the",
"tuple",
"(",
"major",
"minor",
"patch",
")",
"version",
"extracted",
"from",
"the",
"str",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/version.py#L70-L83 | [
"def",
"_str_to_version",
"(",
"version_str",
",",
"allow_wildcard",
"=",
"False",
")",
":",
"reg",
"=",
"_VERSION_WILDCARD_REG",
"if",
"allow_wildcard",
"else",
"_VERSION_RESOLVED_REG",
"res",
"=",
"reg",
".",
"match",
"(",
"version_str",
")",
"if",
"not",
"res... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Version.match | Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard. | tensorflow_datasets/core/utils/version.py | def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"]) | def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"]) | [
"Returns",
"True",
"if",
"other_version",
"matches",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/version.py#L58-L67 | [
"def",
"match",
"(",
"self",
",",
"other_version",
")",
":",
"major",
",",
"minor",
",",
"patch",
"=",
"_str_to_version",
"(",
"other_version",
",",
"allow_wildcard",
"=",
"True",
")",
"return",
"(",
"major",
"in",
"[",
"self",
".",
"major",
",",
"\"*\""... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Imagenet2012._get_validation_labels | Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str). | tensorflow_datasets/image/imagenet.py | def _get_validation_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels)) | def _get_validation_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels)) | [
"Returns",
"labels",
"for",
"validation",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet.py#L86-L102 | [
"def",
"_get_validation_labels",
"(",
"val_path",
")",
":",
"labels_path",
"=",
"tfds",
".",
"core",
".",
"get_tfds_path",
"(",
"_VALIDATION_LABELS_FNAME",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
")",
"as",
"labels_f",
"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | Imagenet2012._generate_examples | Yields examples. | tensorflow_datasets/image/imagenet.py | def _generate_examples(self, archive, validation_labels=None):
"""Yields examples."""
if validation_labels: # Validation split
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
# Training split. Main archive contains archives names after a synset noun.
# Each sub-archive contains pictures associated to that synset.
for fname, fobj in archive:
label = fname[:-4] # fname is something like 'n01632458.tar'
# TODO(b/117643231): in py3, the following lines trigger tarfile module
# to call `fobj.seekable()`, which Gfile doesn't have. We should find an
# alternative, as this loads ~150MB in RAM.
fobj_mem = io.BytesIO(fobj.read())
for image_fname, image_fobj in tfds.download.iter_archive(
fobj_mem, tfds.download.ExtractMethod.TAR):
yield {
'file_name': image_fname,
'image': image_fobj,
'label': label,
} | def _generate_examples(self, archive, validation_labels=None):
"""Yields examples."""
if validation_labels: # Validation split
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
# Training split. Main archive contains archives names after a synset noun.
# Each sub-archive contains pictures associated to that synset.
for fname, fobj in archive:
label = fname[:-4] # fname is something like 'n01632458.tar'
# TODO(b/117643231): in py3, the following lines trigger tarfile module
# to call `fobj.seekable()`, which Gfile doesn't have. We should find an
# alternative, as this loads ~150MB in RAM.
fobj_mem = io.BytesIO(fobj.read())
for image_fname, image_fobj in tfds.download.iter_archive(
fobj_mem, tfds.download.ExtractMethod.TAR):
yield {
'file_name': image_fname,
'image': image_fobj,
'label': label,
} | [
"Yields",
"examples",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/imagenet.py#L131-L151 | [
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
",",
"validation_labels",
"=",
"None",
")",
":",
"if",
"validation_labels",
":",
"# Validation split",
"for",
"example",
"in",
"self",
".",
"_generate_examples_validation",
"(",
"archive",
",",
"validation_la... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | do_files_exist | Whether any of the filenames exist. | tensorflow_datasets/core/file_format_adapter.py | def do_files_exist(filenames):
"""Whether any of the filenames exist."""
preexisting = [tf.io.gfile.exists(f) for f in filenames]
return any(preexisting) | def do_files_exist(filenames):
"""Whether any of the filenames exist."""
preexisting = [tf.io.gfile.exists(f) for f in filenames]
return any(preexisting) | [
"Whether",
"any",
"of",
"the",
"filenames",
"exist",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L194-L197 | [
"def",
"do_files_exist",
"(",
"filenames",
")",
":",
"preexisting",
"=",
"[",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"f",
")",
"for",
"f",
"in",
"filenames",
"]",
"return",
"any",
"(",
"preexisting",
")"
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
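As a rough, self-contained illustration of the wildcard version handling in the `_str_to_version` and `Version.match` records above, the sketch below assumes only the standard `re` module; the regexes and helper names are stand-ins, not the library's actual `_VERSION_WILDCARD_REG`/`_VERSION_RESOLVED_REG`:

import re

# Illustrative stand-ins for the module-level patterns referenced in the records.
_WILDCARD = re.compile(r"^(?P<major>\d+|\*)\.(?P<minor>\d+|\*)\.(?P<patch>\d+|\*)$")
_RESOLVED = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def str_to_version(version_str, allow_wildcard=False):
    """Parse 'x.y.z' into a (major, minor, patch) tuple, keeping '*' as-is."""
    reg = _WILDCARD if allow_wildcard else _RESOLVED
    res = reg.match(version_str)
    if not res:
        raise ValueError("Invalid version {!r}".format(version_str))
    return tuple(v if v == "*" else int(v)
                 for v in (res.group("major"), res.group("minor"), res.group("patch")))

def version_matches(version, pattern):
    """True if a concrete (major, minor, patch) tuple matches a wildcard pattern."""
    wanted = str_to_version(pattern, allow_wildcard=True)
    return all(p in (v, "*") for v, p in zip(version, wanted))

print(str_to_version("1.2.3"))                       # (1, 2, 3)
print(str_to_version("1.*.*", allow_wildcard=True))  # (1, '*', '*')
print(version_matches((1, 2, 3), "1.*.*"))           # True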
train | get_incomplete_path | Returns a temporary filename based on filename. | tensorflow_datasets/core/file_format_adapter.py | def get_incomplete_path(filename):
"""Returns a temporary filename based on filename."""
random_suffix = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
return filename + ".incomplete" + random_suffix | def get_incomplete_path(filename):
"""Returns a temporary filename based on filename."""
random_suffix = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
return filename + ".incomplete" + random_suffix | [
"Returns",
"a",
"temporary",
"filename",
"based",
"on",
"filename",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L210-L214 | [
"def",
"get_incomplete_path",
"(",
"filename",
")",
":",
"random_suffix",
"=",
"\"\"",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_uppercase",
"+",
"string",
".",
"digits",
")",
"for",
"_",
"in",
"range",
"(",
"6",
")",
")",
"... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _incomplete_files | Create temporary files for filenames and rename on exit. | tensorflow_datasets/core/file_format_adapter.py | def _incomplete_files(filenames):
"""Create temporary files for filenames and rename on exit."""
tmp_files = [get_incomplete_path(f) for f in filenames]
try:
yield tmp_files
for tmp, output in zip(tmp_files, filenames):
tf.io.gfile.rename(tmp, output)
finally:
for tmp in tmp_files:
if tf.io.gfile.exists(tmp):
tf.io.gfile.remove(tmp) | def _incomplete_files(filenames):
"""Create temporary files for filenames and rename on exit."""
tmp_files = [get_incomplete_path(f) for f in filenames]
try:
yield tmp_files
for tmp, output in zip(tmp_files, filenames):
tf.io.gfile.rename(tmp, output)
finally:
for tmp in tmp_files:
if tf.io.gfile.exists(tmp):
tf.io.gfile.remove(tmp) | [
"Create",
"temporary",
"files",
"for",
"filenames",
"and",
"rename",
"on",
"exit",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L218-L228 | [
"def",
"_incomplete_files",
"(",
"filenames",
")",
":",
"tmp_files",
"=",
"[",
"get_incomplete_path",
"(",
"f",
")",
"for",
"f",
"in",
"filenames",
"]",
"try",
":",
"yield",
"tmp_files",
"for",
"tmp",
",",
"output",
"in",
"zip",
"(",
"tmp_files",
",",
"f... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | incomplete_dir | Create temporary dir for dirname and rename on exit. | tensorflow_datasets/core/file_format_adapter.py | def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
tmp_dir = get_incomplete_path(dirname)
tf.io.gfile.makedirs(tmp_dir)
try:
yield tmp_dir
tf.io.gfile.rename(tmp_dir, dirname)
finally:
if tf.io.gfile.exists(tmp_dir):
tf.io.gfile.rmtree(tmp_dir) | def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
tmp_dir = get_incomplete_path(dirname)
tf.io.gfile.makedirs(tmp_dir)
try:
yield tmp_dir
tf.io.gfile.rename(tmp_dir, dirname)
finally:
if tf.io.gfile.exists(tmp_dir):
tf.io.gfile.rmtree(tmp_dir) | [
"Create",
"temporary",
"dir",
"for",
"dirname",
"and",
"rename",
"on",
"exit",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L232-L241 | [
"def",
"incomplete_dir",
"(",
"dirname",
")",
":",
"tmp_dir",
"=",
"get_incomplete_path",
"(",
"dirname",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"tmp_dir",
")",
"try",
":",
"yield",
"tmp_dir",
"tf",
".",
"io",
".",
"gfile",
".",
"ren... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _shuffle_tfrecord | Shuffle a single record file in memory. | tensorflow_datasets/core/file_format_adapter.py | def _shuffle_tfrecord(path, random_gen):
"""Shuffle a single record file in memory."""
# Read all records
record_iter = tf.compat.v1.io.tf_record_iterator(path)
all_records = [
r for r in utils.tqdm(
record_iter, desc="Reading...", unit=" examples", leave=False)
]
# Shuffling in memory
random_gen.shuffle(all_records)
# Write all record back
with tf.io.TFRecordWriter(path) as writer:
for record in utils.tqdm(
all_records, desc="Writing...", unit=" examples", leave=False):
writer.write(record) | def _shuffle_tfrecord(path, random_gen):
"""Shuffle a single record file in memory."""
# Read all records
record_iter = tf.compat.v1.io.tf_record_iterator(path)
all_records = [
r for r in utils.tqdm(
record_iter, desc="Reading...", unit=" examples", leave=False)
]
# Shuffling in memory
random_gen.shuffle(all_records)
# Write all record back
with tf.io.TFRecordWriter(path) as writer:
for record in utils.tqdm(
all_records, desc="Writing...", unit=" examples", leave=False):
writer.write(record) | [
"Shuffle",
"a",
"single",
"record",
"file",
"in",
"memory",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L244-L258 | [
"def",
"_shuffle_tfrecord",
"(",
"path",
",",
"random_gen",
")",
":",
"# Read all records",
"record_iter",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"io",
".",
"tf_record_iterator",
"(",
"path",
")",
"all_records",
"=",
"[",
"r",
"for",
"r",
"in",
"utils",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _write_tfrecords_from_generator | Writes generated str records to output_files in round-robin order. | tensorflow_datasets/core/file_format_adapter.py | def _write_tfrecords_from_generator(generator, output_files, shuffle=True):
"""Writes generated str records to output_files in round-robin order."""
if do_files_exist(output_files):
raise ValueError(
"Pre-processed files already exists: {}.".format(output_files))
with _incomplete_files(output_files) as tmp_files:
# Write all shards
writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files]
with _close_on_exit(writers) as writers:
logging.info("Writing TFRecords")
_round_robin_write(writers, generator)
# Shuffle each shard
if shuffle:
# WARNING: Using np instead of Python random because Python random
# produce different values between Python 2 and 3 and between
# architectures
random_gen = np.random.RandomState(42)
for path in utils.tqdm(
tmp_files, desc="Shuffling...", unit=" shard", leave=False):
_shuffle_tfrecord(path, random_gen=random_gen) | def _write_tfrecords_from_generator(generator, output_files, shuffle=True):
"""Writes generated str records to output_files in round-robin order."""
if do_files_exist(output_files):
raise ValueError(
"Pre-processed files already exists: {}.".format(output_files))
with _incomplete_files(output_files) as tmp_files:
# Write all shards
writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files]
with _close_on_exit(writers) as writers:
logging.info("Writing TFRecords")
_round_robin_write(writers, generator)
# Shuffle each shard
if shuffle:
# WARNING: Using np instead of Python random because Python random
# produce different values between Python 2 and 3 and between
# architectures
random_gen = np.random.RandomState(42)
for path in utils.tqdm(
tmp_files, desc="Shuffling...", unit=" shard", leave=False):
_shuffle_tfrecord(path, random_gen=random_gen) | [
"Writes",
"generated",
"str",
"records",
"to",
"output_files",
"in",
"round",
"-",
"robin",
"order",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L261-L281 | [
"def",
"_write_tfrecords_from_generator",
"(",
"generator",
",",
"output_files",
",",
"shuffle",
"=",
"True",
")",
":",
"if",
"do_files_exist",
"(",
"output_files",
")",
":",
"raise",
"ValueError",
"(",
"\"Pre-processed files already exists: {}.\"",
".",
"format",
"("... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _round_robin_write | Write records from generator round-robin across writers. | tensorflow_datasets/core/file_format_adapter.py | def _round_robin_write(writers, generator):
"""Write records from generator round-robin across writers."""
for i, example in enumerate(utils.tqdm(
generator, unit=" examples", leave=False)):
writers[i % len(writers)].write(example) | def _round_robin_write(writers, generator):
"""Write records from generator round-robin across writers."""
for i, example in enumerate(utils.tqdm(
generator, unit=" examples", leave=False)):
writers[i % len(writers)].write(example) | [
"Write",
"records",
"from",
"generator",
"round",
"-",
"robin",
"across",
"writers",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L284-L288 | [
"def",
"_round_robin_write",
"(",
"writers",
",",
"generator",
")",
":",
"for",
"i",
",",
"example",
"in",
"enumerate",
"(",
"utils",
".",
"tqdm",
"(",
"generator",
",",
"unit",
"=",
"\" examples\"",
",",
"leave",
"=",
"False",
")",
")",
":",
"writers",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
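The three records above (shuffling, shard writing, round-robin distribution) come down to two small ideas: send record i to shard i % num_shards, then shuffle each shard with a fixed-seed NumPy generator so the permutation is identical across Python versions and architectures. A sketch with plain text files standing in for TFRecord writers; the shard file names are made up:

import numpy as np

def round_robin_write(writers, records):
    """Spread records evenly over the shard writers (record i goes to shard i % n)."""
    for i, record in enumerate(records):
        writers[i % len(writers)].write(record)

# Hypothetical shard names; plain text files stand in for tf.io.TFRecordWriter.
paths = ["shard-%05d-of-00003.txt" % i for i in range(3)]
shards = [open(p, "w") for p in paths]
round_robin_write(shards, ("record %d\n" % n for n in range(10)))
for shard in shards:
    shard.close()

# Deterministic per-shard shuffle: a fixed-seed NumPy generator yields the same
# permutation everywhere, unlike Python's version-dependent random.shuffle.
rng = np.random.RandomState(42)
for path in paths:
    with open(path) as f:
        lines = f.readlines()
    rng.shuffle(lines)  # in-place
    with open(path, "w") as f:
        f.writelines(lines)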
train | _item_to_tf_feature | Single item to a tf.train.Feature. | tensorflow_datasets/core/file_format_adapter.py | def _item_to_tf_feature(item, key_name):
"""Single item to a tf.train.Feature."""
v = item
if isinstance(v, (list, tuple)) and not v:
raise ValueError(
"Feature {} received an empty list value, so is unable to infer the "
"feature type to record. To support empty value, the corresponding "
"FeatureConnector should return a numpy array with the correct dtype "
"instead of a Python list.".format(key_name)
)
# Handle strings/bytes first
if isinstance(v, (six.binary_type, six.string_types)):
v = [tf.compat.as_bytes(v)]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, (tuple, list)) and
all(isinstance(x, (six.binary_type, six.string_types)) for x in v)):
v = [tf.compat.as_bytes(x) for x in v]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, np.ndarray) and
(v.dtype.kind in ("U", "S") or v.dtype == object)): # binary or unicode
v = [tf.compat.as_bytes(x) for x in v.flatten()]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
# Use NumPy for numeric types
v = np.array(v).flatten() # Convert v into a 1-d array
if np.issubdtype(v.dtype, np.integer):
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif np.issubdtype(v.dtype, np.floating):
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
else:
raise ValueError(
"Value received: {}.\n"
"tf.train.Feature does not support type {} for feature key {}. "
"This may indicate that one of the FeatureConnectors received an "
"unsupported value as input.".format(repr(v), repr(type(v)), key_name)
) | def _item_to_tf_feature(item, key_name):
"""Single item to a tf.train.Feature."""
v = item
if isinstance(v, (list, tuple)) and not v:
raise ValueError(
"Feature {} received an empty list value, so is unable to infer the "
"feature type to record. To support empty value, the corresponding "
"FeatureConnector should return a numpy array with the correct dtype "
"instead of a Python list.".format(key_name)
)
# Handle strings/bytes first
if isinstance(v, (six.binary_type, six.string_types)):
v = [tf.compat.as_bytes(v)]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, (tuple, list)) and
all(isinstance(x, (six.binary_type, six.string_types)) for x in v)):
v = [tf.compat.as_bytes(x) for x in v]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif (isinstance(v, np.ndarray) and
(v.dtype.kind in ("U", "S") or v.dtype == object)): # binary or unicode
v = [tf.compat.as_bytes(x) for x in v.flatten()]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
# Use NumPy for numeric types
v = np.array(v).flatten() # Convert v into a 1-d array
if np.issubdtype(v.dtype, np.integer):
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif np.issubdtype(v.dtype, np.floating):
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
else:
raise ValueError(
"Value received: {}.\n"
"tf.train.Feature does not support type {} for feature key {}. "
"This may indicate that one of the FeatureConnectors received an "
"unsupported value as input.".format(repr(v), repr(type(v)), key_name)
) | [
"Single",
"item",
"to",
"a",
"tf",
".",
"train",
".",
"Feature",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L307-L344 | [
"def",
"_item_to_tf_feature",
"(",
"item",
",",
"key_name",
")",
":",
"v",
"=",
"item",
"if",
"isinstance",
"(",
"v",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"not",
"v",
":",
"raise",
"ValueError",
"(",
"\"Feature {} received an empty list value, so... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _dict_to_tf_features | Builds tf.train.Features from (string -> int/float/str list) dictionary. | tensorflow_datasets/core/file_format_adapter.py | def _dict_to_tf_features(example_dict):
"""Builds tf.train.Features from (string -> int/float/str list) dictionary."""
features = {k: _item_to_tf_feature(v, k) for k, v
in six.iteritems(example_dict)}
return tf.train.Features(feature=features) | def _dict_to_tf_features(example_dict):
"""Builds tf.train.Features from (string -> int/float/str list) dictionary."""
features = {k: _item_to_tf_feature(v, k) for k, v
in six.iteritems(example_dict)}
return tf.train.Features(feature=features) | [
"Builds",
"tf",
".",
"train",
".",
"Features",
"from",
"(",
"string",
"-",
">",
"int",
"/",
"float",
"/",
"str",
"list",
")",
"dictionary",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/file_format_adapter.py#L347-L351 | [
"def",
"_dict_to_tf_features",
"(",
"example_dict",
")",
":",
"features",
"=",
"{",
"k",
":",
"_item_to_tf_feature",
"(",
"v",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"example_dict",
")",
"}",
"return",
"tf",
".",
"trai... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _async_tqdm | Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads. | tensorflow_datasets/core/utils/tqdm_utils.py | def _async_tqdm(*args, **kwargs):
"""Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads.
"""
with tqdm_lib.tqdm(*args, **kwargs) as pbar:
pbar = _TqdmPbarAsync(pbar)
yield pbar
pbar.clear() # pop pbar from the active list of pbar
print() | def _async_tqdm(*args, **kwargs):
"""Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads.
"""
with tqdm_lib.tqdm(*args, **kwargs) as pbar:
pbar = _TqdmPbarAsync(pbar)
yield pbar
pbar.clear() # pop pbar from the active list of pbar
print() | [
"Wrapper",
"around",
"Tqdm",
"which",
"can",
"be",
"updated",
"in",
"threads",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L79-L102 | [
"def",
"_async_tqdm",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"tqdm_lib",
".",
"tqdm",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"as",
"pbar",
":",
"pbar",
"=",
"_TqdmPbarAsync",
"(",
"pbar",
")",
"yield",
"pbar",
"pbar",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _TqdmPbarAsync.update_total | Increment total pbar value. | tensorflow_datasets/core/utils/tqdm_utils.py | def update_total(self, n=1):
"""Increment total pbar value."""
with self._lock:
self._pbar.total += n
self.refresh() | def update_total(self, n=1):
"""Increment total pbar value."""
with self._lock:
self._pbar.total += n
self.refresh() | [
"Increment",
"total",
"pbar",
"value",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L114-L118 | [
"def",
"update_total",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_pbar",
".",
"total",
"+=",
"n",
"self",
".",
"refresh",
"(",
")"
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
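The `_item_to_tf_feature` and `_dict_to_tf_features` records above reduce to a dtype dispatch onto the three list types a `tf.train.Feature` can hold, followed by wrapping the flat dict into a `tf.train.Example`. A trimmed-down sketch assuming TensorFlow and NumPy are installed; it omits the bool, list-of-strings and empty-list handling shown in the records, and the field names are invented:

import numpy as np
import tensorflow as tf

def item_to_feature(value):
    """Map a value onto one of the three list types a tf.train.Feature can hold."""
    if isinstance(value, (bytes, str)):
        return tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(value)]))
    arr = np.asarray(value).flatten()
    if np.issubdtype(arr.dtype, np.integer):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=arr))
    if np.issubdtype(arr.dtype, np.floating):
        return tf.train.Feature(float_list=tf.train.FloatList(value=arr))
    raise ValueError("Unsupported value for tf.train.Feature: %r" % (value,))

example_dict = {"label": 7, "weights": [0.25, 0.75], "name": "seven"}
features = tf.train.Features(
    feature={k: item_to_feature(v) for k, v in example_dict.items()})
serialized = tf.train.Example(features=features).SerializeToString()
print(len(serialized))  # length of the bytes that would go into a TFRecord file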
train | _TqdmPbarAsync.update | Increment current value. | tensorflow_datasets/core/utils/tqdm_utils.py | def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh() | def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh() | [
"Increment",
"current",
"value",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L120-L124 | [
"def",
"update",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_pbar",
".",
"update",
"(",
"n",
")",
"self",
".",
"refresh",
"(",
")"
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
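The `_async_tqdm` wrapper and the `update_total`/`update` methods in the records above amount to guarding every bar mutation with a lock so that worker threads can report progress concurrently. A minimal sketch assuming only the `tqdm` package; the `SharedPbar` name is invented for illustration:

import threading
from concurrent import futures
from tqdm import tqdm

class SharedPbar(object):
    """Thread-safe progress bar wrapper: every mutation happens under one lock."""

    def __init__(self, pbar):
        self._pbar = pbar
        self._lock = threading.Lock()

    def update_total(self, n=1):
        with self._lock:
            self._pbar.total += n
            self._pbar.refresh()

    def update(self, n=1):
        with self._lock:
            self._pbar.update(n)

with tqdm(total=0, unit=" file") as raw_bar:
    pbar = SharedPbar(raw_bar)

    def work(_):
        pbar.update_total()  # a new unit of work was discovered
        pbar.update()        # ... and completed

    with futures.ThreadPoolExecutor(max_workers=4) as executor:
        list(executor.map(work, range(20)))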
train | AbstractReasoning._build_pcollection | Generate examples as dicts. | tensorflow_datasets/image/abstract_reasoning.py | def _build_pcollection(self, pipeline, folder, split):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
split_type = self.builder_config.split_type
filename = os.path.join(folder, "{}.tar.gz".format(split_type))
def _extract_data(inputs):
"""Extracts files from the tar archives."""
filename, split = inputs
with tf.io.gfile.GFile(filename, "rb") as f:
with tarfile.open(fileobj=f, mode="r") as tar:
for tarinfo in tar:
split_name = tarinfo.name.split("_")
if len(split_name) > 2 and split_name[2] == split:
buf = six.BytesIO()
shutil.copyfileobj(tar.extractfile(tarinfo), buf)
yield [tarinfo.name, buf.getvalue()]
def _process_example(inputs):
filename, data_string = inputs
buf = six.BytesIO(data_string)
buf.seek(0)
data = np.load(buf)
# Extract the images and convert to uint8. The reshape is required, see
# https://github.com/deepmind/abstract-reasoning-matrices.
all_images = np.uint8(data["image"].reshape(16, 160, 160, 1))
return {
"relation_structure_encoded": data["relation_structure_encoded"],
"target": data["target"],
"meta_target": data["meta_target"],
"context": all_images[:8],
"answers": all_images[8:],
"filename": filename,
}
# Beam might fuse together the _extract_data and _process_example which
# defeats the purpose of parallel processing. As a result, we reshard by
# doing a GroupByKey on random keys, and then flattening again.
def _add_random_keys(inputs):
key = str(random.randrange(10**10))
return key, inputs
def _remove_keys(inputs):
_, rows = inputs
for row in rows:
yield row
return (pipeline
| beam.Create([(filename, split)])
| beam.FlatMap(_extract_data)
| beam.Map(_add_random_keys)
| beam.GroupByKey()
| beam.FlatMap(_remove_keys)
| beam.Map(_process_example)) | def _build_pcollection(self, pipeline, folder, split):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
split_type = self.builder_config.split_type
filename = os.path.join(folder, "{}.tar.gz".format(split_type))
def _extract_data(inputs):
"""Extracts files from the tar archives."""
filename, split = inputs
with tf.io.gfile.GFile(filename, "rb") as f:
with tarfile.open(fileobj=f, mode="r") as tar:
for tarinfo in tar:
split_name = tarinfo.name.split("_")
if len(split_name) > 2 and split_name[2] == split:
buf = six.BytesIO()
shutil.copyfileobj(tar.extractfile(tarinfo), buf)
yield [tarinfo.name, buf.getvalue()]
def _process_example(inputs):
filename, data_string = inputs
buf = six.BytesIO(data_string)
buf.seek(0)
data = np.load(buf)
# Extract the images and convert to uint8. The reshape is required, see
# https://github.com/deepmind/abstract-reasoning-matrices.
all_images = np.uint8(data["image"].reshape(16, 160, 160, 1))
return {
"relation_structure_encoded": data["relation_structure_encoded"],
"target": data["target"],
"meta_target": data["meta_target"],
"context": all_images[:8],
"answers": all_images[8:],
"filename": filename,
}
# Beam might fuse together the _extract_data and _process_example which
# defeats the purpose of parallel processing. As a result, we reshard by
# doing a GroupByKey on random keys, and then flattening again.
def _add_random_keys(inputs):
key = str(random.randrange(10**10))
return key, inputs
def _remove_keys(inputs):
_, rows = inputs
for row in rows:
yield row
return (pipeline
| beam.Create([(filename, split)])
| beam.FlatMap(_extract_data)
| beam.Map(_add_random_keys)
| beam.GroupByKey()
| beam.FlatMap(_remove_keys)
| beam.Map(_process_example)) | [
"Generate",
"examples",
"as",
"dicts",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/abstract_reasoning.py#L250-L305 | [
"def",
"_build_pcollection",
"(",
"self",
",",
"pipeline",
",",
"folder",
",",
"split",
")",
":",
"beam",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"apache_beam",
"split_type",
"=",
"self",
".",
"builder_config",
".",
"split_type",
"filename",
"=",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _copy | Copy data read from src file obj to new file in dest_path. | tensorflow_datasets/core/download/extractor.py | def _copy(src_file, dest_path):
"""Copy data read from src file obj to new file in dest_path."""
tf.io.gfile.makedirs(os.path.dirname(dest_path))
with tf.io.gfile.GFile(dest_path, 'wb') as dest_file:
while True:
data = src_file.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
dest_file.write(data) | def _copy(src_file, dest_path):
"""Copy data read from src file obj to new file in dest_path."""
tf.io.gfile.makedirs(os.path.dirname(dest_path))
with tf.io.gfile.GFile(dest_path, 'wb') as dest_file:
while True:
data = src_file.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
dest_file.write(data) | [
"Copy",
"data",
"read",
"from",
"src",
"file",
"obj",
"to",
"new",
"file",
"in",
"dest_path",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L103-L111 | [
"def",
"_copy",
"(",
"src_file",
",",
"dest_path",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dest_path",
")",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"dest_path",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
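The reshard trick in the `AbstractReasoning._build_pcollection` record above (attach random keys, `GroupByKey`, then flatten) exists only to break Beam's operator fusion. A minimal standalone pipeline with the same shape, assuming `apache-beam` is installed and the default local runner; recent Beam releases also ship `beam.Reshuffle()`, which packages the same idea:

import random

import apache_beam as beam

def add_random_key(element):
    return str(random.randrange(10**10)), element

def drop_keys(keyed):
    _, values = keyed
    for value in values:
        yield value

with beam.Pipeline() as pipeline:
    _ = (pipeline
         | beam.Create(range(100))
         | "AddKeys" >> beam.Map(add_random_key)
         | beam.GroupByKey()
         | "DropKeys" >> beam.FlatMap(drop_keys)
         | "Print" >> beam.Map(print))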
train | iter_tar | Iter over tar archive, yielding (path, object-like) tuples.
Args:
arch_f: File object of the archive to iterate.
gz: If True, open a gzip'ed archive.
stream: If True, open the archive in stream mode which allows for faster
processing and less temporary disk consumption, but random access to the
file is not allowed.
Yields:
(filepath, extracted_fobj) for each file in the archive. | tensorflow_datasets/core/download/extractor.py | def iter_tar(arch_f, gz=False, stream=False):
"""Iter over tar archive, yielding (path, object-like) tuples.
Args:
arch_f: File object of the archive to iterate.
gz: If True, open a gzip'ed archive.
stream: If True, open the archive in stream mode which allows for faster
processing and less temporary disk consumption, but random access to the
file is not allowed.
Yields:
(filepath, extracted_fobj) for each file in the archive.
"""
read_type = 'r' + ('|' if stream else ':')
if gz:
read_type += 'gz'
with _open_or_pass(arch_f) as fobj:
tar = tarfile.open(mode=read_type, fileobj=fobj)
for member in tar:
extract_file = tar.extractfile(member)
if extract_file: # File with data (not directory):
path = _normpath(member.path)
if not path:
continue
yield [path, extract_file] | def iter_tar(arch_f, gz=False, stream=False):
"""Iter over tar archive, yielding (path, object-like) tuples.
Args:
arch_f: File object of the archive to iterate.
gz: If True, open a gzip'ed archive.
stream: If True, open the archive in stream mode which allows for faster
processing and less temporary disk consumption, but random access to the
file is not allowed.
Yields:
(filepath, extracted_fobj) for each file in the archive.
"""
read_type = 'r' + ('|' if stream else ':')
if gz:
read_type += 'gz'
with _open_or_pass(arch_f) as fobj:
tar = tarfile.open(mode=read_type, fileobj=fobj)
for member in tar:
extract_file = tar.extractfile(member)
if extract_file: # File with data (not directory):
path = _normpath(member.path)
if not path:
continue
yield [path, extract_file] | [
"Iter",
"over",
"tar",
"archive",
"yielding",
"(",
"path",
"object",
"-",
"like",
")",
"tuples",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L133-L158 | [
"def",
"iter_tar",
"(",
"arch_f",
",",
"gz",
"=",
"False",
",",
"stream",
"=",
"False",
")",
":",
"read_type",
"=",
"'r'",
"+",
"(",
"'|'",
"if",
"stream",
"else",
"':'",
")",
"if",
"gz",
":",
"read_type",
"+=",
"'gz'",
"with",
"_open_or_pass",
"(",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
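A standard-library-only sketch of the streaming tar iteration shown in the `iter_tar` record above; plain `open` replaces `tf.io.gfile.GFile`, the archive path in the usage lines is a placeholder, and in stream mode (`'r|...'`) each member must be read before the iterator advances:

import os
import tarfile

def iter_tar_members(path, gz=False, stream=False):
    """Yield (member_path, file_object) for each regular file in a tar archive."""
    mode = "r" + ("|" if stream else ":") + ("gz" if gz else "")
    with open(path, "rb") as fobj:
        with tarfile.open(mode=mode, fileobj=fobj) as tar:
            for member in tar:
                extracted = tar.extractfile(member)
                if extracted is None:  # directories, links and other special members
                    continue
                yield os.path.normpath(member.name), extracted

for name, handle in iter_tar_members("archive.tar.gz", gz=True, stream=True):
    print(name, len(handle.read()))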
train | _Extractor.tqdm | Add a progression bar for the current extraction. | tensorflow_datasets/core/download/extractor.py | def tqdm(self):
"""Add a progression bar for the current extraction."""
with utils.async_tqdm(
total=0, desc='Extraction completed...', unit=' file') as pbar_path:
self._pbar_path = pbar_path
yield | def tqdm(self):
"""Add a progression bar for the current extraction."""
with utils.async_tqdm(
total=0, desc='Extraction completed...', unit=' file') as pbar_path:
self._pbar_path = pbar_path
yield | [
"Add",
"a",
"progression",
"bar",
"for",
"the",
"current",
"extraction",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L68-L73 | [
"def",
"tqdm",
"(",
"self",
")",
":",
"with",
"utils",
".",
"async_tqdm",
"(",
"total",
"=",
"0",
",",
"desc",
"=",
"'Extraction completed...'",
",",
"unit",
"=",
"' file'",
")",
"as",
"pbar_path",
":",
"self",
".",
"_pbar_path",
"=",
"pbar_path",
"yield... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _Extractor.extract | Returns `promise.Promise` => to_path. | tensorflow_datasets/core/download/extractor.py | def extract(self, path, extract_method, to_path):
"""Returns `promise.Promise` => to_path."""
self._pbar_path.update_total(1)
if extract_method not in _EXTRACT_METHODS:
raise ValueError('Unknown extraction method "%s".' % extract_method)
future = self._executor.submit(self._sync_extract,
path, extract_method, to_path)
return promise.Promise.resolve(future) | def extract(self, path, extract_method, to_path):
"""Returns `promise.Promise` => to_path."""
self._pbar_path.update_total(1)
if extract_method not in _EXTRACT_METHODS:
raise ValueError('Unknown extraction method "%s".' % extract_method)
future = self._executor.submit(self._sync_extract,
path, extract_method, to_path)
return promise.Promise.resolve(future) | [
"Returns",
"promise",
".",
"Promise",
"=",
">",
"to_path",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L75-L82 | [
"def",
"extract",
"(",
"self",
",",
"path",
",",
"extract_method",
",",
"to_path",
")",
":",
"self",
".",
"_pbar_path",
".",
"update_total",
"(",
"1",
")",
"if",
"extract_method",
"not",
"in",
"_EXTRACT_METHODS",
":",
"raise",
"ValueError",
"(",
"'Unknown ex... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _Extractor._sync_extract | Returns `to_path` once resource has been extracted there. | tensorflow_datasets/core/download/extractor.py | def _sync_extract(self, from_path, method, to_path):
"""Returns `to_path` once resource has been extracted there."""
to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
uuid.uuid4().hex)
try:
for path, handle in iter_archive(from_path, method):
_copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp)
except BaseException as err:
msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
raise ExtractError(msg)
# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
# directories, so delete destination first, if it already exists.
if tf.io.gfile.exists(to_path):
tf.io.gfile.rmtree(to_path)
tf.io.gfile.rename(to_path_tmp, to_path)
self._pbar_path.update(1)
return to_path | def _sync_extract(self, from_path, method, to_path):
"""Returns `to_path` once resource has been extracted there."""
to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
uuid.uuid4().hex)
try:
for path, handle in iter_archive(from_path, method):
_copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp)
except BaseException as err:
msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
raise ExtractError(msg)
# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
# directories, so delete destination first, if it already exists.
if tf.io.gfile.exists(to_path):
tf.io.gfile.rmtree(to_path)
tf.io.gfile.rename(to_path_tmp, to_path)
self._pbar_path.update(1)
return to_path | [
"Returns",
"to_path",
"once",
"resource",
"has",
"been",
"extracted",
"there",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L84-L100 | [
"def",
"_sync_extract",
"(",
"self",
",",
"from_path",
",",
"method",
",",
"to_path",
")",
":",
"to_path_tmp",
"=",
"'%s%s_%s'",
"%",
"(",
"to_path",
",",
"constants",
".",
"INCOMPLETE_SUFFIX",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"try",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | to_serialized_field | Convert a `TensorInfo` object into a feature proto object. | tensorflow_datasets/core/features/feature.py | def to_serialized_field(tensor_info):
"""Convert a `TensorInfo` object into a feature proto object."""
# Select the type
dtype = tensor_info.dtype
# TODO(b/119937875): TF Examples proto only support int64, float32 and string
# This create limitation like float64 downsampled to float32, bool converted
# to int64 which is space ineficient, no support for complexes or quantized
if tensor_info.dtype.is_integer or tensor_info.dtype.is_bool:
dtype = tf.int64
elif tensor_info.dtype.is_floating:
dtype = tf.float32
# It seems quite space inefficient to convert bool to int64
# We may want to add support for complex, quantize dtype in the future
# TFRecord only support 3 types
if dtype not in (tf.int64, tf.float32, tf.string):
raise NotImplementedError(
'Serialization not implemented for {}'.format(dtype))
# Select the feature proto type in function of the unknown shape
if (tensor_info.shape is not None and # Shape is a sequence (None, ...)
tensor_info.shape.count(None) == 1 and
tensor_info.shape[0] is None):
return tf.io.FixedLenSequenceFeature(
shape=tensor_info.shape[1:],
dtype=dtype,
allow_missing=True,
)
# At least one dimension is undefined
elif tensor_info.shape is None or None in tensor_info.shape:
return tf.io.VarLenFeature(dtype=dtype)
else:
return tf.io.FixedLenFeature(
shape=tensor_info.shape,
dtype=dtype,
) | def to_serialized_field(tensor_info):
"""Convert a `TensorInfo` object into a feature proto object."""
# Select the type
dtype = tensor_info.dtype
# TODO(b/119937875): TF Examples proto only support int64, float32 and string
# This create limitation like float64 downsampled to float32, bool converted
# to int64 which is space ineficient, no support for complexes or quantized
if tensor_info.dtype.is_integer or tensor_info.dtype.is_bool:
dtype = tf.int64
elif tensor_info.dtype.is_floating:
dtype = tf.float32
# It seems quite space inefficient to convert bool to int64
# We may want to add support for complex, quantize dtype in the future
# TFRecord only support 3 types
if dtype not in (tf.int64, tf.float32, tf.string):
raise NotImplementedError(
'Serialization not implemented for {}'.format(dtype))
# Select the feature proto type in function of the unknown shape
if (tensor_info.shape is not None and # Shape is a sequence (None, ...)
tensor_info.shape.count(None) == 1 and
tensor_info.shape[0] is None):
return tf.io.FixedLenSequenceFeature(
shape=tensor_info.shape[1:],
dtype=dtype,
allow_missing=True,
)
# At least one dimension is undefined
elif tensor_info.shape is None or None in tensor_info.shape:
return tf.io.VarLenFeature(dtype=dtype)
else:
return tf.io.FixedLenFeature(
shape=tensor_info.shape,
dtype=dtype,
) | [
"Convert",
"a",
"TensorInfo",
"object",
"into",
"a",
"feature",
"proto",
"object",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L576-L612 | [
"def",
"to_serialized_field",
"(",
"tensor_info",
")",
":",
"# Select the type",
"dtype",
"=",
"tensor_info",
".",
"dtype",
"# TODO(b/119937875): TF Examples proto only support int64, float32 and string",
"# This create limitation like float64 downsampled to float32, bool converted",
"# ... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | to_feature | Convert the given value to Feature if necessary. | tensorflow_datasets/core/features/feature.py | def to_feature(value):
"""Convert the given value to Feature if necessary."""
if isinstance(value, FeatureConnector):
return value
elif utils.is_dtype(value): # tf.int32, tf.string,...
return Tensor(shape=(), dtype=tf.as_dtype(value))
elif isinstance(value, dict):
return FeaturesDict(value)
else:
raise ValueError('Feature not supported: {}'.format(value)) | def to_feature(value):
"""Convert the given value to Feature if necessary."""
if isinstance(value, FeatureConnector):
return value
elif utils.is_dtype(value): # tf.int32, tf.string,...
return Tensor(shape=(), dtype=tf.as_dtype(value))
elif isinstance(value, dict):
return FeaturesDict(value)
else:
raise ValueError('Feature not supported: {}'.format(value)) | [
"Convert",
"the",
"given",
"value",
"to",
"Feature",
"if",
"necessary",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L615-L624 | [
"def",
"to_feature",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"FeatureConnector",
")",
":",
"return",
"value",
"elif",
"utils",
".",
"is_dtype",
"(",
"value",
")",
":",
"# tf.int32, tf.string,...",
"return",
"Tensor",
"(",
"shape",
"=",... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | decode_single_feature_from_dict | Decode the given feature from the tfexample_dict.
Args:
feature_k (str): Feature key in the tfexample_dict
feature (FeatureConnector): Connector object to use to decode the field
tfexample_dict (dict): Dict containing the data to decode.
Returns:
decoded_feature: The output of the feature.decode_example | tensorflow_datasets/core/features/feature.py | def decode_single_feature_from_dict(
feature_k,
feature,
tfexample_dict):
"""Decode the given feature from the tfexample_dict.
Args:
feature_k (str): Feature key in the tfexample_dict
feature (FeatureConnector): Connector object to use to decode the field
tfexample_dict (dict): Dict containing the data to decode.
Returns:
decoded_feature: The output of the feature.decode_example
"""
# Singleton case
if not feature.serialized_keys:
data_to_decode = tfexample_dict[feature_k]
# Feature contains sub features
else:
# Extract the sub-features from the global feature dict
data_to_decode = {
k: tfexample_dict[posixpath.join(feature_k, k)]
for k in feature.serialized_keys
}
return feature.decode_example(data_to_decode) | def decode_single_feature_from_dict(
feature_k,
feature,
tfexample_dict):
"""Decode the given feature from the tfexample_dict.
Args:
feature_k (str): Feature key in the tfexample_dict
feature (FeatureConnector): Connector object to use to decode the field
tfexample_dict (dict): Dict containing the data to decode.
Returns:
decoded_feature: The output of the feature.decode_example
"""
# Singleton case
if not feature.serialized_keys:
data_to_decode = tfexample_dict[feature_k]
# Feature contains sub features
else:
# Extract the sub-features from the global feature dict
data_to_decode = {
k: tfexample_dict[posixpath.join(feature_k, k)]
for k in feature.serialized_keys
}
return feature.decode_example(data_to_decode) | [
"Decode",
"the",
"given",
"feature",
"from",
"the",
"tfexample_dict",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L627-L651 | [
"def",
"decode_single_feature_from_dict",
"(",
"feature_k",
",",
"feature",
",",
"tfexample_dict",
")",
":",
"# Singleton case",
"if",
"not",
"feature",
".",
"serialized_keys",
":",
"data_to_decode",
"=",
"tfexample_dict",
"[",
"feature_k",
"]",
"# Feature contains sub ... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | _assert_keys_match | Ensure the two list of keys matches. | tensorflow_datasets/core/features/feature.py | def _assert_keys_match(keys1, keys2):
"""Ensure the two list of keys matches."""
if set(keys1) != set(keys2):
raise ValueError('{} {}'.format(list(keys1), list(keys2))) | def _assert_keys_match(keys1, keys2):
"""Ensure the two list of keys matches."""
if set(keys1) != set(keys2):
raise ValueError('{} {}'.format(list(keys1), list(keys2))) | [
"Ensure",
"the",
"two",
"list",
"of",
"keys",
"matches",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L654-L657 | [
"def",
"_assert_keys_match",
"(",
"keys1",
",",
"keys2",
")",
":",
"if",
"set",
"(",
"keys1",
")",
"!=",
"set",
"(",
"keys2",
")",
":",
"raise",
"ValueError",
"(",
"'{} {}'",
".",
"format",
"(",
"list",
"(",
"keys1",
")",
",",
"list",
"(",
"keys2",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
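The shape-driven dispatch in the `to_serialized_field` record above condenses to a few lines once the dtype has already been narrowed to int64/float32/string. This sketch takes a plain shape tuple instead of a `TensorInfo`, so treat it as an illustration rather than the library's code:

import tensorflow as tf

def to_parse_spec(shape, dtype):
    """Pick the tf.io parsing spec matching a possibly partially-known shape."""
    if shape is not None and shape.count(None) == 1 and shape[0] is None:
        # Only the leading dimension is unknown: a variable-length sequence.
        return tf.io.FixedLenSequenceFeature(
            shape=shape[1:], dtype=dtype, allow_missing=True)
    if shape is None or None in shape:
        # Some other dimension is unknown: fall back to a sparse (var-len) feature.
        return tf.io.VarLenFeature(dtype=dtype)
    return tf.io.FixedLenFeature(shape=shape, dtype=dtype)

print(to_parse_spec((None, 64), tf.int64))     # FixedLenSequenceFeature
print(to_parse_spec((28, 28, 1), tf.float32))  # FixedLenFeature
print(to_parse_spec(None, tf.string))          # VarLenFeature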
train | FeaturesDict.get_tensor_info | See base class for details. | tensorflow_datasets/core/features/feature.py | def get_tensor_info(self):
"""See base class for details."""
return {
feature_key: feature.get_tensor_info()
for feature_key, feature in self._feature_dict.items()
} | def get_tensor_info(self):
"""See base class for details."""
return {
feature_key: feature.get_tensor_info()
for feature_key, feature in self._feature_dict.items()
} | [
"See",
"base",
"class",
"for",
"details",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L437-L442 | [
"def",
"get_tensor_info",
"(",
"self",
")",
":",
"return",
"{",
"feature_key",
":",
"feature",
".",
"get_tensor_info",
"(",
")",
"for",
"feature_key",
",",
"feature",
"in",
"self",
".",
"_feature_dict",
".",
"items",
"(",
")",
"}"
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | FeaturesDict.get_serialized_info | See base class for details. | tensorflow_datasets/core/features/feature.py | def get_serialized_info(self):
"""See base class for details."""
# Flatten tf-example features dict
# Use NonMutableDict to ensure there is no collision between features keys
features_dict = utils.NonMutableDict()
for feature_key, feature in self._feature_dict.items():
serialized_info = feature.get_serialized_info()
# Features can be either containers (dict of other features) or plain
# features (ex: single tensor). Plain features have a None
# feature.features_keys
if not feature.serialized_keys:
features_dict[feature_key] = serialized_info
else:
# Sanity check which should always be True, as feature.serialized_keys
# is computed using feature.get_serialized_info()
_assert_keys_match(serialized_info.keys(), feature.serialized_keys)
features_dict.update({
posixpath.join(feature_key, k): v
for k, v in serialized_info.items()
})
return features_dict | def get_serialized_info(self):
"""See base class for details."""
# Flatten tf-example features dict
# Use NonMutableDict to ensure there is no collision between features keys
features_dict = utils.NonMutableDict()
for feature_key, feature in self._feature_dict.items():
serialized_info = feature.get_serialized_info()
# Features can be either containers (dict of other features) or plain
# features (ex: single tensor). Plain features have a None
# feature.features_keys
if not feature.serialized_keys:
features_dict[feature_key] = serialized_info
else:
# Sanity check which should always be True, as feature.serialized_keys
# is computed using feature.get_serialized_info()
_assert_keys_match(serialized_info.keys(), feature.serialized_keys)
features_dict.update({
posixpath.join(feature_key, k): v
for k, v in serialized_info.items()
})
return features_dict | [
"See",
"base",
"class",
"for",
"details",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L444-L466 | [
"def",
"get_serialized_info",
"(",
"self",
")",
":",
"# Flatten tf-example features dict",
"# Use NonMutableDict to ensure there is no collision between features keys",
"features_dict",
"=",
"utils",
".",
"NonMutableDict",
"(",
")",
"for",
"feature_key",
",",
"feature",
"in",
... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
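The flattening performed by `get_serialized_info` above, where nested feature names are joined into single `parent/child` keys and collisions are rejected, can be shown without TensorFlow at all; the `flatten_feature_dict` helper below is hypothetical:

import posixpath

def flatten_feature_dict(features, prefix=""):
    """Flatten {'a': {'b': 1}} into {'a/b': 1}, refusing duplicate keys."""
    flat = {}
    for key, value in features.items():
        full_key = posixpath.join(prefix, key) if prefix else key
        if isinstance(value, dict):
            nested = flatten_feature_dict(value, prefix=full_key)
            duplicates = set(flat) & set(nested)
            if duplicates:
                raise ValueError("Duplicate feature keys: %r" % sorted(duplicates))
            flat.update(nested)
        else:
            if full_key in flat:
                raise ValueError("Duplicate feature key: %r" % full_key)
            flat[full_key] = value
    return flat

print(flatten_feature_dict({"image": "raw bytes",
                            "bbox": {"xmin": 0.1, "ymax": 0.8}}))
# {'image': 'raw bytes', 'bbox/xmin': 0.1, 'bbox/ymax': 0.8}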
train | FeaturesDict.encode_example | See base class for details. | tensorflow_datasets/core/features/feature.py | def encode_example(self, example_dict):
"""See base class for details."""
# Flatten dict matching the tf-example features
# Use NonMutableDict to ensure there is no collision between features keys
tfexample_dict = utils.NonMutableDict()
# Iterate over example fields
for feature_key, (feature, example_value) in utils.zip_dict(
self._feature_dict, example_dict):
# Encode the field with the associated encoder
encoded_feature = feature.encode_example(example_value)
# Singleton case
if not feature.serialized_keys:
tfexample_dict[feature_key] = encoded_feature
# Feature contains sub features
else:
_assert_keys_match(encoded_feature.keys(), feature.serialized_keys)
tfexample_dict.update({
posixpath.join(feature_key, k): encoded_feature[k]
for k in feature.serialized_keys
})
return tfexample_dict | def encode_example(self, example_dict):
"""See base class for details."""
# Flatten dict matching the tf-example features
# Use NonMutableDict to ensure there is no collision between features keys
tfexample_dict = utils.NonMutableDict()
# Iterate over example fields
for feature_key, (feature, example_value) in utils.zip_dict(
self._feature_dict, example_dict):
# Encode the field with the associated encoder
encoded_feature = feature.encode_example(example_value)
# Singleton case
if not feature.serialized_keys:
tfexample_dict[feature_key] = encoded_feature
# Feature contains sub features
else:
_assert_keys_match(encoded_feature.keys(), feature.serialized_keys)
tfexample_dict.update({
posixpath.join(feature_key, k): encoded_feature[k]
for k in feature.serialized_keys
})
return tfexample_dict | [
"See",
"base",
"class",
"for",
"details",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L468-L490 | [
"def",
"encode_example",
"(",
"self",
",",
"example_dict",
")",
":",
"# Flatten dict matching the tf-example features",
"# Use NonMutableDict to ensure there is no collision between features keys",
"tfexample_dict",
"=",
"utils",
".",
"NonMutableDict",
"(",
")",
"# Iterate over exa... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |
train | FeaturesDict.decode_example | See base class for details. | tensorflow_datasets/core/features/feature.py | def decode_example(self, tfexample_dict):
"""See base class for details."""
tensor_dict = {}
# Iterate over the Tensor dict keys
for feature_key, feature in six.iteritems(self._feature_dict):
decoded_feature = decode_single_feature_from_dict(
feature_k=feature_key,
feature=feature,
tfexample_dict=tfexample_dict,
)
tensor_dict[feature_key] = decoded_feature
return tensor_dict | def decode_example(self, tfexample_dict):
"""See base class for details."""
tensor_dict = {}
# Iterate over the Tensor dict keys
for feature_key, feature in six.iteritems(self._feature_dict):
decoded_feature = decode_single_feature_from_dict(
feature_k=feature_key,
feature=feature,
tfexample_dict=tfexample_dict,
)
tensor_dict[feature_key] = decoded_feature
return tensor_dict | [
"See",
"base",
"class",
"for",
"details",
"."
] | tensorflow/datasets | python | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L492-L503 | [
"def",
"decode_example",
"(",
"self",
",",
"tfexample_dict",
")",
":",
"tensor_dict",
"=",
"{",
"}",
"# Iterate over the Tensor dict keys",
"for",
"feature_key",
",",
"feature",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_feature_dict",
")",
":",
"decoded... | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc |