Dataset schema (per-column dtype and value-length ranges):

  column            dtype          min    max
  id                int32          0      252k
  repo              stringlengths  7      55
  path              stringlengths  4      127
  func_name         stringlengths  1      88
  original_string   stringlengths  75     19.8k
  language          stringclasses  1 value
  code              stringlengths  75     19.8k
  code_tokens       list
  docstring         stringlengths  3      17.3k
  docstring_tokens  list
  sha               stringlengths  40     40
  url               stringlengths  87     242
22,700
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
decode_schedule
python
def decode_schedule(string):
  """Decodes a string into a schedule tuple.

  Args:
    string: The string encoding of a schedule tuple.

  Returns:
    A schedule tuple, see encode_schedule for details.
  """
  splits = string.split()
  steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']
  pmfs = np.reshape(
      [float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])
  return splits[0], tuplize(steps), tuplize(pmfs)
[ "def", "decode_schedule", "(", "string", ")", ":", "splits", "=", "string", ".", "split", "(", ")", "steps", "=", "[", "int", "(", "x", "[", "1", ":", "]", ")", "for", "x", "in", "splits", "[", "1", ":", "]", "if", "x", "[", "0", "]", "==", "'@'", "]", "pmfs", "=", "np", ".", "reshape", "(", "[", "float", "(", "x", ")", "for", "x", "in", "splits", "[", "1", ":", "]", "if", "x", "[", "0", "]", "!=", "'@'", "]", ",", "[", "len", "(", "steps", ")", ",", "-", "1", "]", ")", "return", "splits", "[", "0", "]", ",", "tuplize", "(", "steps", ")", ",", "tuplize", "(", "pmfs", ")" ]
Decodes a string into a schedule tuple. Args: string: The string encoding of a schedule tuple. Returns: A schedule tuple, see encode_schedule for details.
[ "Decodes", "a", "string", "into", "a", "schedule", "tuple", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L397-L410
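A minimal usage sketch for decode_schedule, assuming tensor2tensor is installed; the schedule string and its 'step' interpolation kind are illustrative, not taken from the repo:

from tensor2tensor.data_generators.multi_problem_v2 import decode_schedule

# '@N' tokens mark global steps; the floats between them form one pmf row
# per step, reshaped to [num_steps, -1].
kind, steps, pmfs = decode_schedule('step @0 0.7 0.3 @100 0.5 0.5')
# kind == 'step'; steps == (0, 100); pmfs == ((0.7, 0.3), (0.5, 0.5))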
22,701
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
tuplize
python
def tuplize(nested):
  """Recursively converts iterables into tuples.

  Args:
    nested: A nested structure of items and iterables.

  Returns:
    A nested structure of items and tuples.
  """
  if isinstance(nested, str):
    return nested
  try:
    return tuple(map(tuplize, nested))
  except TypeError:
    return nested
[ "def", "tuplize", "(", "nested", ")", ":", "if", "isinstance", "(", "nested", ",", "str", ")", ":", "return", "nested", "try", ":", "return", "tuple", "(", "map", "(", "tuplize", ",", "nested", ")", ")", "except", "TypeError", ":", "return", "nested" ]
Recursively converts iterables into tuples. Args: nested: A nested structure of items and iterables. Returns: A nested structure of items and tuples.
[ "Recursively", "converts", "iterables", "into", "tuples", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L413-L427
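A quick sanity check of tuplize's behavior, assuming the same import path as above:

from tensor2tensor.data_generators.multi_problem_v2 import tuplize

assert tuplize([1, [2, 3], 'ab']) == (1, (2, 3), 'ab')
# Iterables become tuples recursively; the isinstance check returns strings
# unchanged, and non-iterable items fall through via the TypeError branch.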
22,702
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
MultiProblemV2.filepattern
python
def filepattern(self, *args, **kwargs):
  """Returns a list of filepatterns, one for each problem."""
  return [p.filepattern(*args, **kwargs) for p in self.problems]
[ "def", "filepattern", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "p", ".", "filepattern", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "p", "in", "self", ".", "problems", "]" ]
Returns a list of filepatterns, one for each problem.
[ "Returns", "a", "list", "of", "filepatterns", "one", "for", "each", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L82-L84
22,703
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
MultiProblemV2.generate_data
python
def generate_data(self, *args, **kwargs):
  """Generates data for each problem."""
  for p in self.problems:
    p.generate_data(*args, **kwargs)
[ "def", "generate_data", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "p", "in", "self", ".", "problems", ":", "p", ".", "generate_data", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Generates data for each problem.
[ "Generates", "data", "for", "each", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L86-L89
22,704
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
MultiProblemV2.dataset
python
def dataset(self, mode, hparams=None, global_step=None, **kwargs):
  """Returns a dataset containing examples from multiple problems.

  Args:
    mode: A member of problem.DatasetSplit.
    hparams: A tf.HParams object, the model hparams.
    global_step: A scalar tensor used to compute the sampling distribution.
      If global_step is None, we call tf.train.get_or_create_global_step by
      default.
    **kwargs: Keywords for problem.Problem.Dataset.

  Returns:
    A dataset containing examples from multiple problems.
  """
  datasets = [p.dataset(mode, **kwargs) for p in self.problems]
  datasets = [
      d.map(lambda x, i=j: self.normalize_example(  # pylint: disable=g-long-lambda
          dict(x, problem_id=tf.constant([i])), hparams))
      for j, d in enumerate(datasets)  # Tag examples with a problem_id.
  ]
  if mode is problem.DatasetSplit.TRAIN:
    if global_step is None:
      global_step = tf.train.get_or_create_global_step()
    pmf = get_schedule_distribution(self.schedule, global_step)
    return get_multi_dataset(datasets, pmf)
  elif self.only_eval_first_problem:
    return datasets[0]
  else:
    datasets = [d.repeat() for d in datasets]
    return tf.data.Dataset.zip(tuple(datasets)).flat_map(
        lambda *x: functools.reduce(  # pylint: disable=g-long-lambda
            tf.data.Dataset.concatenate,
            map(tf.data.Dataset.from_tensors, x)))
[ "def", "dataset", "(", "self", ",", "mode", ",", "hparams", "=", "None", ",", "global_step", "=", "None", ",", "*", "*", "kwargs", ")", ":", "datasets", "=", "[", "p", ".", "dataset", "(", "mode", ",", "*", "*", "kwargs", ")", "for", "p", "in", "self", ".", "problems", "]", "datasets", "=", "[", "d", ".", "map", "(", "lambda", "x", ",", "i", "=", "j", ":", "self", ".", "normalize_example", "(", "# pylint: disable=g-long-lambda", "dict", "(", "x", ",", "problem_id", "=", "tf", ".", "constant", "(", "[", "i", "]", ")", ")", ",", "hparams", ")", ")", "for", "j", ",", "d", "in", "enumerate", "(", "datasets", ")", "# Tag examples with a problem_id.", "]", "if", "mode", "is", "problem", ".", "DatasetSplit", ".", "TRAIN", ":", "if", "global_step", "is", "None", ":", "global_step", "=", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", "pmf", "=", "get_schedule_distribution", "(", "self", ".", "schedule", ",", "global_step", ")", "return", "get_multi_dataset", "(", "datasets", ",", "pmf", ")", "elif", "self", ".", "only_eval_first_problem", ":", "return", "datasets", "[", "0", "]", "else", ":", "datasets", "=", "[", "d", ".", "repeat", "(", ")", "for", "d", "in", "datasets", "]", "return", "tf", ".", "data", ".", "Dataset", ".", "zip", "(", "tuple", "(", "datasets", ")", ")", ".", "flat_map", "(", "lambda", "*", "x", ":", "functools", ".", "reduce", "(", "# pylint: disable=g-long-lambda", "tf", ".", "data", ".", "Dataset", ".", "concatenate", ",", "map", "(", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", ",", "x", ")", ")", ")" ]
Returns a dataset containing examples from multiple problems. Args: mode: A member of problem.DatasetSplit. hparams: A tf.HParams object, the model hparams. global_step: A scalar tensor used to compute the sampling distribution. If global_step is None, we call tf.train.get_or_create_global_step by default. **kwargs: Keywords for problem.Problem.Dataset. Returns: A dataset containing examples from multiple problems.
[ "Returns", "a", "dataset", "containing", "examples", "from", "multiple", "problems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L101-L133
22,705
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
MultiText2TextProblem.normalize_example
python
def normalize_example(self, example, hparams):
  """Assumes that example contains both inputs and targets."""
  length = self.max_length(hparams)
  def _to_constant_shape(tensor):
    tensor = tensor[:length]
    tensor = tf.pad(tensor, [(0, length - tf.shape(tensor)[0])])
    return tf.reshape(tensor, [length])
  if self.has_inputs:
    example['inputs'] = _to_constant_shape(example['inputs'])
    example['targets'] = _to_constant_shape(example['targets'])
  elif 'inputs' in example:
    if self.packed_length:
      raise ValueError('cannot concatenate packed examples on the fly.')
    inputs = example.pop('inputs')[:-1]  # Remove EOS token.
    targets = tf.concat([inputs, example['targets']], 0)
    example['targets'] = _to_constant_shape(targets)
  else:
    example['targets'] = _to_constant_shape(example['targets'])
  if self.packed_length:
    if self.has_inputs:
      if 'inputs_segmentation' in example:
        example['inputs_segmentation'] = _to_constant_shape(
            example['inputs_segmentation'])
        example['inputs_position'] = _to_constant_shape(
            example['inputs_position'])
      else:
        example['inputs_segmentation'] = tf.to_int64(
            tf.not_equal(example['inputs'], 0))
        example['inputs_position'] = (
            example['inputs_segmentation'] * tf.range(length, dtype=tf.int64))
    if 'targets_segmentation' in example:
      example['targets_segmentation'] = _to_constant_shape(
          example['targets_segmentation'])
      example['targets_position'] = _to_constant_shape(
          example['targets_position'])
    else:
      example['targets_segmentation'] = tf.to_int64(
          tf.not_equal(example['targets'], 0))
      example['targets_position'] = (
          example['targets_segmentation'] * tf.range(length, dtype=tf.int64))
  return example
[ "def", "normalize_example", "(", "self", ",", "example", ",", "hparams", ")", ":", "length", "=", "self", ".", "max_length", "(", "hparams", ")", "def", "_to_constant_shape", "(", "tensor", ")", ":", "tensor", "=", "tensor", "[", ":", "length", "]", "tensor", "=", "tf", ".", "pad", "(", "tensor", ",", "[", "(", "0", ",", "length", "-", "tf", ".", "shape", "(", "tensor", ")", "[", "0", "]", ")", "]", ")", "return", "tf", ".", "reshape", "(", "tensor", ",", "[", "length", "]", ")", "if", "self", ".", "has_inputs", ":", "example", "[", "'inputs'", "]", "=", "_to_constant_shape", "(", "example", "[", "'inputs'", "]", ")", "example", "[", "'targets'", "]", "=", "_to_constant_shape", "(", "example", "[", "'targets'", "]", ")", "elif", "'inputs'", "in", "example", ":", "if", "self", ".", "packed_length", ":", "raise", "ValueError", "(", "'cannot concatenate packed examples on the fly.'", ")", "inputs", "=", "example", ".", "pop", "(", "'inputs'", ")", "[", ":", "-", "1", "]", "# Remove EOS token.", "targets", "=", "tf", ".", "concat", "(", "[", "inputs", ",", "example", "[", "'targets'", "]", "]", ",", "0", ")", "example", "[", "'targets'", "]", "=", "_to_constant_shape", "(", "targets", ")", "else", ":", "example", "[", "'targets'", "]", "=", "_to_constant_shape", "(", "example", "[", "'targets'", "]", ")", "if", "self", ".", "packed_length", ":", "if", "self", ".", "has_inputs", ":", "if", "'inputs_segmentation'", "in", "example", ":", "example", "[", "'inputs_segmentation'", "]", "=", "_to_constant_shape", "(", "example", "[", "'inputs_segmentation'", "]", ")", "example", "[", "'inputs_position'", "]", "=", "_to_constant_shape", "(", "example", "[", "'inputs_position'", "]", ")", "else", ":", "example", "[", "'inputs_segmentation'", "]", "=", "tf", ".", "to_int64", "(", "tf", ".", "not_equal", "(", "example", "[", "'inputs'", "]", ",", "0", ")", ")", "example", "[", "'inputs_position'", "]", "=", "(", "example", "[", "'inputs_segmentation'", "]", "*", "tf", ".", "range", "(", "length", ",", "dtype", "=", "tf", ".", "int64", ")", ")", "if", "'targets_segmentation'", "in", "example", ":", "example", "[", "'targets_segmentation'", "]", "=", "_to_constant_shape", "(", "example", "[", "'targets_segmentation'", "]", ")", "example", "[", "'targets_position'", "]", "=", "_to_constant_shape", "(", "example", "[", "'targets_position'", "]", ")", "else", ":", "example", "[", "'targets_segmentation'", "]", "=", "tf", ".", "to_int64", "(", "tf", ".", "not_equal", "(", "example", "[", "'targets'", "]", ",", "0", ")", ")", "example", "[", "'targets_position'", "]", "=", "(", "example", "[", "'targets_segmentation'", "]", "*", "tf", ".", "range", "(", "length", ",", "dtype", "=", "tf", ".", "int64", ")", ")", "return", "example" ]
Assumes that example contains both inputs and targets.
[ "Assumes", "that", "example", "contains", "both", "inputs", "and", "targets", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L139-L181
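The truncate-or-pad logic of _to_constant_shape is the heart of normalize_example; here is a minimal NumPy analog (a sketch only, not the TF implementation):

import numpy as np

def to_constant_shape(tensor, length):
  # Truncate to at most `length` entries, then zero-pad up to `length`.
  tensor = tensor[:length]
  return np.pad(tensor, (0, length - tensor.shape[0]))

print(to_constant_shape(np.array([1, 2, 3, 4, 5]), 3))  # [1 2 3]
print(to_constant_shape(np.array([1, 2]), 4))           # [1 2 0 0]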
22,706
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
MultiText2TextProblem.generate_data_with_shared_vocab
python
def generate_data_with_shared_vocab(self, data_dir, tmp_dir, task_id=-1):
  """Generates TF-Records for problems using a global vocabulary file."""
  global_vocab_filename = os.path.join(data_dir, self.vocab_filename)
  if not tf.gfile.Exists(global_vocab_filename):
    raise ValueError(
        'Global vocabulary file: %s does not exist, '
        'please create one using build_vocab.py' % global_vocab_filename)
  # Before generating data, we copy the global vocabulary file to the children
  # locations. Although this is not the most disk efficient strategy, it
  # imposes the fewest changes to the text-to-text API.
  for p in self.problems:
    local_vocab_filename = os.path.join(data_dir, p.vocab_filename)
    if not tf.gfile.Exists(local_vocab_filename):
      tf.gfile.Copy(global_vocab_filename, local_vocab_filename)
    p.generate_data(data_dir, tmp_dir, task_id)
[ "def", "generate_data_with_shared_vocab", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "task_id", "=", "-", "1", ")", ":", "global_vocab_filename", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "self", ".", "vocab_filename", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "global_vocab_filename", ")", ":", "raise", "ValueError", "(", "'Global vocabulary file: %s does not exist, '", "'please create one using build_vocab.py'", "%", "global_vocab_filename", ")", "# Before generating data, we copy the global vocabulary file to the children", "# locations. Although this is not the most disk efficient strategy, it", "# imposes the fewest changes to the text-to-text API.", "for", "p", "in", "self", ".", "problems", ":", "local_vocab_filename", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "p", ".", "vocab_filename", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "local_vocab_filename", ")", ":", "tf", ".", "gfile", ".", "Copy", "(", "global_vocab_filename", ",", "local_vocab_filename", ")", "p", ".", "generate_data", "(", "data_dir", ",", "tmp_dir", ",", "task_id", ")" ]
Generates TF-Records for problems using a global vocabulary file.
[ "Generates", "TF", "-", "Records", "for", "problems", "using", "a", "global", "vocabulary", "file", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L183-L197
22,707
tensorflow/tensor2tensor
tensor2tensor/layers/area_attention.py
lengths_to_area_mask
python
def lengths_to_area_mask(feature_length, length, max_area_size):
  """Generates a non-padding mask for areas based on lengths.

  Args:
    feature_length: a tensor of [batch_size]
    length: the length of the batch
    max_area_size: the maximum area size considered
  Returns:
    mask: a tensor in shape of [batch_size, num_areas]
  """
  paddings = tf.cast(tf.expand_dims(
      tf.logical_not(
          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
  _, _, area_sum, _, _ = compute_area_features(paddings,
                                               max_area_width=max_area_size)
  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
  return mask
[ "def", "lengths_to_area_mask", "(", "feature_length", ",", "length", ",", "max_area_size", ")", ":", "paddings", "=", "tf", ".", "cast", "(", "tf", ".", "expand_dims", "(", "tf", ".", "logical_not", "(", "tf", ".", "sequence_mask", "(", "feature_length", ",", "maxlen", "=", "length", ")", ")", ",", "2", ")", ",", "tf", ".", "float32", ")", "_", ",", "_", ",", "area_sum", ",", "_", ",", "_", "=", "compute_area_features", "(", "paddings", ",", "max_area_width", "=", "max_area_size", ")", "mask", "=", "tf", ".", "squeeze", "(", "tf", ".", "logical_not", "(", "tf", ".", "cast", "(", "area_sum", ",", "tf", ".", "bool", ")", ")", ",", "[", "2", "]", ")", "return", "mask" ]
Generates a non-padding mask for areas based on lengths. Args: feature_length: a tensor of [batch_size] length: the length of the batch max_area_size: the maximum area size considered Returns: mask: a tensor in shape of [batch_size, num_areas]
[ "Generates", "a", "non", "-", "padding", "mask", "for", "areas", "based", "on", "lengths", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/area_attention.py#L27-L44
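For intuition, a NumPy sketch of the masking idea (illustrative values, not the TF code): an area is kept only if it covers no padding, i.e. the sum of padding indicators inside it is zero.

import numpy as np

feature_length, length, area_width = 3, 5, 2
padding = ~(np.arange(length) < feature_length)   # [F, F, F, T, T]
# A width-2 area is valid iff the padding sum inside it is zero.
valid = [not padding[i:i + area_width].any()
         for i in range(length - area_width + 1)]
print(valid)  # [True, True, False, False]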
22,708
tensorflow/tensor2tensor
tensor2tensor/layers/area_attention.py
_pool_one_shape
python
def _pool_one_shape(features_2d, area_width, area_height, batch_size,
                    width, height, depth, fn=tf.reduce_max, name=None):
  """Pools for an area in features_2d.

  Args:
    features_2d: a Tensor in a shape of [batch_size, height, width, depth].
    area_width: the max width allowed for an area.
    area_height: the max height allowed for an area.
    batch_size: the batch size.
    width: the width of the memory.
    height: the height of the memory.
    depth: the depth of the features.
    fn: the TF function for the pooling.
    name: the op name.
  Returns:
    pool_tensor: A Tensor of shape [batch_size, num_areas, depth]
  """
  with tf.name_scope(name, default_name="pool_one_shape"):
    images = []
    for y_shift in range(area_height):
      image_height = tf.maximum(height - area_height + 1 + y_shift, 0)
      for x_shift in range(area_width):
        image_width = tf.maximum(width - area_width + 1 + x_shift, 0)
        area = features_2d[:, y_shift:image_height, x_shift:image_width, :]
        flatten_area = tf.reshape(area, [batch_size, -1, depth, 1])
        images.append(flatten_area)
    image_tensor = tf.concat(images, axis=3)
    max_tensor = fn(image_tensor, axis=3)
  return max_tensor
[ "def", "_pool_one_shape", "(", "features_2d", ",", "area_width", ",", "area_height", ",", "batch_size", ",", "width", ",", "height", ",", "depth", ",", "fn", "=", "tf", ".", "reduce_max", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "default_name", "=", "\"pool_one_shape\"", ")", ":", "images", "=", "[", "]", "for", "y_shift", "in", "range", "(", "area_height", ")", ":", "image_height", "=", "tf", ".", "maximum", "(", "height", "-", "area_height", "+", "1", "+", "y_shift", ",", "0", ")", "for", "x_shift", "in", "range", "(", "area_width", ")", ":", "image_width", "=", "tf", ".", "maximum", "(", "width", "-", "area_width", "+", "1", "+", "x_shift", ",", "0", ")", "area", "=", "features_2d", "[", ":", ",", "y_shift", ":", "image_height", ",", "x_shift", ":", "image_width", ",", ":", "]", "flatten_area", "=", "tf", ".", "reshape", "(", "area", ",", "[", "batch_size", ",", "-", "1", ",", "depth", ",", "1", "]", ")", "images", ".", "append", "(", "flatten_area", ")", "image_tensor", "=", "tf", ".", "concat", "(", "images", ",", "axis", "=", "3", ")", "max_tensor", "=", "fn", "(", "image_tensor", ",", "axis", "=", "3", ")", "return", "max_tensor" ]
Pools for an area in features_2d. Args: features_2d: a Tensor in a shape of [batch_size, height, width, depth]. area_width: the max width allowed for an area. area_height: the max height allowed for an area. batch_size: the batch size. width: the width of the memory. height: the height of the memory. depth: the depth of the features. fn: the TF function for the pooling. name: the op name. Returns: pool_tensor: A Tensor of shape [batch_size, num_areas, depth]
[ "Pools", "for", "an", "area", "in", "features_2d", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/area_attention.py#L47-L75
22,709
tensorflow/tensor2tensor
tensor2tensor/layers/area_attention.py
compute_area_features
python
def compute_area_features(features, max_area_width, max_area_height=1,
                          height=1, epsilon=1e-6):
  """Computes features for each area.

  Args:
    features: a Tensor in a shape of [batch_size, height * width, depth].
    max_area_width: the max width allowed for an area.
    max_area_height: the max height allowed for an area.
    height: the height of the image.
    epsilon: the epsilon added to the variance for computing standard
      deviation.
  Returns:
    area_mean: A Tensor of shape [batch_size, num_areas, depth]
    area_std: A Tensor of shape [batch_size, num_areas, depth]
    area_sum: A Tensor of shape [batch_size, num_areas, depth]
    area_heights: A Tensor of shape [batch_size, num_areas, 1]
    area_widths: A Tensor of shape [batch_size, num_areas, 1]
  """
  with tf.name_scope("compute_area_features"):
    tf.logging.info("area_attention compute_area_features: %d x %d",
                    max_area_height, max_area_width)
    area_sum, area_heights, area_widths = _compute_sum_image(
        features, max_area_width=max_area_width,
        max_area_height=max_area_height, height=height)
    area_squared_sum, _, _ = _compute_sum_image(
        tf.pow(features, 2), max_area_width=max_area_width,
        max_area_height=max_area_height, height=height)
    sizes = tf.multiply(area_heights, area_widths)
    float_area_sizes = tf.to_float(sizes)
    area_mean = tf.div(area_sum, float_area_sizes)
    s2_n = tf.div(area_squared_sum, float_area_sizes)
    area_variance = tf.subtract(s2_n, tf.pow(area_mean, 2))
    area_std = tf.sqrt(tf.abs(area_variance) + epsilon)
    return area_mean, area_std, area_sum, area_heights, area_widths
[ "def", "compute_area_features", "(", "features", ",", "max_area_width", ",", "max_area_height", "=", "1", ",", "height", "=", "1", ",", "epsilon", "=", "1e-6", ")", ":", "with", "tf", ".", "name_scope", "(", "\"compute_area_features\"", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"area_attention compute_area_features: %d x %d\"", ",", "max_area_height", ",", "max_area_width", ")", "area_sum", ",", "area_heights", ",", "area_widths", "=", "_compute_sum_image", "(", "features", ",", "max_area_width", "=", "max_area_width", ",", "max_area_height", "=", "max_area_height", ",", "height", "=", "height", ")", "area_squared_sum", ",", "_", ",", "_", "=", "_compute_sum_image", "(", "tf", ".", "pow", "(", "features", ",", "2", ")", ",", "max_area_width", "=", "max_area_width", ",", "max_area_height", "=", "max_area_height", ",", "height", "=", "height", ")", "sizes", "=", "tf", ".", "multiply", "(", "area_heights", ",", "area_widths", ")", "float_area_sizes", "=", "tf", ".", "to_float", "(", "sizes", ")", "area_mean", "=", "tf", ".", "div", "(", "area_sum", ",", "float_area_sizes", ")", "s2_n", "=", "tf", ".", "div", "(", "area_squared_sum", ",", "float_area_sizes", ")", "area_variance", "=", "tf", ".", "subtract", "(", "s2_n", ",", "tf", ".", "pow", "(", "area_mean", ",", "2", ")", ")", "area_std", "=", "tf", ".", "sqrt", "(", "tf", ".", "abs", "(", "area_variance", ")", "+", "epsilon", ")", "return", "area_mean", ",", "area_std", ",", "area_sum", ",", "area_heights", ",", "area_widths" ]
Computes features for each area. Args: features: a Tensor in a shape of [batch_size, height * width, depth]. max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. height: the height of the image. epsilon: the epsilon added to the variance for computing standard deviation. Returns: area_mean: A Tensor of shape [batch_size, num_areas, depth] area_std: A Tensor of shape [batch_size, num_areas, depth] area_sum: A Tensor of shape [batch_size, num_areas, depth] area_heights: A Tensor of shape [batch_size, num_areas, 1] area_widths: A Tensor of shape [batch_size, num_areas, 1]
[ "Computes", "features", "for", "each", "area", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/area_attention.py#L199-L231
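For intuition, a NumPy sketch of the 1-D case (max_area_height=1): mean and standard deviation over every sliding window of widths 1..max_area_width, mirroring the sum / squared-sum arithmetic above. A sketch only, not the TF implementation:

import numpy as np

def area_features_1d(features, max_area_width, epsilon=1e-6):
  # features: [length, depth]; one output row per area.
  means, stds = [], []
  for w in range(1, max_area_width + 1):
    for start in range(features.shape[0] - w + 1):
      area = features[start:start + w]
      mean = area.sum(axis=0) / w
      var = (area ** 2).sum(axis=0) / w - mean ** 2   # E[x^2] - E[x]^2
      means.append(mean)
      stds.append(np.sqrt(np.abs(var) + epsilon))
  return np.stack(means), np.stack(stds)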
22,710
tensorflow/tensor2tensor
tensor2tensor/layers/area_attention.py
compute_area_key
python
def compute_area_key(features, max_area_width, max_area_height=1, height=1,
                     mode="mean", training=True, name=None):
  """Computes the key for each area.

  Args:
    features: a Tensor in a shape of [batch_size, height * width, depth].
    max_area_width: the max width allowed for an area.
    max_area_height: the max height allowed for an area.
    height: the height of the image.
    mode: whether to combine different area features or only use
      the vector mean of each area, which can be "mean", "concat", "sum",
      "sample_concat", and "sample_sum".
    training: indicating if it is in the training mode.
    name: the name for setting the variable scope.
  Returns:
    area_key: a Tensor in the shape of [batch_size, num_areas, depth]
  """
  tf.logging.info("area_attention mode=%s", mode)
  area_mean, area_std, _, area_heights, area_widths =\
      compute_area_features(features, max_area_width=max_area_width,
                            max_area_height=max_area_height, height=height)
  if mode == "mean":
    return area_mean
  elif mode == "max":
    area_max, _, _ = basic_pool(features, max_area_width=max_area_width,
                                max_area_height=max_area_height,
                                height=height)
    return area_max
  elif mode == "sample":
    if training:
      area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
    return area_mean
  with tf.variable_scope(
      name, default_name="combine_area_features",
      values=[area_mean, area_std, area_heights, area_widths]):
    depth = common_layers.shape_list(area_mean)[-1]
    height_embed = tf.nn.embedding_lookup(
        params=tf.get_variable("area_height_emb",
                               [max_area_height, depth // 2]),
        ids=area_heights[:, :, 0] - 1)
    width_embed = tf.nn.embedding_lookup(
        params=tf.get_variable("area_width_emb",
                               [max_area_width, depth // 2]),
        ids=area_widths[:, :, 0] - 1)
    size_embed = tf.concat([height_embed, width_embed], -1)
    if mode == "concat":
      feature_concat = tf.concat([area_mean, area_std, size_embed], -1)
    elif mode == "max_concat":
      area_max, _, _ = basic_pool(features, max_area_width=max_area_width,
                                  max_area_height=max_area_height,
                                  height=height)
      feature_concat = tf.concat([area_max, size_embed], -1)
    elif mode == "sum":
      feature_concat = size_embed + area_mean + area_std
    elif mode == "sample_concat":
      if training:
        area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
      feature_concat = tf.concat([area_mean, size_embed], -1)
    elif mode == "sample_sum":
      if training:
        area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
      feature_concat = area_mean + size_embed
    else:
      raise ValueError("Unsupported area key mode=%s" % mode)
    feature_hidden = tf.layers.dense(inputs=feature_concat,
                                     units=depth,
                                     activation=tf.nn.relu)
    area_key = tf.layers.dense(feature_hidden, units=depth)
    return area_key
[ "def", "compute_area_key", "(", "features", ",", "max_area_width", ",", "max_area_height", "=", "1", ",", "height", "=", "1", ",", "mode", "=", "\"mean\"", ",", "training", "=", "True", ",", "name", "=", "None", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"area_attention mode=%s\"", ",", "mode", ")", "area_mean", ",", "area_std", ",", "_", ",", "area_heights", ",", "area_widths", "=", "compute_area_features", "(", "features", ",", "max_area_width", "=", "max_area_width", ",", "max_area_height", "=", "max_area_height", ",", "height", "=", "height", ")", "if", "mode", "==", "\"mean\"", ":", "return", "area_mean", "elif", "mode", "==", "\"max\"", ":", "area_max", ",", "_", ",", "_", "=", "basic_pool", "(", "features", ",", "max_area_width", "=", "max_area_width", ",", "max_area_height", "=", "max_area_height", ",", "height", "=", "height", ")", "return", "area_max", "elif", "mode", "==", "\"sample\"", ":", "if", "training", ":", "area_mean", "+=", "(", "area_std", "*", "tf", ".", "random_normal", "(", "tf", ".", "shape", "(", "area_std", ")", ")", ")", "return", "area_mean", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"combine_area_features\"", ",", "values", "=", "[", "area_mean", ",", "area_std", ",", "area_heights", ",", "area_widths", "]", ")", ":", "depth", "=", "common_layers", ".", "shape_list", "(", "area_mean", ")", "[", "-", "1", "]", "height_embed", "=", "tf", ".", "nn", ".", "embedding_lookup", "(", "params", "=", "tf", ".", "get_variable", "(", "\"area_height_emb\"", ",", "[", "max_area_height", ",", "depth", "//", "2", "]", ")", ",", "ids", "=", "area_heights", "[", ":", ",", ":", ",", "0", "]", "-", "1", ")", "width_embed", "=", "tf", ".", "nn", ".", "embedding_lookup", "(", "params", "=", "tf", ".", "get_variable", "(", "\"area_width_emb\"", ",", "[", "max_area_width", ",", "depth", "//", "2", "]", ")", ",", "ids", "=", "area_widths", "[", ":", ",", ":", ",", "0", "]", "-", "1", ")", "size_embed", "=", "tf", ".", "concat", "(", "[", "height_embed", ",", "width_embed", "]", ",", "-", "1", ")", "if", "mode", "==", "\"concat\"", ":", "feature_concat", "=", "tf", ".", "concat", "(", "[", "area_mean", ",", "area_std", ",", "size_embed", "]", ",", "-", "1", ")", "elif", "mode", "==", "\"max_concat\"", ":", "area_max", ",", "_", ",", "_", "=", "basic_pool", "(", "features", ",", "max_area_width", "=", "max_area_width", ",", "max_area_height", "=", "max_area_height", ",", "height", "=", "height", ")", "feature_concat", "=", "tf", ".", "concat", "(", "[", "area_max", ",", "size_embed", "]", ",", "-", "1", ")", "elif", "mode", "==", "\"sum\"", ":", "feature_concat", "=", "size_embed", "+", "area_mean", "+", "area_std", "elif", "mode", "==", "\"sample_concat\"", ":", "if", "training", ":", "area_mean", "+=", "(", "area_std", "*", "tf", ".", "random_normal", "(", "tf", ".", "shape", "(", "area_std", ")", ")", ")", "feature_concat", "=", "tf", ".", "concat", "(", "[", "area_mean", ",", "size_embed", "]", ",", "-", "1", ")", "elif", "mode", "==", "\"sample_sum\"", ":", "if", "training", ":", "area_mean", "+=", "(", "area_std", "*", "tf", ".", "random_normal", "(", "tf", ".", "shape", "(", "area_std", ")", ")", ")", "feature_concat", "=", "area_mean", "+", "size_embed", "else", ":", "raise", "ValueError", "(", "\"Unsupported area key mode=%s\"", "%", "mode", ")", "feature_hidden", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "feature_concat", ",", "units", "=", "depth", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", 
"area_key", "=", "tf", ".", "layers", ".", "dense", "(", "feature_hidden", ",", "units", "=", "depth", ")", "return", "area_key" ]
Computes the key for each area. Args: features: a Tensor in a shape of [batch_size, height * width, depth]. max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. height: the height of the image. mode: whether to combine different area features or only use the vector mean of each area, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". training: indicating if it is in the training mode. name: the name for setting the variable scope. Returns: area_key: a Tensor in the shape of [batch_size, num_areas, depth]
[ "Computes", "the", "key", "for", "each", "area", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/area_attention.py#L234-L302
22,711
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
setup_directories
python
def setup_directories(base_dir, subdirs):
  """Setup directories."""
  base_dir = os.path.expanduser(base_dir)
  tf.gfile.MakeDirs(base_dir)
  all_dirs = {}
  for subdir in subdirs:
    if isinstance(subdir, six.string_types):
      subdir_tuple = (subdir,)
    else:
      subdir_tuple = subdir
    dir_name = os.path.join(base_dir, *subdir_tuple)
    tf.gfile.MakeDirs(dir_name)
    all_dirs[subdir] = dir_name
  return all_dirs
[ "def", "setup_directories", "(", "base_dir", ",", "subdirs", ")", ":", "base_dir", "=", "os", ".", "path", ".", "expanduser", "(", "base_dir", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "base_dir", ")", "all_dirs", "=", "{", "}", "for", "subdir", "in", "subdirs", ":", "if", "isinstance", "(", "subdir", ",", "six", ".", "string_types", ")", ":", "subdir_tuple", "=", "(", "subdir", ",", ")", "else", ":", "subdir_tuple", "=", "subdir", "dir_name", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "*", "subdir_tuple", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "dir_name", ")", "all_dirs", "[", "subdir", "]", "=", "dir_name", "return", "all_dirs" ]
Setup directories.
[ "Setup", "directories", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L68-L82
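A usage sketch; the base path and subdirectory names here are hypothetical. A tuple entry is joined into a nested path, and the original entry (string or tuple) serves as the key of the returned dict:

dirs = setup_directories('~/t2t_run', ['data', ('world_model', 'ckpt')])
# dirs['data']                   -> '<home>/t2t_run/data'
# dirs[('world_model', 'ckpt')]  -> '<home>/t2t_run/world_model/ckpt'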
22,712
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
make_relative_timing_fn
python
def make_relative_timing_fn():
  """Make a function that logs the duration since it was made."""
  start_time = time.time()

  def format_relative_time():
    time_delta = time.time() - start_time
    return str(datetime.timedelta(seconds=time_delta))

  def log_relative_time():
    tf.logging.info("Timing: %s", format_relative_time())

  return log_relative_time
[ "def", "make_relative_timing_fn", "(", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "def", "format_relative_time", "(", ")", ":", "time_delta", "=", "time", ".", "time", "(", ")", "-", "start_time", "return", "str", "(", "datetime", ".", "timedelta", "(", "seconds", "=", "time_delta", ")", ")", "def", "log_relative_time", "(", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Timing: %s\"", ",", "format_relative_time", "(", ")", ")", "return", "log_relative_time" ]
Make a function that logs the duration since it was made.
[ "Make", "a", "function", "that", "logs", "the", "duration", "since", "it", "was", "made", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L85-L96
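A usage sketch: the returned closure captures start_time, so every call logs the elapsed time since the factory was invoked:

import time

log_relative_time = make_relative_timing_fn()
time.sleep(2)
log_relative_time()  # logs roughly "Timing: 0:00:02.00..." via tf.logging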
22,713
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
train_supervised
python
def train_supervised(problem, model_name, hparams, data_dir, output_dir,
                     train_steps, eval_steps, local_eval_frequency=None,
                     schedule="continuous_train_and_eval"):
  """Train supervised."""
  if local_eval_frequency is None:
    local_eval_frequency = FLAGS.local_eval_frequency

  exp_fn = trainer_lib.create_experiment_fn(
      model_name, problem, data_dir, train_steps, eval_steps,
      min_eval_frequency=local_eval_frequency
  )
  run_config = trainer_lib.create_run_config(model_name, model_dir=output_dir)
  exp = exp_fn(run_config, hparams)
  getattr(exp, schedule)()
[ "def", "train_supervised", "(", "problem", ",", "model_name", ",", "hparams", ",", "data_dir", ",", "output_dir", ",", "train_steps", ",", "eval_steps", ",", "local_eval_frequency", "=", "None", ",", "schedule", "=", "\"continuous_train_and_eval\"", ")", ":", "if", "local_eval_frequency", "is", "None", ":", "local_eval_frequency", "=", "FLAGS", ".", "local_eval_frequency", "exp_fn", "=", "trainer_lib", ".", "create_experiment_fn", "(", "model_name", ",", "problem", ",", "data_dir", ",", "train_steps", ",", "eval_steps", ",", "min_eval_frequency", "=", "local_eval_frequency", ")", "run_config", "=", "trainer_lib", ".", "create_run_config", "(", "model_name", ",", "model_dir", "=", "output_dir", ")", "exp", "=", "exp_fn", "(", "run_config", ",", "hparams", ")", "getattr", "(", "exp", ",", "schedule", ")", "(", ")" ]
Train supervised.
[ "Train", "supervised", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L125-L138
22,714
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
train_agent
python
def train_agent(real_env, learner, world_model_dir, hparams, epoch):
  """Train the PPO agent in the simulated environment."""
  initial_frame_chooser = rl_utils.make_initial_frame_chooser(
      real_env, hparams.frame_stack_size, hparams.simulation_random_starts,
      hparams.simulation_flip_first_random_for_beginning
  )
  env_fn = rl.make_simulated_env_fn_from_hparams(
      real_env, hparams, batch_size=hparams.simulated_batch_size,
      initial_frame_chooser=initial_frame_chooser,
      model_dir=world_model_dir,
      sim_video_dir=os.path.join(
          learner.agent_model_dir, "sim_videos_{}".format(epoch)
      )
  )
  base_algo_str = hparams.base_algo
  train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
  if hparams.wm_policy_param_sharing:
    train_hparams.optimizer_zero_grads = True

  rl_utils.update_hparams_from_hparams(
      train_hparams, hparams, base_algo_str + "_"
  )

  final_epoch = hparams.epochs - 1
  is_special_epoch = (epoch + 3) == final_epoch or (epoch + 7) == final_epoch
  is_final_epoch = epoch == final_epoch
  env_step_multiplier = 3 if is_final_epoch else 2 if is_special_epoch else 1
  learner.train(
      env_fn, train_hparams, simulated=True, save_continuously=True,
      epoch=epoch, env_step_multiplier=env_step_multiplier
  )
[ "def", "train_agent", "(", "real_env", ",", "learner", ",", "world_model_dir", ",", "hparams", ",", "epoch", ")", ":", "initial_frame_chooser", "=", "rl_utils", ".", "make_initial_frame_chooser", "(", "real_env", ",", "hparams", ".", "frame_stack_size", ",", "hparams", ".", "simulation_random_starts", ",", "hparams", ".", "simulation_flip_first_random_for_beginning", ")", "env_fn", "=", "rl", ".", "make_simulated_env_fn_from_hparams", "(", "real_env", ",", "hparams", ",", "batch_size", "=", "hparams", ".", "simulated_batch_size", ",", "initial_frame_chooser", "=", "initial_frame_chooser", ",", "model_dir", "=", "world_model_dir", ",", "sim_video_dir", "=", "os", ".", "path", ".", "join", "(", "learner", ".", "agent_model_dir", ",", "\"sim_videos_{}\"", ".", "format", "(", "epoch", ")", ")", ")", "base_algo_str", "=", "hparams", ".", "base_algo", "train_hparams", "=", "trainer_lib", ".", "create_hparams", "(", "hparams", ".", "base_algo_params", ")", "if", "hparams", ".", "wm_policy_param_sharing", ":", "train_hparams", ".", "optimizer_zero_grads", "=", "True", "rl_utils", ".", "update_hparams_from_hparams", "(", "train_hparams", ",", "hparams", ",", "base_algo_str", "+", "\"_\"", ")", "final_epoch", "=", "hparams", ".", "epochs", "-", "1", "is_special_epoch", "=", "(", "epoch", "+", "3", ")", "==", "final_epoch", "or", "(", "epoch", "+", "7", ")", "==", "final_epoch", "is_final_epoch", "=", "epoch", "==", "final_epoch", "env_step_multiplier", "=", "3", "if", "is_final_epoch", "else", "2", "if", "is_special_epoch", "else", "1", "learner", ".", "train", "(", "env_fn", ",", "train_hparams", ",", "simulated", "=", "True", ",", "save_continuously", "=", "True", ",", "epoch", "=", "epoch", ",", "env_step_multiplier", "=", "env_step_multiplier", ")" ]
Train the PPO agent in the simulated environment.
[ "Train", "the", "PPO", "agent", "in", "the", "simulated", "environment", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L141-L170
22,715
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
train_agent_real_env
python
def train_agent_real_env(env, learner, hparams, epoch):
  """Train the PPO agent in the real environment."""
  base_algo_str = hparams.base_algo

  train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
  rl_utils.update_hparams_from_hparams(
      train_hparams, hparams, "real_" + base_algo_str + "_"
  )
  if hparams.wm_policy_param_sharing:
    train_hparams.optimizer_zero_grads = True

  env_fn = rl.make_real_env_fn(env)
  num_env_steps = real_env_step_increment(hparams)
  learner.train(
      env_fn, train_hparams, simulated=False, save_continuously=False,
      epoch=epoch, sampling_temp=hparams.real_sampling_temp,
      num_env_steps=num_env_steps,
  )
  # Save unfinished rollouts to history.
  env.reset()
[ "def", "train_agent_real_env", "(", "env", ",", "learner", ",", "hparams", ",", "epoch", ")", ":", "base_algo_str", "=", "hparams", ".", "base_algo", "train_hparams", "=", "trainer_lib", ".", "create_hparams", "(", "hparams", ".", "base_algo_params", ")", "rl_utils", ".", "update_hparams_from_hparams", "(", "train_hparams", ",", "hparams", ",", "\"real_\"", "+", "base_algo_str", "+", "\"_\"", ")", "if", "hparams", ".", "wm_policy_param_sharing", ":", "train_hparams", ".", "optimizer_zero_grads", "=", "True", "env_fn", "=", "rl", ".", "make_real_env_fn", "(", "env", ")", "num_env_steps", "=", "real_env_step_increment", "(", "hparams", ")", "learner", ".", "train", "(", "env_fn", ",", "train_hparams", ",", "simulated", "=", "False", ",", "save_continuously", "=", "False", ",", "epoch", "=", "epoch", ",", "sampling_temp", "=", "hparams", ".", "real_sampling_temp", ",", "num_env_steps", "=", "num_env_steps", ",", ")", "# Save unfinished rollouts to history.", "env", ".", "reset", "(", ")" ]
Train the PPO agent in the real environment.
[ "Train", "the", "PPO", "agent", "in", "the", "real", "environment", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L173-L196
22,716
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
train_world_model
python
def train_world_model(
    env, data_dir, output_dir, hparams, world_model_steps_num, epoch
):
  """Train the world model on problem_name."""
  world_model_steps_num += world_model_step_increment(
      hparams, is_initial_epoch=(epoch == 0)
  )
  model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
  model_hparams.learning_rate = model_hparams.learning_rate_constant
  if epoch > 0:
    model_hparams.learning_rate *= hparams.learning_rate_bump
  if hparams.wm_policy_param_sharing:
    model_hparams.optimizer_zero_grads = True

  restarter = Restarter("world_model", output_dir, world_model_steps_num)
  if restarter.should_skip:
    return world_model_steps_num
  with restarter.training_loop():
    train_supervised(
        problem=env,
        model_name=hparams.generative_model,
        hparams=model_hparams,
        data_dir=data_dir,
        output_dir=output_dir,
        train_steps=restarter.target_global_step,
        eval_steps=100,
        local_eval_frequency=2000
    )

  return world_model_steps_num
[ "def", "train_world_model", "(", "env", ",", "data_dir", ",", "output_dir", ",", "hparams", ",", "world_model_steps_num", ",", "epoch", ")", ":", "world_model_steps_num", "+=", "world_model_step_increment", "(", "hparams", ",", "is_initial_epoch", "=", "(", "epoch", "==", "0", ")", ")", "model_hparams", "=", "trainer_lib", ".", "create_hparams", "(", "hparams", ".", "generative_model_params", ")", "model_hparams", ".", "learning_rate", "=", "model_hparams", ".", "learning_rate_constant", "if", "epoch", ">", "0", ":", "model_hparams", ".", "learning_rate", "*=", "hparams", ".", "learning_rate_bump", "if", "hparams", ".", "wm_policy_param_sharing", ":", "model_hparams", ".", "optimizer_zero_grads", "=", "True", "restarter", "=", "Restarter", "(", "\"world_model\"", ",", "output_dir", ",", "world_model_steps_num", ")", "if", "restarter", ".", "should_skip", ":", "return", "world_model_steps_num", "with", "restarter", ".", "training_loop", "(", ")", ":", "train_supervised", "(", "problem", "=", "env", ",", "model_name", "=", "hparams", ".", "generative_model", ",", "hparams", "=", "model_hparams", ",", "data_dir", "=", "data_dir", ",", "output_dir", "=", "output_dir", ",", "train_steps", "=", "restarter", ".", "target_global_step", ",", "eval_steps", "=", "100", ",", "local_eval_frequency", "=", "2000", ")", "return", "world_model_steps_num" ]
Train the world model on problem_name.
[ "Train", "the", "world", "model", "on", "problem_name", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L199-L228
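The Restarter guard above implements resume-safe training: skip entirely if a previous run already reached the target global step, otherwise train inside a context manager that records progress. A minimal sketch of the same pattern follows; SimpleRestarter is a hypothetical stand-in, not the actual Restarter class used by the function.

import contextlib

class SimpleRestarter(object):
  """Hypothetical stand-in showing the skip-or-train resume pattern."""

  def __init__(self, mode, completed_steps, target_global_step):
    self.mode = mode
    self.target_global_step = target_global_step
    # Nothing to do if an earlier run already reached the target.
    self.should_skip = completed_steps >= target_global_step

  @contextlib.contextmanager
  def training_loop(self):
    yield  # The real helper would also persist progress markers here.

restarter = SimpleRestarter("world_model", completed_steps=500,
                            target_global_step=1000)
if not restarter.should_skip:
  with restarter.training_loop():
    pass  # train up to restarter.target_global_step here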
22,717
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based.py
load_metrics
def load_metrics(event_dir, epoch):
  """Loads metrics for this epoch if they have already been written.

  This reads the entire event file but it's small with just per-epoch metrics.

  Args:
    event_dir: TODO(koz4k): Document this.
    epoch: TODO(koz4k): Document this.

  Returns:
    metrics.
  """
  metrics = {}
  for filename in tf.gfile.ListDirectory(event_dir):
    path = os.path.join(event_dir, filename)
    for event in tf.train.summary_iterator(path):
      if event.step == epoch and event.HasField("summary"):
        value = event.summary.value[0]
        metrics[value.tag] = value.simple_value
  return metrics
python
def load_metrics(event_dir, epoch):
  """Loads metrics for this epoch if they have already been written.

  This reads the entire event file but it's small with just per-epoch metrics.

  Args:
    event_dir: TODO(koz4k): Document this.
    epoch: TODO(koz4k): Document this.

  Returns:
    metrics.
  """
  metrics = {}
  for filename in tf.gfile.ListDirectory(event_dir):
    path = os.path.join(event_dir, filename)
    for event in tf.train.summary_iterator(path):
      if event.step == epoch and event.HasField("summary"):
        value = event.summary.value[0]
        metrics[value.tag] = value.simple_value
  return metrics
[ "def", "load_metrics", "(", "event_dir", ",", "epoch", ")", ":", "metrics", "=", "{", "}", "for", "filename", "in", "tf", ".", "gfile", ".", "ListDirectory", "(", "event_dir", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "event_dir", ",", "filename", ")", "for", "event", "in", "tf", ".", "train", ".", "summary_iterator", "(", "path", ")", ":", "if", "event", ".", "step", "==", "epoch", "and", "event", ".", "HasField", "(", "\"summary\"", ")", ":", "value", "=", "event", ".", "summary", ".", "value", "[", "0", "]", "metrics", "[", "value", ".", "tag", "]", "=", "value", ".", "simple_value", "return", "metrics" ]
Loads metrics for this epoch if they have already been written.

This reads the entire event file but it's small with just per-epoch metrics.

Args:
  event_dir: TODO(koz4k): Document this.
  epoch: TODO(koz4k): Document this.

Returns:
  metrics.
[ "Loads", "metrics", "for", "this", "epoch", "if", "they", "have", "already", "been", "written", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based.py#L231-L250
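A minimal usage sketch for load_metrics; the event directory path and epoch are made up, and the returned tags depend entirely on what was logged.

metrics = load_metrics("/tmp/rl_eval_events", epoch=3)
if metrics:
  for tag, value in sorted(metrics.items()):
    print("%s = %.3f" % (tag, value))  # e.g. "mean_reward = 21.000"
else:
  print("No summaries written for epoch 3 yet.")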
22,718
tensorflow/tensor2tensor
tensor2tensor/models/research/gene_expression.py
conv_layer
def conv_layer(x,
               hidden_size,
               kernel_size,
               stride,
               pooling_window,
               dropout_rate,
               dilation_rate,
               name="conv"):
  """Single conv layer with relu, optional pooling, and dropout."""
  with tf.variable_scope(name):
    out = x
    out = common_layers.conv1d_block(
        out,
        hidden_size, [(dilation_rate, kernel_size)],
        strides=stride,
        first_relu=False,
        padding="same")
    out = tf.nn.relu(out)
    if pooling_window:
      out = tf.layers.max_pooling1d(
          out, pooling_window, pooling_window, padding="same")
    out = tf.layers.dropout(out, dropout_rate)
    return out
python
def conv_layer(x,
               hidden_size,
               kernel_size,
               stride,
               pooling_window,
               dropout_rate,
               dilation_rate,
               name="conv"):
  """Single conv layer with relu, optional pooling, and dropout."""
  with tf.variable_scope(name):
    out = x
    out = common_layers.conv1d_block(
        out,
        hidden_size, [(dilation_rate, kernel_size)],
        strides=stride,
        first_relu=False,
        padding="same")
    out = tf.nn.relu(out)
    if pooling_window:
      out = tf.layers.max_pooling1d(
          out, pooling_window, pooling_window, padding="same")
    out = tf.layers.dropout(out, dropout_rate)
    return out
[ "def", "conv_layer", "(", "x", ",", "hidden_size", ",", "kernel_size", ",", "stride", ",", "pooling_window", ",", "dropout_rate", ",", "dilation_rate", ",", "name", "=", "\"conv\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "out", "=", "x", "out", "=", "common_layers", ".", "conv1d_block", "(", "out", ",", "hidden_size", ",", "[", "(", "dilation_rate", ",", "kernel_size", ")", "]", ",", "strides", "=", "stride", ",", "first_relu", "=", "False", ",", "padding", "=", "\"same\"", ")", "out", "=", "tf", ".", "nn", ".", "relu", "(", "out", ")", "if", "pooling_window", ":", "out", "=", "tf", ".", "layers", ".", "max_pooling1d", "(", "out", ",", "pooling_window", ",", "pooling_window", ",", "padding", "=", "\"same\"", ")", "out", "=", "tf", ".", "layers", ".", "dropout", "(", "out", ",", "dropout_rate", ")", "return", "out" ]
Single conv layer with relu, optional pooling, and dropout.
[ "Single", "conv", "layer", "with", "relu", "optional", "pooling", "and", "dropout", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/gene_expression.py#L92-L114
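A minimal usage sketch for conv_layer, under assumed shapes (a batch of 16 length-1024 sequences with 256 channels); nothing here comes from the repo beyond the function above.

import tensorflow as tf

x = tf.random_normal([16, 1024, 256])  # [batch, length, channels]
out = conv_layer(x, hidden_size=256, kernel_size=5, stride=1,
                 pooling_window=2, dropout_rate=0.1, dilation_rate=1,
                 name="conv_example")
# pooling_window=2 halves the sequence length: out is [16, 512, 256].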
22,719
tensorflow/tensor2tensor
tensor2tensor/models/research/gene_expression.py
gene_expression_conv_base
def gene_expression_conv_base():
  """Hparams for GeneExpressionConv model."""
  hparams = common_hparams.basic_params1()

  batch_size = 10
  output_length = 2048
  inputs_per_output = 128
  chunk_size = 4
  input_length = output_length * inputs_per_output // chunk_size
  hparams.batch_size = input_length * batch_size

  hparams.dropout = 0.1
  hparams.add_hparam("num_conv_layers", 4)
  hparams.add_hparam("num_dconv_layers", 7)
  # The product of these pooling windows should match
  # input_length/target_length.
  hparams.add_hparam("pooling_windows", [2, 2, 2, 4])

  hparams.hidden_size = 256
  hparams.kernel_width = 20
  hparams.add_hparam("stride", 1)
  return hparams
python
def gene_expression_conv_base():
  """Hparams for GeneExpressionConv model."""
  hparams = common_hparams.basic_params1()

  batch_size = 10
  output_length = 2048
  inputs_per_output = 128
  chunk_size = 4
  input_length = output_length * inputs_per_output // chunk_size
  hparams.batch_size = input_length * batch_size

  hparams.dropout = 0.1
  hparams.add_hparam("num_conv_layers", 4)
  hparams.add_hparam("num_dconv_layers", 7)
  # The product of these pooling windows should match
  # input_length/target_length.
  hparams.add_hparam("pooling_windows", [2, 2, 2, 4])

  hparams.hidden_size = 256
  hparams.kernel_width = 20
  hparams.add_hparam("stride", 1)
  return hparams
[ "def", "gene_expression_conv_base", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "batch_size", "=", "10", "output_length", "=", "2048", "inputs_per_output", "=", "128", "chunk_size", "=", "4", "input_length", "=", "output_length", "*", "inputs_per_output", "//", "chunk_size", "hparams", ".", "batch_size", "=", "input_length", "*", "batch_size", "hparams", ".", "dropout", "=", "0.1", "hparams", ".", "add_hparam", "(", "\"num_conv_layers\"", ",", "4", ")", "hparams", ".", "add_hparam", "(", "\"num_dconv_layers\"", ",", "7", ")", "# The product of these pooling windows should match", "# input_length/target_length.", "hparams", ".", "add_hparam", "(", "\"pooling_windows\"", ",", "[", "2", ",", "2", ",", "2", ",", "4", "]", ")", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "kernel_width", "=", "20", "hparams", ".", "add_hparam", "(", "\"stride\"", ",", "1", ")", "return", "hparams" ]
Hparams for GeneExpressionConv model.
[ "Hparams", "for", "GeneExpressionConv", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/gene_expression.py#L128-L149
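A quick arithmetic check of the comment in the hparams above: with these defaults, the product of the pooling windows equals input_length / output_length (the target length), as required.

output_length = 2048
inputs_per_output = 128
chunk_size = 4
input_length = output_length * inputs_per_output // chunk_size  # 65536

product = 1
for window in [2, 2, 2, 4]:
  product *= window
assert product == input_length // output_length  # 32 == 65536 // 2048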
22,720
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
compress_self_attention_layer
def compress_self_attention_layer(x, hparams, name=None):
  """Attend function."""
  with tf.variable_scope(name, default_name="compress_self_attention"):
    x, xshape, _ = cia.maybe_reshape_4d_to_3d(x)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams),
        None,
        None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.reshape(res, xshape)
python
def compress_self_attention_layer(x, hparams, name=None):
  """Attend function."""
  with tf.variable_scope(name, default_name="compress_self_attention"):
    x, xshape, _ = cia.maybe_reshape_4d_to_3d(x)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams),
        None,
        None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.reshape(res, xshape)
[ "def", "compress_self_attention_layer", "(", "x", ",", "hparams", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"compress_self_attention\"", ")", ":", "x", ",", "xshape", ",", "_", "=", "cia", ".", "maybe_reshape_4d_to_3d", "(", "x", ")", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "None", ",", "None", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ")", "res", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "return", "tf", ".", "reshape", "(", "res", ",", "xshape", ")" ]
Attend function.
[ "Attend", "function", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L35-L48
22,721
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
compute_nats_and_bits_per_dim
def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
  """Computes negative ELBO, an upper bound on the negative log-likelihood.

  Args:
    data_dim: int-like indicating data dimensionality.
    latent_dim: int-like indicating latent dimensionality.
    average_reconstruction: Scalar Tensor indicating the reconstruction cost
      averaged over all data dimensions and any data batches.
    average_prior: Scalar Tensor indicating the negative log-prior probability
      averaged over all latent dimensions and any data batches.

  Returns:
    Tuple of scalar Tensors, representing the nats and bits per data dimension
    (e.g., subpixels) respectively.
  """
  with tf.name_scope(None, default_name="compute_nats_per_dim"):
    data_dim = tf.cast(data_dim, average_reconstruction.dtype)
    latent_dim = tf.cast(latent_dim, average_prior.dtype)
    negative_log_likelihood = data_dim * average_reconstruction
    negative_log_prior = latent_dim * average_prior
    negative_elbo = negative_log_likelihood + negative_log_prior
    nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
    bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
    return nats_per_dim, bits_per_dim
python
def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
  """Computes negative ELBO, an upper bound on the negative log-likelihood.

  Args:
    data_dim: int-like indicating data dimensionality.
    latent_dim: int-like indicating latent dimensionality.
    average_reconstruction: Scalar Tensor indicating the reconstruction cost
      averaged over all data dimensions and any data batches.
    average_prior: Scalar Tensor indicating the negative log-prior probability
      averaged over all latent dimensions and any data batches.

  Returns:
    Tuple of scalar Tensors, representing the nats and bits per data dimension
    (e.g., subpixels) respectively.
  """
  with tf.name_scope(None, default_name="compute_nats_per_dim"):
    data_dim = tf.cast(data_dim, average_reconstruction.dtype)
    latent_dim = tf.cast(latent_dim, average_prior.dtype)
    negative_log_likelihood = data_dim * average_reconstruction
    negative_log_prior = latent_dim * average_prior
    negative_elbo = negative_log_likelihood + negative_log_prior
    nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
    bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
    return nats_per_dim, bits_per_dim
[ "def", "compute_nats_and_bits_per_dim", "(", "data_dim", ",", "latent_dim", ",", "average_reconstruction", ",", "average_prior", ")", ":", "with", "tf", ".", "name_scope", "(", "None", ",", "default_name", "=", "\"compute_nats_per_dim\"", ")", ":", "data_dim", "=", "tf", ".", "cast", "(", "data_dim", ",", "average_reconstruction", ".", "dtype", ")", "latent_dim", "=", "tf", ".", "cast", "(", "latent_dim", ",", "average_prior", ".", "dtype", ")", "negative_log_likelihood", "=", "data_dim", "*", "average_reconstruction", "negative_log_prior", "=", "latent_dim", "*", "average_prior", "negative_elbo", "=", "negative_log_likelihood", "+", "negative_log_prior", "nats_per_dim", "=", "tf", ".", "divide", "(", "negative_elbo", ",", "data_dim", ",", "name", "=", "\"nats_per_dim\"", ")", "bits_per_dim", "=", "tf", ".", "divide", "(", "nats_per_dim", ",", "tf", ".", "log", "(", "2.", ")", ",", "name", "=", "\"bits_per_dim\"", ")", "return", "nats_per_dim", ",", "bits_per_dim" ]
Computes negative ELBO, an upper bound on the negative log-likelihood.

Args:
  data_dim: int-like indicating data dimensionality.
  latent_dim: int-like indicating latent dimensionality.
  average_reconstruction: Scalar Tensor indicating the reconstruction cost
    averaged over all data dimensions and any data batches.
  average_prior: Scalar Tensor indicating the negative log-prior probability
    averaged over all latent dimensions and any data batches.

Returns:
  Tuple of scalar Tensors, representing the nats and bits per data dimension
  (e.g., subpixels) respectively.
[ "Computes", "negative", "ELBO", "which", "is", "an", "upper", "bound", "on", "the", "negative", "likelihood", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L51-L77
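A small worked example of the bookkeeping in compute_nats_and_bits_per_dim, using made-up numbers rather than real model outputs.

import math

data_dim, latent_dim = 3072.0, 64.0               # e.g. subpixels, latent dims
average_reconstruction, average_prior = 2.0, 4.0  # average nats per dimension

negative_elbo = data_dim * average_reconstruction + latent_dim * average_prior
nats_per_dim = negative_elbo / data_dim     # (6144 + 256) / 3072 ~= 2.083
bits_per_dim = nats_per_dim / math.log(2.)  # ~= 3.006 bits per dimension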
22,722
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
multinomial_sample
def multinomial_sample(x, vocab_size=None, sampling_method="random",
                       temperature=1.0):
  """Multinomial sampling from an n-dimensional tensor.

  Args:
    x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
    vocab_size: Number of classes in multinomial distribution.
    sampling_method: String, "random" or otherwise deterministic.
    temperature: Positive float.

  Returns:
    Tensor of shape [...].
  """
  vocab_size = vocab_size or common_layers.shape_list(x)[-1]
  if sampling_method == "random" and temperature > 0.0:
    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
  else:
    samples = tf.argmax(x, axis=-1)
  reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
  return reshaped_samples
python
def multinomial_sample(x, vocab_size=None, sampling_method="random",
                       temperature=1.0):
  """Multinomial sampling from an n-dimensional tensor.

  Args:
    x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
    vocab_size: Number of classes in multinomial distribution.
    sampling_method: String, "random" or otherwise deterministic.
    temperature: Positive float.

  Returns:
    Tensor of shape [...].
  """
  vocab_size = vocab_size or common_layers.shape_list(x)[-1]
  if sampling_method == "random" and temperature > 0.0:
    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
  else:
    samples = tf.argmax(x, axis=-1)
  reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
  return reshaped_samples
[ "def", "multinomial_sample", "(", "x", ",", "vocab_size", "=", "None", ",", "sampling_method", "=", "\"random\"", ",", "temperature", "=", "1.0", ")", ":", "vocab_size", "=", "vocab_size", "or", "common_layers", ".", "shape_list", "(", "x", ")", "[", "-", "1", "]", "if", "sampling_method", "==", "\"random\"", "and", "temperature", ">", "0.0", ":", "samples", "=", "tf", ".", "multinomial", "(", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "vocab_size", "]", ")", "/", "temperature", ",", "1", ")", "else", ":", "samples", "=", "tf", ".", "argmax", "(", "x", ",", "axis", "=", "-", "1", ")", "reshaped_samples", "=", "tf", ".", "reshape", "(", "samples", ",", "common_layers", ".", "shape_list", "(", "x", ")", "[", ":", "-", "1", "]", ")", "return", "reshaped_samples" ]
Multinomial sampling from an n-dimensional tensor.

Args:
  x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
  vocab_size: Number of classes in multinomial distribution.
  sampling_method: String, "random" or otherwise deterministic.
  temperature: Positive float.

Returns:
  Tensor of shape [...].
[ "Multinomial", "sampling", "from", "a", "n", "-", "dimensional", "tensor", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L80-L99
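A minimal usage sketch for multinomial_sample; the logits shape here is assumed purely for illustration.

import tensorflow as tf

logits = tf.random_normal([8, 10, 512])  # [batch, length, vocab_size]
random_ids = multinomial_sample(logits, sampling_method="random",
                                temperature=0.7)  # shape [8, 10]
# temperature 0.0 falls through to the deterministic argmax branch.
greedy_ids = multinomial_sample(logits, temperature=0.0)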
22,723
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
ae_latent_sample_beam
def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
  """Samples from the latent space in the autoencoder.

  Args:
    latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of
      its first two dimensions are used. length_q is the latent length, which
      is height * width * hparams.num_latents /
      (2**hparams.num_compress_steps).
    inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings
      to attend to in decoder.
    ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q,
      length_kv]. Encoder-decoder attention bias.
    embed: Callable which embeds discrete latent hot-vectors and a hidden size
      and returns dense vectors.
    hparams: HParams.

  Returns:
    Tensor of shape [batch, length].
  """

  def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(ids, axis=2)  # Ids start with added all-zeros.
    latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])

    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
      latents_dense = embed(
          tf.one_hot(latents_discrete, depth=2**hparams.bottleneck_bits),
          hparams.hidden_size)
      latents_pred = transformer_latent_decoder(
          latents_dense, inputs, ed, hparams, name="latent_prediction")
      logits = tf.layers.dense(
          latents_pred, 2**hparams.bottleneck_bits, name="logits_dense")
      current_output_position = common_layers.shape_list(ids)[1] - 1
      logits = logits[:, current_output_position, :]
    return logits

  initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
  length = tf.shape(latents_dense_in)[1]
  ids, _, _ = beam_search.beam_search(
      symbols_to_logits_fn,
      initial_ids,
      1,
      length,
      2**hparams.bottleneck_bits,
      alpha=0.0,
      eos_id=-1,
      stop_early=False)

  res = tf.expand_dims(ids[:, 0, :], axis=2)  # Pick first beam.
  return res[:, 1:]
python
def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
  """Samples from the latent space in the autoencoder.

  Args:
    latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of
      its first two dimensions are used. length_q is the latent length, which
      is height * width * hparams.num_latents /
      (2**hparams.num_compress_steps).
    inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings
      to attend to in decoder.
    ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q,
      length_kv]. Encoder-decoder attention bias.
    embed: Callable which embeds discrete latent hot-vectors and a hidden size
      and returns dense vectors.
    hparams: HParams.

  Returns:
    Tensor of shape [batch, length].
  """

  def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(ids, axis=2)  # Ids start with added all-zeros.
    latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])

    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
      latents_dense = embed(
          tf.one_hot(latents_discrete, depth=2**hparams.bottleneck_bits),
          hparams.hidden_size)
      latents_pred = transformer_latent_decoder(
          latents_dense, inputs, ed, hparams, name="latent_prediction")
      logits = tf.layers.dense(
          latents_pred, 2**hparams.bottleneck_bits, name="logits_dense")
      current_output_position = common_layers.shape_list(ids)[1] - 1
      logits = logits[:, current_output_position, :]
    return logits

  initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
  length = tf.shape(latents_dense_in)[1]
  ids, _, _ = beam_search.beam_search(
      symbols_to_logits_fn,
      initial_ids,
      1,
      length,
      2**hparams.bottleneck_bits,
      alpha=0.0,
      eos_id=-1,
      stop_early=False)

  res = tf.expand_dims(ids[:, 0, :], axis=2)  # Pick first beam.
  return res[:, 1:]
[ "def", "ae_latent_sample_beam", "(", "latents_dense_in", ",", "inputs", ",", "ed", ",", "embed", ",", "hparams", ")", ":", "def", "symbols_to_logits_fn", "(", "ids", ")", ":", "\"\"\"Go from ids to logits.\"\"\"", "ids", "=", "tf", ".", "expand_dims", "(", "ids", ",", "axis", "=", "2", ")", "# Ids start with added all-zeros.", "latents_discrete", "=", "tf", ".", "pad", "(", "ids", "[", ":", ",", "1", ":", "]", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "1", "]", ",", "[", "0", ",", "0", "]", "]", ")", "with", "tf", ".", "variable_scope", "(", "tf", ".", "get_variable_scope", "(", ")", ",", "reuse", "=", "False", ")", ":", "latents_dense", "=", "embed", "(", "tf", ".", "one_hot", "(", "latents_discrete", ",", "depth", "=", "2", "**", "hparams", ".", "bottleneck_bits", ")", ",", "hparams", ".", "hidden_size", ")", "latents_pred", "=", "transformer_latent_decoder", "(", "latents_dense", ",", "inputs", ",", "ed", ",", "hparams", ",", "name", "=", "\"latent_prediction\"", ")", "logits", "=", "tf", ".", "layers", ".", "dense", "(", "latents_pred", ",", "2", "**", "hparams", ".", "bottleneck_bits", ",", "name", "=", "\"logits_dense\"", ")", "current_output_position", "=", "common_layers", ".", "shape_list", "(", "ids", ")", "[", "1", "]", "-", "1", "logits", "=", "logits", "[", ":", ",", "current_output_position", ",", ":", "]", "return", "logits", "initial_ids", "=", "tf", ".", "zeros", "(", "[", "tf", ".", "shape", "(", "latents_dense_in", ")", "[", "0", "]", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "length", "=", "tf", ".", "shape", "(", "latents_dense_in", ")", "[", "1", "]", "ids", ",", "_", ",", "_", "=", "beam_search", ".", "beam_search", "(", "symbols_to_logits_fn", ",", "initial_ids", ",", "1", ",", "length", ",", "2", "**", "hparams", ".", "bottleneck_bits", ",", "alpha", "=", "0.0", ",", "eos_id", "=", "-", "1", ",", "stop_early", "=", "False", ")", "res", "=", "tf", ".", "expand_dims", "(", "ids", "[", ":", ",", "0", ",", ":", "]", ",", "axis", "=", "2", ")", "# Pick first beam.", "return", "res", "[", ":", ",", "1", ":", "]" ]
Samples from the latent space in the autoencoder.

Args:
  latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of
    its first two dimensions are used. length_q is the latent length, which is
    height * width * hparams.num_latents / (2**hparams.num_compress_steps).
  inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings
    to attend to in decoder.
  ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q,
    length_kv]. Encoder-decoder attention bias.
  embed: Callable which embeds discrete latent hot-vectors and a hidden size
    and returns dense vectors.
  hparams: HParams.

Returns:
  Tensor of shape [batch, length].
[ "Samples", "from", "the", "latent", "space", "in", "the", "autoencoder", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L133-L182
22,724
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
residual_block_layer
def residual_block_layer(inputs, hparams):
  """Residual block over inputs.

  Runs a residual block consisting of
    conv: kernel_size x kernel_size
    conv: 1x1
    dropout, add and normalize according to hparams.layer_postprocess_sequence.

  Args:
    inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
    hparams: HParams.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
  x = inputs
  for i in range(hparams.num_res_layers):
    with tf.variable_scope("res_conv_%d" % i):
      # kernel_size x kernel_size conv block
      y = common_layers.conv_block(
          common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
          hparams.hidden_size, [((1, 1), kernel)],
          strides=(1, 1),
          padding="SAME",
          name="residual_conv")
      # 1x1 conv block
      y = common_layers.conv_block(
          y,
          hparams.hidden_size, [((1, 1), (1, 1))],
          strides=(1, 1),
          padding="SAME",
          name="residual_dense")
      x = common_layers.layer_postprocess(x, y, hparams)
  return x
python
def residual_block_layer(inputs, hparams):
  """Residual block over inputs.

  Runs a residual block consisting of
    conv: kernel_size x kernel_size
    conv: 1x1
    dropout, add and normalize according to hparams.layer_postprocess_sequence.

  Args:
    inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
    hparams: HParams.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
  x = inputs
  for i in range(hparams.num_res_layers):
    with tf.variable_scope("res_conv_%d" % i):
      # kernel_size x kernel_size conv block
      y = common_layers.conv_block(
          common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
          hparams.hidden_size, [((1, 1), kernel)],
          strides=(1, 1),
          padding="SAME",
          name="residual_conv")
      # 1x1 conv block
      y = common_layers.conv_block(
          y,
          hparams.hidden_size, [((1, 1), (1, 1))],
          strides=(1, 1),
          padding="SAME",
          name="residual_dense")
      x = common_layers.layer_postprocess(x, y, hparams)
  return x
[ "def", "residual_block_layer", "(", "inputs", ",", "hparams", ")", ":", "kernel", "=", "(", "hparams", ".", "res_kernel_size", ",", "hparams", ".", "res_kernel_size", ")", "x", "=", "inputs", "for", "i", "in", "range", "(", "hparams", ".", "num_res_layers", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"res_conv_%d\"", "%", "i", ")", ":", "# kernel_size x kernel_size conv block", "y", "=", "common_layers", ".", "conv_block", "(", "common_layers", ".", "layer_norm", "(", "x", ",", "hparams", ".", "hidden_size", ",", "name", "=", "\"lnorm\"", ")", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "kernel", ")", "]", ",", "strides", "=", "(", "1", ",", "1", ")", ",", "padding", "=", "\"SAME\"", ",", "name", "=", "\"residual_conv\"", ")", "# 1x1 conv block", "y", "=", "common_layers", ".", "conv_block", "(", "y", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "1", ",", "1", ")", ")", "]", ",", "strides", "=", "(", "1", ",", "1", ")", ",", "padding", "=", "\"SAME\"", ",", "name", "=", "\"residual_dense\"", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "return", "x" ]
Residual block over inputs.

Runs a residual block consisting of
  conv: kernel_size x kernel_size
  conv: 1x1
  dropout, add and normalize according to hparams.layer_postprocess_sequence.

Args:
  inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
  hparams: HParams.

Returns:
  Tensor of shape [batch, height, width, hparams.hidden_size].
[ "Residual", "block", "over", "inputs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L185-L219
22,725
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
transformer_text_encoder
def transformer_text_encoder(inputs, target_space, hparams, name=None):
  """Transformer text encoder over inputs with unmasked full attention.

  Args:
    inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
    target_space: int. Used for encoding inputs under a target space id.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
    ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
      for any padded tokens.
  """
  with tf.variable_scope(name, default_name="transformer_text_encoder"):
    inputs = common_layers.flatten4d3d(inputs)
    [
        encoder_input,
        encoder_self_attention_bias,
        ed,
    ] = transformer_layers.transformer_prepare_encoder(
        inputs, target_space=target_space, hparams=hparams)
    encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
    encoder_output = transformer_layers.transformer_encoder(
        encoder_input, encoder_self_attention_bias, hparams)
    return encoder_output, ed
python
def transformer_text_encoder(inputs, target_space, hparams, name=None):
  """Transformer text encoder over inputs with unmasked full attention.

  Args:
    inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
    target_space: int. Used for encoding inputs under a target space id.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
    ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
      for any padded tokens.
  """
  with tf.variable_scope(name, default_name="transformer_text_encoder"):
    inputs = common_layers.flatten4d3d(inputs)
    [
        encoder_input,
        encoder_self_attention_bias,
        ed,
    ] = transformer_layers.transformer_prepare_encoder(
        inputs, target_space=target_space, hparams=hparams)
    encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
    encoder_output = transformer_layers.transformer_encoder(
        encoder_input, encoder_self_attention_bias, hparams)
    return encoder_output, ed
[ "def", "transformer_text_encoder", "(", "inputs", ",", "target_space", ",", "hparams", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"transformer_text_encoder\"", ")", ":", "inputs", "=", "common_layers", ".", "flatten4d3d", "(", "inputs", ")", "[", "encoder_input", ",", "encoder_self_attention_bias", ",", "ed", ",", "]", "=", "transformer_layers", ".", "transformer_prepare_encoder", "(", "inputs", ",", "target_space", "=", "target_space", ",", "hparams", "=", "hparams", ")", "encoder_input", "=", "tf", ".", "nn", ".", "dropout", "(", "encoder_input", ",", "1.0", "-", "hparams", ".", "dropout", ")", "encoder_output", "=", "transformer_layers", ".", "transformer_encoder", "(", "encoder_input", ",", "encoder_self_attention_bias", ",", "hparams", ")", "return", "encoder_output", ",", "ed" ]
Transformer text encoder over inputs with unmasked full attention.

Args:
  inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
  target_space: int. Used for encoding inputs under a target space id.
  hparams: HParams.
  name: string, variable scope.

Returns:
  encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
  ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
    for any padded tokens.
[ "Transformer", "text", "encoder", "over", "inputs", "with", "unmasked", "full", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L391-L419
22,726
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
transformer_image_decoder
def transformer_image_decoder(targets,
                              encoder_output,
                              ed_attention_bias,
                              hparams,
                              name=None):
  """Transformer image decoder over targets with local attention.

  Args:
    targets: Tensor of shape [batch, ...], and whose size is batch * height *
      width * hparams.num_channels * hparams.hidden_size.
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, height, width * hparams.num_channels,
    hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="transformer_dec"):
    batch_size = common_layers.shape_list(targets)[0]
    targets = tf.reshape(targets, [batch_size,
                                   hparams.img_len,
                                   hparams.img_len,
                                   hparams.num_channels * hparams.hidden_size])
    decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_decoder_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        encoder_decoder_attention_bias=ed_attention_bias,
        name="decoder")
    decoder_output = tf.reshape(decoder_output,
                                [batch_size,
                                 hparams.img_len,
                                 hparams.img_len * hparams.num_channels,
                                 hparams.hidden_size])
    return decoder_output
python
def transformer_image_decoder(targets,
                              encoder_output,
                              ed_attention_bias,
                              hparams,
                              name=None):
  """Transformer image decoder over targets with local attention.

  Args:
    targets: Tensor of shape [batch, ...], and whose size is batch * height *
      width * hparams.num_channels * hparams.hidden_size.
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, height, width * hparams.num_channels,
    hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="transformer_dec"):
    batch_size = common_layers.shape_list(targets)[0]
    targets = tf.reshape(targets, [batch_size,
                                   hparams.img_len,
                                   hparams.img_len,
                                   hparams.num_channels * hparams.hidden_size])
    decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_decoder_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        encoder_decoder_attention_bias=ed_attention_bias,
        name="decoder")
    decoder_output = tf.reshape(decoder_output,
                                [batch_size,
                                 hparams.img_len,
                                 hparams.img_len * hparams.num_channels,
                                 hparams.hidden_size])
    return decoder_output
[ "def", "transformer_image_decoder", "(", "targets", ",", "encoder_output", ",", "ed_attention_bias", ",", "hparams", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"transformer_dec\"", ")", ":", "batch_size", "=", "common_layers", ".", "shape_list", "(", "targets", ")", "[", "0", "]", "targets", "=", "tf", ".", "reshape", "(", "targets", ",", "[", "batch_size", ",", "hparams", ".", "img_len", ",", "hparams", ".", "img_len", ",", "hparams", ".", "num_channels", "*", "hparams", ".", "hidden_size", "]", ")", "decoder_input", ",", "_", ",", "_", "=", "cia", ".", "prepare_decoder", "(", "targets", ",", "hparams", ")", "decoder_output", "=", "cia", ".", "transformer_decoder_layers", "(", "decoder_input", ",", "encoder_output", ",", "hparams", ".", "num_decoder_layers", "or", "hparams", ".", "num_hidden_layers", ",", "hparams", ",", "attention_type", "=", "hparams", ".", "dec_attention_type", ",", "encoder_decoder_attention_bias", "=", "ed_attention_bias", ",", "name", "=", "\"decoder\"", ")", "decoder_output", "=", "tf", ".", "reshape", "(", "decoder_output", ",", "[", "batch_size", ",", "hparams", ".", "img_len", ",", "hparams", ".", "img_len", "*", "hparams", ".", "num_channels", ",", "hparams", ".", "hidden_size", "]", ")", "return", "decoder_output" ]
Transformer image decoder over targets with local attention.

Args:
  targets: Tensor of shape [batch, ...], and whose size is batch * height *
    width * hparams.num_channels * hparams.hidden_size.
  encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
  ed_attention_bias: Tensor which broadcasts with shape [batch,
    hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
  hparams: HParams.
  name: string, variable scope.

Returns:
  Tensor of shape [batch, height, width * hparams.num_channels,
  hparams.hidden_size].
[ "Transformer", "image", "decoder", "over", "targets", "with", "local", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L422-L462
22,727
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
transformer_latent_decoder
def transformer_latent_decoder(x,
                               encoder_output,
                               ed_attention_bias,
                               hparams,
                               name=None):
  """Transformer decoder over latents using latent_attention_type.

  Args:
    x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the
      latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, length_q, hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="transformer_latent_dec"):
    batch_size = common_layers.shape_list(x)[0]
    compressed_img_len = (hparams.img_len //
                          2**(hparams.num_compress_steps // 2))
    x = tf.reshape(x, [batch_size,
                       compressed_img_len,
                       compressed_img_len * hparams.num_latents,
                       hparams.hidden_size])
    decoder_input, _, _ = cia.prepare_decoder(x, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_latent_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.latent_attention_type,
        encoder_decoder_attention_bias=ed_attention_bias,
        name="decoder")
    decoder_output = tf.reshape(decoder_output,
                                [batch_size,
                                 compressed_img_len**2 * hparams.num_latents,
                                 hparams.hidden_size])
    return decoder_output
python
def transformer_latent_decoder(x,
                               encoder_output,
                               ed_attention_bias,
                               hparams,
                               name=None):
  """Transformer decoder over latents using latent_attention_type.

  Args:
    x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the
      latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, length_q, hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="transformer_latent_dec"):
    batch_size = common_layers.shape_list(x)[0]
    compressed_img_len = (hparams.img_len //
                          2**(hparams.num_compress_steps // 2))
    x = tf.reshape(x, [batch_size,
                       compressed_img_len,
                       compressed_img_len * hparams.num_latents,
                       hparams.hidden_size])
    decoder_input, _, _ = cia.prepare_decoder(x, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_latent_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.latent_attention_type,
        encoder_decoder_attention_bias=ed_attention_bias,
        name="decoder")
    decoder_output = tf.reshape(decoder_output,
                                [batch_size,
                                 compressed_img_len**2 * hparams.num_latents,
                                 hparams.hidden_size])
    return decoder_output
[ "def", "transformer_latent_decoder", "(", "x", ",", "encoder_output", ",", "ed_attention_bias", ",", "hparams", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"transformer_latent_dec\"", ")", ":", "batch_size", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "0", "]", "compressed_img_len", "=", "(", "hparams", ".", "img_len", "//", "2", "**", "(", "hparams", ".", "num_compress_steps", "//", "2", ")", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch_size", ",", "compressed_img_len", ",", "compressed_img_len", "*", "hparams", ".", "num_latents", ",", "hparams", ".", "hidden_size", "]", ")", "decoder_input", ",", "_", ",", "_", "=", "cia", ".", "prepare_decoder", "(", "x", ",", "hparams", ")", "decoder_output", "=", "cia", ".", "transformer_decoder_layers", "(", "decoder_input", ",", "encoder_output", ",", "hparams", ".", "num_latent_layers", "or", "hparams", ".", "num_hidden_layers", ",", "hparams", ",", "attention_type", "=", "hparams", ".", "latent_attention_type", ",", "encoder_decoder_attention_bias", "=", "ed_attention_bias", ",", "name", "=", "\"decoder\"", ")", "decoder_output", "=", "tf", ".", "reshape", "(", "decoder_output", ",", "[", "batch_size", ",", "compressed_img_len", "**", "2", "*", "hparams", ".", "num_latents", ",", "hparams", ".", "hidden_size", "]", ")", "return", "decoder_output" ]
Transformer decoder over latents using latent_attention_type.

Args:
  x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the
    latent length, which is
    height * width * hparams.num_latents / (2**hparams.num_compress_steps).
  encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
  ed_attention_bias: Tensor which broadcasts with shape [batch,
    hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
  hparams: HParams.
  name: string, variable scope.

Returns:
  Tensor of shape [batch, length_q, hparams.hidden_size].
[ "Transformer", "decoder", "over", "latents", "using", "latent_attention_type", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L465-L506
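A quick check of the latent-length arithmetic behind the reshape above, with assumed hparams values (img_len=32, num_compress_steps=4, num_latents=2).

img_len, num_compress_steps, num_latents = 32, 4, 2

compressed_img_len = img_len // 2**(num_compress_steps // 2)  # 32 // 4 = 8
length_q = compressed_img_len**2 * num_latents                # 64 * 2 = 128
# x must therefore arrive as [batch, 128, hidden_size] and is viewed as
# [batch, 8, 8 * 2, hidden_size] before the decoder layers run.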
22,728
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
latent_prediction_model
def latent_prediction_model(inputs,
                            ed_attention_bias,
                            latents_discrete,
                            latents_dense,
                            hparams,
                            vocab_size=None,
                            name=None):
  """Transformer-based latent prediction model.

  It is an autoregressive decoder over latents_discrete given inputs.

  Args:
    inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Inputs to
      attend to for the decoder on latents.
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    latents_discrete: Tensor of shape [batch, length_q, vocab_size]. One-hot
      latents to compute log-probability of given inputs.
    latents_dense: Tensor of shape [batch, length_q, hparams.hidden_size].
      length_q is the latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    hparams: HParams.
    vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits.
    name: string, variable scope.

  Returns:
    latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size].
    latents_pred_loss: Tensor of shape [batch, length_q].
  """
  with tf.variable_scope(name, default_name="latent_prediction"):
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      latents_pred = transformer_latent_decoder(
          tf.stop_gradient(latents_dense), inputs, ed_attention_bias,
          hparams, name)
      if vocab_size is None:
        vocab_size = 2**hparams.bottleneck_bits
      if not hparams.soft_em:
        # TODO(trandustin): latents_discrete is not one-hot from
        # discrete_bottleneck unless hparams.soft_em is True. Refactor.
        latents_discrete = tf.one_hot(latents_discrete, depth=vocab_size)
      _, latent_pred_loss = ae_latent_softmax(
          latents_pred, tf.stop_gradient(latents_discrete), vocab_size,
          hparams)
  return latents_pred, latent_pred_loss
python
def latent_prediction_model(inputs,
                            ed_attention_bias,
                            latents_discrete,
                            latents_dense,
                            hparams,
                            vocab_size=None,
                            name=None):
  """Transformer-based latent prediction model.

  It is an autoregressive decoder over latents_discrete given inputs.

  Args:
    inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Inputs to
      attend to for the decoder on latents.
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    latents_discrete: Tensor of shape [batch, length_q, vocab_size]. One-hot
      latents to compute log-probability of given inputs.
    latents_dense: Tensor of shape [batch, length_q, hparams.hidden_size].
      length_q is the latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    hparams: HParams.
    vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits.
    name: string, variable scope.

  Returns:
    latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size].
    latents_pred_loss: Tensor of shape [batch, length_q].
  """
  with tf.variable_scope(name, default_name="latent_prediction"):
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      latents_pred = transformer_latent_decoder(
          tf.stop_gradient(latents_dense), inputs, ed_attention_bias,
          hparams, name)
      if vocab_size is None:
        vocab_size = 2**hparams.bottleneck_bits
      if not hparams.soft_em:
        # TODO(trandustin): latents_discrete is not one-hot from
        # discrete_bottleneck unless hparams.soft_em is True. Refactor.
        latents_discrete = tf.one_hot(latents_discrete, depth=vocab_size)
      _, latent_pred_loss = ae_latent_softmax(
          latents_pred, tf.stop_gradient(latents_discrete), vocab_size,
          hparams)
  return latents_pred, latent_pred_loss
[ "def", "latent_prediction_model", "(", "inputs", ",", "ed_attention_bias", ",", "latents_discrete", ",", "latents_dense", ",", "hparams", ",", "vocab_size", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"latent_prediction\"", ")", ":", "if", "hparams", ".", "mode", "!=", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ":", "latents_pred", "=", "transformer_latent_decoder", "(", "tf", ".", "stop_gradient", "(", "latents_dense", ")", ",", "inputs", ",", "ed_attention_bias", ",", "hparams", ",", "name", ")", "if", "vocab_size", "is", "None", ":", "vocab_size", "=", "2", "**", "hparams", ".", "bottleneck_bits", "if", "not", "hparams", ".", "soft_em", ":", "# TODO(trandustin): latents_discrete is not one-hot from", "# discrete_bottleneck unless hparams.soft_em is True. Refactor.", "latents_discrete", "=", "tf", ".", "one_hot", "(", "latents_discrete", ",", "depth", "=", "vocab_size", ")", "_", ",", "latent_pred_loss", "=", "ae_latent_softmax", "(", "latents_pred", ",", "tf", ".", "stop_gradient", "(", "latents_discrete", ")", ",", "vocab_size", ",", "hparams", ")", "return", "latents_pred", ",", "latent_pred_loss" ]
Transformer-based latent prediction model.

It is an autoregressive decoder over latents_discrete given inputs.

Args:
  inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Inputs to
    attend to for the decoder on latents.
  ed_attention_bias: Tensor which broadcasts with shape [batch,
    hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
  latents_discrete: Tensor of shape [batch, length_q, vocab_size]. One-hot
    latents to compute log-probability of given inputs.
  latents_dense: Tensor of shape [batch, length_q, hparams.hidden_size].
    length_q is the latent length, which is
    height * width * hparams.num_latents / (2**hparams.num_compress_steps).
  hparams: HParams.
  vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits.
  name: string, variable scope.

Returns:
  latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size].
  latents_pred_loss: Tensor of shape [batch, length_q].
[ "Transformer", "-", "based", "latent", "prediction", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L529-L573
22,729
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
iaf_flow
def iaf_flow(one_hot_assignments,
             scale_weights,
             scale_bias,
             num_codes,
             summary=True,
             name=None):
  """Performs a single IAF flow using scale and normalization transformations.

  Args:
    one_hot_assignments: Assignments Tensor with shape
      [num_samples, batch_size, latent_size, num_codes].
    scale_weights: Tensor corresponding to lower triangular matrix used to
      autoregressively generate scale matrix from assignments. To ensure the
      lower-triangular matrix has length of latent_size, scale_weights should
      be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
    scale_bias: Bias tensor to be added to scale tensor, with shape
      [latent_size, num_codes]. If scale weights are zero, initialize
      scale_bias to be log(exp(1.) / 2. - 1) so initial transformation is
      identity.
    num_codes: Number of codes in codebook.
    summary: Whether to save summaries.
    name: String used for name scope.

  Returns:
    flow_output: Transformed one-hot assignments.
    inverse_log_det_jacobian: Inverse log determinant of Jacobian
      corresponding to transformation.
  """
  with tf.name_scope(name, default_name="iaf"):
    # Pad the one_hot_assignments by zeroing out the first latent dimension and
    # shifting the rest down by one (and removing the last dimension).
    padded_assignments = tf.pad(
        one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :]
    scale_bijector = tfp.distributions.bijectors.Affine(
        scale_tril=tfp.distributions.fill_triangular(scale_weights))
    scale = scale_bijector.forward(
        tf.transpose(padded_assignments, [0, 1, 3, 2]))
    # Transpose the bijector output since it performs a batch matmul.
    scale = tf.transpose(scale, [0, 1, 3, 2])
    scale = tf.nn.softplus(scale)
    scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...])
    # Don't need last dimension since the transformation keeps it constant.
    scale = scale[..., :-1]

    z = one_hot_assignments[..., :-1]
    unnormalized_probs = tf.concat(
        [z * scale,
         one_hot_assignments[..., -1, tf.newaxis]], axis=-1)
    normalizer = tf.reduce_sum(unnormalized_probs, axis=-1)
    flow_output = unnormalized_probs / (normalizer[..., tf.newaxis])
    inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1)
                                + num_codes * tf.log(normalizer))
    if summary:
      tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1]))
      tf.summary.histogram("iaf/inverse_log_det_jacobian",
                           tf.reshape(inverse_log_det_jacobian, [-1]))
  return flow_output, inverse_log_det_jacobian
python
def iaf_flow(one_hot_assignments,
             scale_weights,
             scale_bias,
             num_codes,
             summary=True,
             name=None):
  """Performs a single IAF flow using scale and normalization transformations.

  Args:
    one_hot_assignments: Assignments Tensor with shape
      [num_samples, batch_size, latent_size, num_codes].
    scale_weights: Tensor corresponding to lower triangular matrix used to
      autoregressively generate scale matrix from assignments. To ensure the
      lower-triangular matrix has length of latent_size, scale_weights should
      be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
    scale_bias: Bias tensor to be added to scale tensor, with shape
      [latent_size, num_codes]. If scale weights are zero, initialize
      scale_bias to be log(exp(1.) / 2. - 1) so initial transformation is
      identity.
    num_codes: Number of codes in codebook.
    summary: Whether to save summaries.
    name: String used for name scope.

  Returns:
    flow_output: Transformed one-hot assignments.
    inverse_log_det_jacobian: Inverse log determinant of Jacobian
      corresponding to transformation.
  """
  with tf.name_scope(name, default_name="iaf"):
    # Pad the one_hot_assignments by zeroing out the first latent dimension and
    # shifting the rest down by one (and removing the last dimension).
    padded_assignments = tf.pad(
        one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :]
    scale_bijector = tfp.distributions.bijectors.Affine(
        scale_tril=tfp.distributions.fill_triangular(scale_weights))
    scale = scale_bijector.forward(
        tf.transpose(padded_assignments, [0, 1, 3, 2]))
    # Transpose the bijector output since it performs a batch matmul.
    scale = tf.transpose(scale, [0, 1, 3, 2])
    scale = tf.nn.softplus(scale)
    scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...])
    # Don't need last dimension since the transformation keeps it constant.
    scale = scale[..., :-1]

    z = one_hot_assignments[..., :-1]
    unnormalized_probs = tf.concat(
        [z * scale,
         one_hot_assignments[..., -1, tf.newaxis]], axis=-1)
    normalizer = tf.reduce_sum(unnormalized_probs, axis=-1)
    flow_output = unnormalized_probs / (normalizer[..., tf.newaxis])
    inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1)
                                + num_codes * tf.log(normalizer))
    if summary:
      tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1]))
      tf.summary.histogram("iaf/inverse_log_det_jacobian",
                           tf.reshape(inverse_log_det_jacobian, [-1]))
  return flow_output, inverse_log_det_jacobian
[ "def", "iaf_flow", "(", "one_hot_assignments", ",", "scale_weights", ",", "scale_bias", ",", "num_codes", ",", "summary", "=", "True", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "default_name", "=", "\"iaf\"", ")", ":", "# Pad the one_hot_assignments by zeroing out the first latent dimension and", "# shifting the rest down by one (and removing the last dimension).", "padded_assignments", "=", "tf", ".", "pad", "(", "one_hot_assignments", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "1", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "[", ":", ",", ":", ",", ":", "-", "1", ",", ":", "]", "scale_bijector", "=", "tfp", ".", "distributions", ".", "bijectors", ".", "Affine", "(", "scale_tril", "=", "tfp", ".", "distributions", ".", "fill_triangular", "(", "scale_weights", ")", ")", "scale", "=", "scale_bijector", ".", "forward", "(", "tf", ".", "transpose", "(", "padded_assignments", ",", "[", "0", ",", "1", ",", "3", ",", "2", "]", ")", ")", "# Transpose the bijector output since it performs a batch matmul.", "scale", "=", "tf", ".", "transpose", "(", "scale", ",", "[", "0", ",", "1", ",", "3", ",", "2", "]", ")", "scale", "=", "tf", ".", "nn", ".", "softplus", "(", "scale", ")", "scale", "=", "scale", "+", "tf", ".", "nn", ".", "softplus", "(", "scale_bias", "[", "tf", ".", "newaxis", ",", "tf", ".", "newaxis", ",", "...", "]", ")", "# Don't need last dimension since the transformation keeps it constant.", "scale", "=", "scale", "[", "...", ",", ":", "-", "1", "]", "z", "=", "one_hot_assignments", "[", "...", ",", ":", "-", "1", "]", "unnormalized_probs", "=", "tf", ".", "concat", "(", "[", "z", "*", "scale", ",", "one_hot_assignments", "[", "...", ",", "-", "1", ",", "tf", ".", "newaxis", "]", "]", ",", "axis", "=", "-", "1", ")", "normalizer", "=", "tf", ".", "reduce_sum", "(", "unnormalized_probs", ",", "axis", "=", "-", "1", ")", "flow_output", "=", "unnormalized_probs", "/", "(", "normalizer", "[", "...", ",", "tf", ".", "newaxis", "]", ")", "inverse_log_det_jacobian", "=", "(", "-", "tf", ".", "reduce_sum", "(", "tf", ".", "log", "(", "scale", ")", ",", "axis", "=", "-", "1", ")", "+", "num_codes", "*", "tf", ".", "log", "(", "normalizer", ")", ")", "if", "summary", ":", "tf", ".", "summary", ".", "histogram", "(", "\"iaf/scale\"", ",", "tf", ".", "reshape", "(", "scale", ",", "[", "-", "1", "]", ")", ")", "tf", ".", "summary", ".", "histogram", "(", "\"iaf/inverse_log_det_jacobian\"", ",", "tf", ".", "reshape", "(", "inverse_log_det_jacobian", ",", "[", "-", "1", "]", ")", ")", "return", "flow_output", ",", "inverse_log_det_jacobian" ]
Performs a single IAF flow using scale and normalization transformations.

Args:
  one_hot_assignments: Assignments Tensor with shape
    [num_samples, batch_size, latent_size, num_codes].
  scale_weights: Tensor corresponding to lower triangular matrix used to
    autoregressively generate scale matrix from assignments. To ensure the
    lower-triangular matrix has length of latent_size, scale_weights should
    be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
  scale_bias: Bias tensor to be added to scale tensor, with shape
    [latent_size, num_codes]. If scale weights are zero, initialize scale_bias
    to be log(exp(1.) / 2. - 1) so initial transformation is identity.
  num_codes: Number of codes in codebook.
  summary: Whether to save summaries.
  name: String used for name scope.

Returns:
  flow_output: Transformed one-hot assignments.
  inverse_log_det_jacobian: Inverse log determinant of Jacobian corresponding
    to transformation.
[ "Performs", "a", "single", "IAF", "flow", "using", "scale", "and", "normalization", "transformations", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L703-L758
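A minimal NumPy sketch of the autoregressive shift that the tf.pad call in iaf_flow performs; the sample and batch axes are dropped here and the values are made up for illustration — this is not code from the repository.

import numpy as np

# One-hot assignments for latent_size=3, num_codes=4.
one_hot = np.eye(4, dtype=np.float32)[[2, 0, 1]]
# Zero out the first latent position and shift the rest down by one,
# so position i only depends on assignments from positions < i.
padded = np.pad(one_hot, [(1, 0), (0, 0)])[:-1, :]
print(padded[0].sum())                        # 0.0: first latent dim is masked
print(np.allclose(padded[1:], one_hot[:-1]))  # True: rows shifted down by one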
22,730
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_lsun.py
_get_lsun
def _get_lsun(directory, category, split_name): """Downloads all lsun files to directory unless they are there.""" generator_utils.maybe_download(directory, _LSUN_DATA_FILENAME % (category, split_name), _LSUN_URL % (category, split_name))
python
def _get_lsun(directory, category, split_name): """Downloads all lsun files to directory unless they are there.""" generator_utils.maybe_download(directory, _LSUN_DATA_FILENAME % (category, split_name), _LSUN_URL % (category, split_name))
[ "def", "_get_lsun", "(", "directory", ",", "category", ",", "split_name", ")", ":", "generator_utils", ".", "maybe_download", "(", "directory", ",", "_LSUN_DATA_FILENAME", "%", "(", "category", ",", "split_name", ")", ",", "_LSUN_URL", "%", "(", "category", ",", "split_name", ")", ")" ]
Downloads all lsun files to directory unless they are there.
[ "Downloads", "all", "lsun", "files", "to", "directory", "unless", "they", "are", "there", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_lsun.py#L40-L44
22,731
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
_mixed_precision_is_enabled
def _mixed_precision_is_enabled(hparams): """Should be the same as in common_attention, avoiding import.""" activation_dtype = hparams.activation_dtype weight_dtype = hparams.weight_dtype return activation_dtype == tf.float16 and weight_dtype == tf.float32
python
def _mixed_precision_is_enabled(hparams): """Should be the same as in common_attention, avoiding import.""" activation_dtype = hparams.activation_dtype weight_dtype = hparams.weight_dtype return activation_dtype == tf.float16 and weight_dtype == tf.float32
[ "def", "_mixed_precision_is_enabled", "(", "hparams", ")", ":", "activation_dtype", "=", "hparams", ".", "activation_dtype", "weight_dtype", "=", "hparams", ".", "weight_dtype", "return", "activation_dtype", "==", "tf", ".", "float16", "and", "weight_dtype", "==", "tf", ".", "float32" ]
Should be the same as in common_attention, avoiding import.
[ "Should", "be", "the", "same", "as", "in", "common_attention", "avoiding", "import", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L36-L40
22,732
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
optimize
def optimize(loss, learning_rate, hparams, use_tpu=False, variables=None): """Minimize loss.""" loss = weight_decay_and_noise(loss, hparams, learning_rate) loss = tf.identity(loss, name="total_loss") if variables is None: variables = tf.trainable_variables() # Print trainable variables. log_variable_sizes(variables, verbose=hparams.summarize_vars) # Print non-trainable variables. non_trainable_variables = list( set(tf.global_variables()) - set(variables)) log_variable_sizes(non_trainable_variables, tag="Non-trainable variables", verbose=hparams.summarize_vars) if hparams.summarize_vars: summarize_variables(variables) # Summarize non-trainable variables as well summarize_variables(non_trainable_variables, tag="Non-trainable variables") diet_vars = [ v for v in tf.global_variables() if v.dtype == dtypes.float16_ref ] log_variable_sizes( diet_vars, "Diet Variables", verbose=hparams.summarize_vars) opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) if use_tpu: opt = tf.contrib.tpu.CrossShardOptimizer(opt) opt_summaries = [] if common_layers.should_generate_summaries(): tf.summary.scalar("learning_rate", learning_rate) opt_summaries.append("loss") if hparams.summarize_grads: tf.logging.info("Summarizing gradients") opt_summaries.extend( ["gradients", "gradient_norm", "global_gradient_norm"]) if hparams.clip_grad_norm: tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) if hparams.grad_noise_scale: tf.logging.info("Adding noise to gradients, noise scale: %0.5f", hparams.grad_noise_scale) train_op = tf.contrib.layers.optimize_loss( name="training", loss=loss, global_step=tf.train.get_or_create_global_step(), learning_rate=learning_rate, clip_gradients=hparams.clip_grad_norm or None, gradient_noise_scale=hparams.grad_noise_scale or None, optimizer=opt, summaries=opt_summaries, colocate_gradients_with_ops=True, variables=variables) return train_op
python
def optimize(loss, learning_rate, hparams, use_tpu=False, variables=None): """Minimize loss.""" loss = weight_decay_and_noise(loss, hparams, learning_rate) loss = tf.identity(loss, name="total_loss") if variables is None: variables = tf.trainable_variables() # Print trainable variables. log_variable_sizes(variables, verbose=hparams.summarize_vars) # Print non-trainable variables. non_trainable_variables = list( set(tf.global_variables()) - set(variables)) log_variable_sizes(non_trainable_variables, tag="Non-trainable variables", verbose=hparams.summarize_vars) if hparams.summarize_vars: summarize_variables(variables) # Summarize non-trainable variables as well summarize_variables(non_trainable_variables, tag="Non-trainable variables") diet_vars = [ v for v in tf.global_variables() if v.dtype == dtypes.float16_ref ] log_variable_sizes( diet_vars, "Diet Variables", verbose=hparams.summarize_vars) opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) if use_tpu: opt = tf.contrib.tpu.CrossShardOptimizer(opt) opt_summaries = [] if common_layers.should_generate_summaries(): tf.summary.scalar("learning_rate", learning_rate) opt_summaries.append("loss") if hparams.summarize_grads: tf.logging.info("Summarizing gradients") opt_summaries.extend( ["gradients", "gradient_norm", "global_gradient_norm"]) if hparams.clip_grad_norm: tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) if hparams.grad_noise_scale: tf.logging.info("Adding noise to gradients, noise scale: %0.5f", hparams.grad_noise_scale) train_op = tf.contrib.layers.optimize_loss( name="training", loss=loss, global_step=tf.train.get_or_create_global_step(), learning_rate=learning_rate, clip_gradients=hparams.clip_grad_norm or None, gradient_noise_scale=hparams.grad_noise_scale or None, optimizer=opt, summaries=opt_summaries, colocate_gradients_with_ops=True, variables=variables) return train_op
[ "def", "optimize", "(", "loss", ",", "learning_rate", ",", "hparams", ",", "use_tpu", "=", "False", ",", "variables", "=", "None", ")", ":", "loss", "=", "weight_decay_and_noise", "(", "loss", ",", "hparams", ",", "learning_rate", ")", "loss", "=", "tf", ".", "identity", "(", "loss", ",", "name", "=", "\"total_loss\"", ")", "if", "variables", "is", "None", ":", "variables", "=", "tf", ".", "trainable_variables", "(", ")", "# Print trainable variables.", "log_variable_sizes", "(", "variables", ",", "verbose", "=", "hparams", ".", "summarize_vars", ")", "# Print non-trainable variables.", "non_trainable_variables", "=", "list", "(", "set", "(", "tf", ".", "global_variables", "(", ")", ")", "-", "set", "(", "variables", ")", ")", "log_variable_sizes", "(", "non_trainable_variables", ",", "tag", "=", "\"Non-trainable variables\"", ",", "verbose", "=", "hparams", ".", "summarize_vars", ")", "if", "hparams", ".", "summarize_vars", ":", "summarize_variables", "(", "variables", ")", "# Summarize non-trainable variables as well", "summarize_variables", "(", "non_trainable_variables", ",", "tag", "=", "\"Non-trainable variables\"", ")", "diet_vars", "=", "[", "v", "for", "v", "in", "tf", ".", "global_variables", "(", ")", "if", "v", ".", "dtype", "==", "dtypes", ".", "float16_ref", "]", "log_variable_sizes", "(", "diet_vars", ",", "\"Diet Variables\"", ",", "verbose", "=", "hparams", ".", "summarize_vars", ")", "opt", "=", "ConditionalOptimizer", "(", "hparams", ".", "optimizer", ",", "learning_rate", ",", "hparams", ",", "use_tpu", ")", "if", "use_tpu", ":", "opt", "=", "tf", ".", "contrib", ".", "tpu", ".", "CrossShardOptimizer", "(", "opt", ")", "opt_summaries", "=", "[", "]", "if", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"learning_rate\"", ",", "learning_rate", ")", "opt_summaries", ".", "append", "(", "\"loss\"", ")", "if", "hparams", ".", "summarize_grads", ":", "tf", ".", "logging", ".", "info", "(", "\"Summarizing gradients\"", ")", "opt_summaries", ".", "extend", "(", "[", "\"gradients\"", ",", "\"gradient_norm\"", ",", "\"global_gradient_norm\"", "]", ")", "if", "hparams", ".", "clip_grad_norm", ":", "tf", ".", "logging", ".", "info", "(", "\"Clipping gradients, norm: %0.5f\"", ",", "hparams", ".", "clip_grad_norm", ")", "if", "hparams", ".", "grad_noise_scale", ":", "tf", ".", "logging", ".", "info", "(", "\"Adding noise to gradients, noise scale: %0.5f\"", ",", "hparams", ".", "grad_noise_scale", ")", "train_op", "=", "tf", ".", "contrib", ".", "layers", ".", "optimize_loss", "(", "name", "=", "\"training\"", ",", "loss", "=", "loss", ",", "global_step", "=", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", ",", "learning_rate", "=", "learning_rate", ",", "clip_gradients", "=", "hparams", ".", "clip_grad_norm", "or", "None", ",", "gradient_noise_scale", "=", "hparams", ".", "grad_noise_scale", "or", "None", ",", "optimizer", "=", "opt", ",", "summaries", "=", "opt_summaries", ",", "colocate_gradients_with_ops", "=", "True", ",", "variables", "=", "variables", ")", "return", "train_op" ]
Minimize loss.
[ "Minimize", "loss", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L43-L94
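One detail worth noting in optimize above is the falsy-to-None idiom: a clip_grad_norm or grad_noise_scale of 0 becomes None, which tf.contrib.layers.optimize_loss treats as "feature disabled". A tiny sketch:

clip_grad_norm = 0.0
print(clip_grad_norm or None)  # None -> gradient clipping disabled
clip_grad_norm = 2.0
print(clip_grad_norm or None)  # 2.0 -> clip the global gradient norm to 2.0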
22,733
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
weight_decay_and_noise
def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None): """Apply weight decay and weight noise.""" if var_list is None: var_list = tf.trainable_variables() decay_vars = [v for v in var_list] noise_vars = [v for v in var_list if "/body/" in v.name] weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars) if hparams.weight_decay and common_layers.should_generate_summaries(): tf.summary.scalar("losses/weight_decay", weight_decay_loss) weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate, noise_vars) with tf.control_dependencies(weight_noise_ops): loss = tf.identity(loss) loss += weight_decay_loss return loss
python
def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None): """Apply weight decay and weight noise.""" if var_list is None: var_list = tf.trainable_variables() decay_vars = [v for v in var_list] noise_vars = [v for v in var_list if "/body/" in v.name] weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars) if hparams.weight_decay and common_layers.should_generate_summaries(): tf.summary.scalar("losses/weight_decay", weight_decay_loss) weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate, noise_vars) with tf.control_dependencies(weight_noise_ops): loss = tf.identity(loss) loss += weight_decay_loss return loss
[ "def", "weight_decay_and_noise", "(", "loss", ",", "hparams", ",", "learning_rate", ",", "var_list", "=", "None", ")", ":", "if", "var_list", "is", "None", ":", "var_list", "=", "tf", ".", "trainable_variables", "(", ")", "decay_vars", "=", "[", "v", "for", "v", "in", "var_list", "]", "noise_vars", "=", "[", "v", "for", "v", "in", "var_list", "if", "\"/body/\"", "in", "v", ".", "name", "]", "weight_decay_loss", "=", "weight_decay", "(", "hparams", ".", "weight_decay", ",", "decay_vars", ")", "if", "hparams", ".", "weight_decay", "and", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"losses/weight_decay\"", ",", "weight_decay_loss", ")", "weight_noise_ops", "=", "weight_noise", "(", "hparams", ".", "weight_noise", ",", "learning_rate", ",", "noise_vars", ")", "with", "tf", ".", "control_dependencies", "(", "weight_noise_ops", ")", ":", "loss", "=", "tf", ".", "identity", "(", "loss", ")", "loss", "+=", "weight_decay_loss", "return", "loss" ]
Apply weight decay and weight noise.
[ "Apply", "weight", "decay", "and", "weight", "noise", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L238-L256
22,734
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
weight_noise
def weight_noise(noise_rate, learning_rate, var_list): """Apply weight noise to vars in var_list.""" if not noise_rate: return [tf.no_op()] tf.logging.info("Applying weight noise scaled by learning rate, " "noise_rate: %0.5f", noise_rate) noise_ops = [] for v in var_list: with tf.device(v.device): # pylint: disable=protected-access scale = noise_rate * learning_rate * 0.001 if common_layers.should_generate_summaries(): tf.summary.scalar("weight_noise_scale", scale) noise = tf.truncated_normal(v.shape) * scale noise_op = v.assign_add(noise) noise_ops.append(noise_op) return noise_ops
python
def weight_noise(noise_rate, learning_rate, var_list): """Apply weight noise to vars in var_list.""" if not noise_rate: return [tf.no_op()] tf.logging.info("Applying weight noise scaled by learning rate, " "noise_rate: %0.5f", noise_rate) noise_ops = [] for v in var_list: with tf.device(v.device): # pylint: disable=protected-access scale = noise_rate * learning_rate * 0.001 if common_layers.should_generate_summaries(): tf.summary.scalar("weight_noise_scale", scale) noise = tf.truncated_normal(v.shape) * scale noise_op = v.assign_add(noise) noise_ops.append(noise_op) return noise_ops
[ "def", "weight_noise", "(", "noise_rate", ",", "learning_rate", ",", "var_list", ")", ":", "if", "not", "noise_rate", ":", "return", "[", "tf", ".", "no_op", "(", ")", "]", "tf", ".", "logging", ".", "info", "(", "\"Applying weight noise scaled by learning rate, \"", "\"noise_rate: %0.5f\"", ",", "noise_rate", ")", "noise_ops", "=", "[", "]", "for", "v", "in", "var_list", ":", "with", "tf", ".", "device", "(", "v", ".", "device", ")", ":", "# pylint: disable=protected-access", "scale", "=", "noise_rate", "*", "learning_rate", "*", "0.001", "if", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"weight_noise_scale\"", ",", "scale", ")", "noise", "=", "tf", ".", "truncated_normal", "(", "v", ".", "shape", ")", "*", "scale", "noise_op", "=", "v", ".", "assign_add", "(", "noise", ")", "noise_ops", ".", "append", "(", "noise_op", ")", "return", "noise_ops" ]
Apply weight noise to vars in var_list.
[ "Apply", "weight", "noise", "to", "vars", "in", "var_list", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L259-L278
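A rough NumPy sketch of a single weight_noise step under assumed values noise_rate=1.0 and learning_rate=0.1; a plain normal draw stands in for tf.truncated_normal here, which additionally resamples values beyond two standard deviations.

import numpy as np

noise_rate, learning_rate = 1.0, 0.1
scale = noise_rate * learning_rate * 0.001  # 1e-4; shrinks with the learning rate
v = np.zeros([2, 2], dtype=np.float32)
v += (np.random.standard_normal(v.shape) * scale).astype(np.float32)
print(np.abs(v).max())  # on the order of 1e-4: a tiny perturbation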
22,735
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
weight_decay
def weight_decay(decay_rate, var_list, skip_biases=True): """Apply weight decay to vars in var_list.""" if not decay_rate: return 0. tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate) weight_decays = [] for v in var_list: # Weight decay. # This is a heuristic way to detect biases that works for main tf.layers. is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0") if not (skip_biases and is_bias): with tf.device(v.device): v_loss = tf.nn.l2_loss(v) weight_decays.append(v_loss) return tf.add_n(weight_decays) * decay_rate
python
def weight_decay(decay_rate, var_list, skip_biases=True): """Apply weight decay to vars in var_list.""" if not decay_rate: return 0. tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate) weight_decays = [] for v in var_list: # Weight decay. # This is a heuristic way to detect biases that works for main tf.layers. is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0") if not (skip_biases and is_bias): with tf.device(v.device): v_loss = tf.nn.l2_loss(v) weight_decays.append(v_loss) return tf.add_n(weight_decays) * decay_rate
[ "def", "weight_decay", "(", "decay_rate", ",", "var_list", ",", "skip_biases", "=", "True", ")", ":", "if", "not", "decay_rate", ":", "return", "0.", "tf", ".", "logging", ".", "info", "(", "\"Applying weight decay, decay_rate: %0.5f\"", ",", "decay_rate", ")", "weight_decays", "=", "[", "]", "for", "v", "in", "var_list", ":", "# Weight decay.", "# This is a heuristic way to detect biases that works for main tf.layers.", "is_bias", "=", "len", "(", "v", ".", "shape", ".", "as_list", "(", ")", ")", "==", "1", "and", "v", ".", "name", ".", "endswith", "(", "\"bias:0\"", ")", "if", "not", "(", "skip_biases", "and", "is_bias", ")", ":", "with", "tf", ".", "device", "(", "v", ".", "device", ")", ":", "v_loss", "=", "tf", ".", "nn", ".", "l2_loss", "(", "v", ")", "weight_decays", ".", "append", "(", "v_loss", ")", "return", "tf", ".", "add_n", "(", "weight_decays", ")", "*", "decay_rate" ]
Apply weight decay to vars in var_list.
[ "Apply", "weight", "decay", "to", "vars", "in", "var_list", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L281-L298
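A NumPy sketch of the decay term with made-up variables; tf.nn.l2_loss(v) computes sum(v**2) / 2, and rank-1 variables whose names end in "bias:0" are skipped by the heuristic.

import numpy as np

def l2_loss(v):
  return np.sum(v ** 2) / 2.0  # matches tf.nn.l2_loss

variables = {"dense/kernel:0": np.ones([2, 3]),
             "dense/bias:0": np.ones([3]),  # skipped by the bias heuristic
             "other/kernel:0": 2.0 * np.ones([2, 2])}
decay_rate = 1e-4
total = sum(l2_loss(v) for name, v in variables.items()
            if not (v.ndim == 1 and name.endswith("bias:0")))
print(total * decay_rate)  # (3.0 + 8.0) * 1e-4 = 0.0011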
22,736
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
summarize_variables
def summarize_variables(var_list=None, tag=None): """Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/. """ if var_list is None: var_list = tf.trainable_variables() if tag is None: tag = "training_variables/" name_to_var = {v.name: v for v in var_list} for v_name in list(name_to_var): v = name_to_var[v_name] tf.summary.histogram(tag + v_name, v)
python
def summarize_variables(var_list=None, tag=None): """Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/. """ if var_list is None: var_list = tf.trainable_variables() if tag is None: tag = "training_variables/" name_to_var = {v.name: v for v in var_list} for v_name in list(name_to_var): v = name_to_var[v_name] tf.summary.histogram(tag + v_name, v)
[ "def", "summarize_variables", "(", "var_list", "=", "None", ",", "tag", "=", "None", ")", ":", "if", "var_list", "is", "None", ":", "var_list", "=", "tf", ".", "trainable_variables", "(", ")", "if", "tag", "is", "None", ":", "tag", "=", "\"training_variables/\"", "name_to_var", "=", "{", "v", ".", "name", ":", "v", "for", "v", "in", "var_list", "}", "for", "v_name", "in", "list", "(", "name_to_var", ")", ":", "v", "=", "name_to_var", "[", "v_name", "]", "tf", ".", "summary", ".", "histogram", "(", "tag", "+", "v_name", ",", "v", ")" ]
Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/.
[ "Summarize", "the", "variables", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L330-L345
22,737
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
get_variable_initializer
def get_variable_initializer(hparams): """Get variable initializer from hparams.""" if not hparams.initializer: return None mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN, value=hparams.initializer_gain, hparams=hparams) if not tf.executing_eagerly(): tf.logging.info("Using variable initializer: %s", hparams.initializer) if hparams.initializer == "orthogonal": return tf.orthogonal_initializer(gain=hparams.initializer_gain) elif hparams.initializer == "uniform": max_val = 0.1 * hparams.initializer_gain return tf.random_uniform_initializer(-max_val, max_val) elif hparams.initializer == "normal_unit_scaling": return tf.variance_scaling_initializer( hparams.initializer_gain, mode="fan_avg", distribution="normal") elif hparams.initializer == "uniform_unit_scaling": return tf.variance_scaling_initializer( hparams.initializer_gain, mode="fan_avg", distribution="uniform") elif hparams.initializer == "xavier": return tf.initializers.glorot_uniform() else: raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
python
def get_variable_initializer(hparams): """Get variable initializer from hparams.""" if not hparams.initializer: return None mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN, value=hparams.initializer_gain, hparams=hparams) if not tf.executing_eagerly(): tf.logging.info("Using variable initializer: %s", hparams.initializer) if hparams.initializer == "orthogonal": return tf.orthogonal_initializer(gain=hparams.initializer_gain) elif hparams.initializer == "uniform": max_val = 0.1 * hparams.initializer_gain return tf.random_uniform_initializer(-max_val, max_val) elif hparams.initializer == "normal_unit_scaling": return tf.variance_scaling_initializer( hparams.initializer_gain, mode="fan_avg", distribution="normal") elif hparams.initializer == "uniform_unit_scaling": return tf.variance_scaling_initializer( hparams.initializer_gain, mode="fan_avg", distribution="uniform") elif hparams.initializer == "xavier": return tf.initializers.glorot_uniform() else: raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
[ "def", "get_variable_initializer", "(", "hparams", ")", ":", "if", "not", "hparams", ".", "initializer", ":", "return", "None", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "MODEL_HP_INITIALIZER_GAIN", ",", "value", "=", "hparams", ".", "initializer_gain", ",", "hparams", "=", "hparams", ")", "if", "not", "tf", ".", "executing_eagerly", "(", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Using variable initializer: %s\"", ",", "hparams", ".", "initializer", ")", "if", "hparams", ".", "initializer", "==", "\"orthogonal\"", ":", "return", "tf", ".", "orthogonal_initializer", "(", "gain", "=", "hparams", ".", "initializer_gain", ")", "elif", "hparams", ".", "initializer", "==", "\"uniform\"", ":", "max_val", "=", "0.1", "*", "hparams", ".", "initializer_gain", "return", "tf", ".", "random_uniform_initializer", "(", "-", "max_val", ",", "max_val", ")", "elif", "hparams", ".", "initializer", "==", "\"normal_unit_scaling\"", ":", "return", "tf", ".", "variance_scaling_initializer", "(", "hparams", ".", "initializer_gain", ",", "mode", "=", "\"fan_avg\"", ",", "distribution", "=", "\"normal\"", ")", "elif", "hparams", ".", "initializer", "==", "\"uniform_unit_scaling\"", ":", "return", "tf", ".", "variance_scaling_initializer", "(", "hparams", ".", "initializer_gain", ",", "mode", "=", "\"fan_avg\"", ",", "distribution", "=", "\"uniform\"", ")", "elif", "hparams", ".", "initializer", "==", "\"xavier\"", ":", "return", "tf", ".", "initializers", ".", "glorot_uniform", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Unrecognized initializer: %s\"", "%", "hparams", ".", "initializer", ")" ]
Get variable initializer from hparams.
[ "Get", "variable", "initializer", "from", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L348-L373
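A small sketch of the "uniform" branch of get_variable_initializer with a hypothetical gain of 1.0; the scope usage shows how such an initializer is typically installed in TF1-style code, not the exact call site in this repository.

import tensorflow as tf

gain = 1.0
max_val = 0.1 * gain
init = tf.random_uniform_initializer(-max_val, max_val)
with tf.variable_scope("body", initializer=init):
  w = tf.get_variable("w", [3, 3])  # initialized from U(-0.1, 0.1)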
22,738
tensorflow/tensor2tensor
tensor2tensor/layers/vqa_layers.py
summarize_tensors
def summarize_tensors(tensor_dict, tag=None): """Summarize the tensors. Args: tensor_dict: a dictionary of tensors. tag: name scope of the summary; defaults to tensors/. """ if tag is None: tag = "tensors/" for t_name in list(tensor_dict): t = tensor_dict[t_name] tf.summary.histogram(tag + t_name, t)
python
def summarize_tensors(tensor_dict, tag=None): """Summarize the tensors. Args: tensor_dict: a dictionary of tensors. tag: name scope of the summary; defaults to tensors/. """ if tag is None: tag = "tensors/" for t_name in list(tensor_dict): t = tensor_dict[t_name] tf.summary.histogram(tag + t_name, t)
[ "def", "summarize_tensors", "(", "tensor_dict", ",", "tag", "=", "None", ")", ":", "if", "tag", "is", "None", ":", "tag", "=", "\"tensors/\"", "for", "t_name", "in", "list", "(", "tensor_dict", ")", ":", "t", "=", "tensor_dict", "[", "t_name", "]", "tf", ".", "summary", ".", "histogram", "(", "tag", "+", "t_name", ",", "t", ")" ]
Summarize the tensors. Args: tensor_dict: a dictionary of tensors. tag: name scope of the summary; defaults to tensors/.
[ "Summarize", "the", "tensors", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vqa_layers.py#L33-L45
22,739
tensorflow/tensor2tensor
tensor2tensor/layers/vqa_layers.py
image_embedding
def image_embedding(images, model_fn=resnet_v1_152, trainable=True, is_training=True, weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-5, batch_norm_scale=True, add_summaries=False, reuse=False): """Extract image features from pretrained resnet model.""" is_resnet_training = trainable and is_training batch_norm_params = { "is_training": is_resnet_training, "trainable": trainable, "decay": batch_norm_decay, "epsilon": batch_norm_epsilon, "scale": batch_norm_scale, } if trainable: weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay) else: weights_regularizer = None with tf.variable_scope(model_fn.__name__, [images], reuse=reuse) as scope: with slim.arg_scope( [slim.conv2d], weights_regularizer=weights_regularizer, trainable=trainable): with slim.arg_scope( [slim.conv2d], weights_initializer=slim.variance_scaling_initializer(), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params): with slim.arg_scope([slim.batch_norm], is_training=is_resnet_training, trainable=trainable): with slim.arg_scope([slim.max_pool2d], padding="SAME"): net, end_points = model_fn( images, num_classes=None, global_pool=False, is_training=is_resnet_training, reuse=reuse, scope=scope) if add_summaries: for v in end_points.values(): tf.contrib.layers.summaries.summarize_activation(v) return net
python
def image_embedding(images, model_fn=resnet_v1_152, trainable=True, is_training=True, weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-5, batch_norm_scale=True, add_summaries=False, reuse=False): """Extract image features from pretrained resnet model.""" is_resnet_training = trainable and is_training batch_norm_params = { "is_training": is_resnet_training, "trainable": trainable, "decay": batch_norm_decay, "epsilon": batch_norm_epsilon, "scale": batch_norm_scale, } if trainable: weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay) else: weights_regularizer = None with tf.variable_scope(model_fn.__name__, [images], reuse=reuse) as scope: with slim.arg_scope( [slim.conv2d], weights_regularizer=weights_regularizer, trainable=trainable): with slim.arg_scope( [slim.conv2d], weights_initializer=slim.variance_scaling_initializer(), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params): with slim.arg_scope([slim.batch_norm], is_training=is_resnet_training, trainable=trainable): with slim.arg_scope([slim.max_pool2d], padding="SAME"): net, end_points = model_fn( images, num_classes=None, global_pool=False, is_training=is_resnet_training, reuse=reuse, scope=scope) if add_summaries: for v in end_points.values(): tf.contrib.layers.summaries.summarize_activation(v) return net
[ "def", "image_embedding", "(", "images", ",", "model_fn", "=", "resnet_v1_152", ",", "trainable", "=", "True", ",", "is_training", "=", "True", ",", "weight_decay", "=", "0.0001", ",", "batch_norm_decay", "=", "0.997", ",", "batch_norm_epsilon", "=", "1e-5", ",", "batch_norm_scale", "=", "True", ",", "add_summaries", "=", "False", ",", "reuse", "=", "False", ")", ":", "is_resnet_training", "=", "trainable", "and", "is_training", "batch_norm_params", "=", "{", "\"is_training\"", ":", "is_resnet_training", ",", "\"trainable\"", ":", "trainable", ",", "\"decay\"", ":", "batch_norm_decay", ",", "\"epsilon\"", ":", "batch_norm_epsilon", ",", "\"scale\"", ":", "batch_norm_scale", ",", "}", "if", "trainable", ":", "weights_regularizer", "=", "tf", ".", "contrib", ".", "layers", ".", "l2_regularizer", "(", "weight_decay", ")", "else", ":", "weights_regularizer", "=", "None", "with", "tf", ".", "variable_scope", "(", "model_fn", ".", "__name__", ",", "[", "images", "]", ",", "reuse", "=", "reuse", ")", "as", "scope", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "conv2d", "]", ",", "weights_regularizer", "=", "weights_regularizer", ",", "trainable", "=", "trainable", ")", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "conv2d", "]", ",", "weights_initializer", "=", "slim", ".", "variance_scaling_initializer", "(", ")", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "normalizer_fn", "=", "slim", ".", "batch_norm", ",", "normalizer_params", "=", "batch_norm_params", ")", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "batch_norm", "]", ",", "is_training", "=", "is_resnet_training", ",", "trainable", "=", "trainable", ")", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "max_pool2d", "]", ",", "padding", "=", "\"SAME\"", ")", ":", "net", ",", "end_points", "=", "model_fn", "(", "images", ",", "num_classes", "=", "None", ",", "global_pool", "=", "False", ",", "is_training", "=", "is_resnet_training", ",", "reuse", "=", "reuse", ",", "scope", "=", "scope", ")", "if", "add_summaries", ":", "for", "v", "in", "end_points", ".", "values", "(", ")", ":", "tf", ".", "contrib", ".", "layers", ".", "summaries", ".", "summarize_activation", "(", "v", ")", "return", "net" ]
Extract image features from pretrained resnet model.
[ "Extract", "image", "features", "from", "pretrained", "resnet", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vqa_layers.py#L48-L99
22,740
tensorflow/tensor2tensor
tensor2tensor/data_generators/audio.py
timit_generator
def timit_generator(data_dir, tmp_dir, training, how_many, start_from=0, eos_list=None, vocab_filename=None, vocab_size=0): """Data generator for TIMIT transcription problem. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many inputs and labels to generate. start_from: from which input to start. eos_list: optional list of end of sentence tokens, otherwise use default value `1`. vocab_filename: file within `tmp_dir` to read vocabulary from. If this is not provided then the target sentence will be encoded by character. vocab_size: integer target vocabulary size to generate. Yields: A dictionary representing the audio examples with the following fields: * inputs: a float sequence containing the audio data * audio/channel_count: an integer * audio/sample_count: an integer * audio/sample_width: an integer * targets: an integer sequence representing the encoded sentence """ del data_dir eos_list = [1] if eos_list is None else eos_list if vocab_filename is not None: # TODO(lukaszkaiser): Correct this call to generate a vocabulary. No data # sources are being passed. # vocab_symbolizer = generator_utils.get_or_generate_vocab( # data_dir, tmp_dir, vocab_filename, vocab_size) del vocab_size vocab_symbolizer = None assert False _get_timit(tmp_dir) datasets = (_TIMIT_TRAIN_DATASETS if training else _TIMIT_TEST_DATASETS) i = 0 for timit_data_dir, (audio_ext, transcription_ext) in datasets: timit_data_dir = os.path.join(tmp_dir, timit_data_dir) data_files = _collect_data(timit_data_dir, audio_ext, transcription_ext) data_pairs = data_files.values() for input_file, target_file in sorted(data_pairs)[start_from:]: if i == how_many: return i += 1 audio_data, sample_count, sample_width, num_channels = _get_audio_data( input_file) text_data = _get_text_data(target_file) if vocab_filename is None: label = [ord(c) for c in text_data] + eos_list else: label = vocab_symbolizer.encode(text_data) + eos_list yield { "inputs": audio_data, "audio/channel_count": [num_channels], "audio/sample_count": [sample_count], "audio/sample_width": [sample_width], "targets": label }
python
def timit_generator(data_dir, tmp_dir, training, how_many, start_from=0, eos_list=None, vocab_filename=None, vocab_size=0): """Data generator for TIMIT transcription problem. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many inputs and labels to generate. start_from: from which input to start. eos_list: optional list of end of sentence tokens, otherwise use default value `1`. vocab_filename: file within `tmp_dir` to read vocabulary from. If this is not provided then the target sentence will be encoded by character. vocab_size: integer target vocabulary size to generate. Yields: A dictionary representing the audio examples with the following fields: * inputs: a float sequence containing the audio data * audio/channel_count: an integer * audio/sample_count: an integer * audio/sample_width: an integer * targets: an integer sequence representing the encoded sentence """ del data_dir eos_list = [1] if eos_list is None else eos_list if vocab_filename is not None: # TODO(lukaszkaiser): Correct this call to generate a vocabulary. No data # sources are being passed. # vocab_symbolizer = generator_utils.get_or_generate_vocab( # data_dir, tmp_dir, vocab_filename, vocab_size) del vocab_size vocab_symbolizer = None assert False _get_timit(tmp_dir) datasets = (_TIMIT_TRAIN_DATASETS if training else _TIMIT_TEST_DATASETS) i = 0 for timit_data_dir, (audio_ext, transcription_ext) in datasets: timit_data_dir = os.path.join(tmp_dir, timit_data_dir) data_files = _collect_data(timit_data_dir, audio_ext, transcription_ext) data_pairs = data_files.values() for input_file, target_file in sorted(data_pairs)[start_from:]: if i == how_many: return i += 1 audio_data, sample_count, sample_width, num_channels = _get_audio_data( input_file) text_data = _get_text_data(target_file) if vocab_filename is None: label = [ord(c) for c in text_data] + eos_list else: label = vocab_symbolizer.encode(text_data) + eos_list yield { "inputs": audio_data, "audio/channel_count": [num_channels], "audio/sample_count": [sample_count], "audio/sample_width": [sample_width], "targets": label }
[ "def", "timit_generator", "(", "data_dir", ",", "tmp_dir", ",", "training", ",", "how_many", ",", "start_from", "=", "0", ",", "eos_list", "=", "None", ",", "vocab_filename", "=", "None", ",", "vocab_size", "=", "0", ")", ":", "del", "data_dir", "eos_list", "=", "[", "1", "]", "if", "eos_list", "is", "None", "else", "eos_list", "if", "vocab_filename", "is", "not", "None", ":", "# TODO(lukaszkaiser): Correct this call to generate a vocabulary. No data", "# sources are being passed.", "# vocab_symbolizer = generator_utils.get_or_generate_vocab(", "# data_dir, tmp_dir, vocab_filename, vocab_size)", "del", "vocab_size", "vocab_symbolizer", "=", "None", "assert", "False", "_get_timit", "(", "tmp_dir", ")", "datasets", "=", "(", "_TIMIT_TRAIN_DATASETS", "if", "training", "else", "_TIMIT_TEST_DATASETS", ")", "i", "=", "0", "for", "timit_data_dir", ",", "(", "audio_ext", ",", "transcription_ext", ")", "in", "datasets", ":", "timit_data_dir", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "timit_data_dir", ")", "data_files", "=", "_collect_data", "(", "timit_data_dir", ",", "audio_ext", ",", "transcription_ext", ")", "data_pairs", "=", "data_files", ".", "values", "(", ")", "for", "input_file", ",", "target_file", "in", "sorted", "(", "data_pairs", ")", "[", "start_from", ":", "]", ":", "if", "i", "==", "how_many", ":", "return", "i", "+=", "1", "audio_data", ",", "sample_count", ",", "sample_width", ",", "num_channels", "=", "_get_audio_data", "(", "input_file", ")", "text_data", "=", "_get_text_data", "(", "target_file", ")", "if", "vocab_filename", "is", "None", ":", "label", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "text_data", "]", "+", "eos_list", "else", ":", "label", "=", "vocab_symbolizer", ".", "encode", "(", "text_data", ")", "+", "eos_list", "yield", "{", "\"inputs\"", ":", "audio_data", ",", "\"audio/channel_count\"", ":", "[", "num_channels", "]", ",", "\"audio/sample_count\"", ":", "[", "sample_count", "]", ",", "\"audio/sample_width\"", ":", "[", "sample_width", "]", ",", "\"targets\"", ":", "label", "}" ]
Data generator for TIMIT transcription problem. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many inputs and labels to generate. start_from: from which input to start. eos_list: optional list of end of sentence tokens, otherwise use default value `1`. vocab_filename: file within `tmp_dir` to read vocabulary from. If this is not provided then the target sentence will be encoded by character. vocab_size: integer target vocabulary size to generate. Yields: A dictionary representing the audio examples with the following fields: * inputs: a float sequence containing the audio data * audio/channel_count: an integer * audio/sample_count: an integer * audio/sample_width: an integer * targets: an integer sequence representing the encoded sentence
[ "Data", "generator", "for", "TIMIT", "transcription", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/audio.py#L98-L162
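A tiny sketch of the character-level target encoding that timit_generator uses when vocab_filename is None: Unicode code points followed by the default end-of-sentence id.

text_data = "hi"
eos_list = [1]
label = [ord(c) for c in text_data] + eos_list
print(label)  # [104, 105, 1]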
22,741
tensorflow/tensor2tensor
tensor2tensor/data_generators/wikitext103.py
_build_vocab
def _build_vocab(filename, vocab_dir, vocab_name): """Reads a file to build a vocabulary. Args: filename: file to read list of words from. vocab_dir: directory where to save the vocabulary. vocab_name: vocab file name. Returns: text encoder. """ vocab_path = os.path.join(vocab_dir, vocab_name) if not tf.gfile.Exists(vocab_path): with tf.gfile.GFile(filename, "r") as f: data = f.read().split() counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) encoder = text_encoder.TokenTextEncoder(None, vocab_list=words) encoder.store_to_file(vocab_path) else: encoder = text_encoder.TokenTextEncoder(vocab_path) return encoder
python
def _build_vocab(filename, vocab_dir, vocab_name): """Reads a file to build a vocabulary. Args: filename: file to read list of words from. vocab_dir: directory where to save the vocabulary. vocab_name: vocab file name. Returns: text encoder. """ vocab_path = os.path.join(vocab_dir, vocab_name) if not tf.gfile.Exists(vocab_path): with tf.gfile.GFile(filename, "r") as f: data = f.read().split() counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) encoder = text_encoder.TokenTextEncoder(None, vocab_list=words) encoder.store_to_file(vocab_path) else: encoder = text_encoder.TokenTextEncoder(vocab_path) return encoder
[ "def", "_build_vocab", "(", "filename", ",", "vocab_dir", ",", "vocab_name", ")", ":", "vocab_path", "=", "os", ".", "path", ".", "join", "(", "vocab_dir", ",", "vocab_name", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "vocab_path", ")", ":", "with", "tf", ".", "gfile", ".", "GFile", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", ".", "split", "(", ")", "counter", "=", "collections", ".", "Counter", "(", "data", ")", "count_pairs", "=", "sorted", "(", "counter", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "(", "-", "x", "[", "1", "]", ",", "x", "[", "0", "]", ")", ")", "words", ",", "_", "=", "list", "(", "zip", "(", "*", "count_pairs", ")", ")", "encoder", "=", "text_encoder", ".", "TokenTextEncoder", "(", "None", ",", "vocab_list", "=", "words", ")", "encoder", ".", "store_to_file", "(", "vocab_path", ")", "else", ":", "encoder", "=", "text_encoder", ".", "TokenTextEncoder", "(", "vocab_path", ")", "return", "encoder" ]
Reads a file to build a vocabulary. Args: filename: file to read list of words from. vocab_dir: directory where to save the vocabulary. vocab_name: vocab file name. Returns: text encoder.
[ "Reads", "a", "file", "to", "build", "a", "vocabulary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikitext103.py#L37-L59
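A pure-Python sketch of the frequency ordering in _build_vocab, on made-up data: words are sorted by descending count, with ties broken alphabetically.

import collections

data = "the cat saw the dog the cat".split()
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
print(words)  # ('the', 'cat', 'dog', 'saw')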
22,742
tensorflow/tensor2tensor
tensor2tensor/models/research/aligned.py
aligned_8k_grouped
def aligned_8k_grouped(): """version for languagemodel_wiki_scramble8k50. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92 3.3 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15 Returns: a hparams object """ hparams = aligned_grouped() hparams.batch_size = 8192 # hparams.attention_image_summary = False hparams.num_groups = 16 hparams.multiplicative_overhead = 1.1 return hparams
python
def aligned_8k_grouped(): """version for languagemodel_wiki_scramble8k50. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92 3.3 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15 Returns: a hparams object """ hparams = aligned_grouped() hparams.batch_size = 8192 # hparams.attention_image_summary = False hparams.num_groups = 16 hparams.multiplicative_overhead = 1.1 return hparams
[ "def", "aligned_8k_grouped", "(", ")", ":", "hparams", "=", "aligned_grouped", "(", ")", "hparams", ".", "batch_size", "=", "8192", "# hparams.attention_image_summary = False", "hparams", ".", "num_groups", "=", "16", "hparams", ".", "multiplicative_overhead", "=", "1.1", "return", "hparams" ]
version for languagemodel_wiki_scramble8k50. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92 3.3 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15 Returns: a hparams object
[ "version", "for", "languagemodel_wiki_scramble8k50", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/aligned.py#L512-L527
22,743
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
_expand_to_beam_size
def _expand_to_beam_size(tensor, beam_size): """Tiles a given tensor by beam_size. Args: tensor: tensor to tile [batch_size, ...] beam_size: How much to tile the tensor by. Returns: Tiled tensor [batch_size, beam_size, ...] """ tensor = tf.expand_dims(tensor, axis=1) tile_dims = [1] * tensor.shape.ndims tile_dims[1] = beam_size return tf.tile(tensor, tile_dims)
python
def _expand_to_beam_size(tensor, beam_size): """Tiles a given tensor by beam_size. Args: tensor: tensor to tile [batch_size, ...] beam_size: How much to tile the tensor by. Returns: Tiled tensor [batch_size, beam_size, ...] """ tensor = tf.expand_dims(tensor, axis=1) tile_dims = [1] * tensor.shape.ndims tile_dims[1] = beam_size return tf.tile(tensor, tile_dims)
[ "def", "_expand_to_beam_size", "(", "tensor", ",", "beam_size", ")", ":", "tensor", "=", "tf", ".", "expand_dims", "(", "tensor", ",", "axis", "=", "1", ")", "tile_dims", "=", "[", "1", "]", "*", "tensor", ".", "shape", ".", "ndims", "tile_dims", "[", "1", "]", "=", "beam_size", "return", "tf", ".", "tile", "(", "tensor", ",", "tile_dims", ")" ]
Tiles a given tensor by beam_size. Args: tensor: tensor to tile [batch_size, ...] beam_size: How much to tile the tensor by. Returns: Tiled tensor [batch_size, beam_size, ...]
[ "Tiles", "a", "given", "tensor", "by", "beam_size", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L68-L82
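A NumPy analogue of _expand_to_beam_size for a rank-2 input: insert a beam axis and repeat along it.

import numpy as np

tensor = np.arange(6).reshape(2, 3)  # [batch_size=2, d=3]
beam_size = 4
tiled = np.tile(tensor[:, None, ...], [1, beam_size] + [1] * (tensor.ndim - 1))
print(tiled.shape)                    # (2, 4, 3)
print((tiled[:, 0] == tensor).all())  # True: each beam slice is a copy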
22,744
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
get_state_shape_invariants
def get_state_shape_invariants(tensor): """Returns the shape of the tensor but sets middle dims to None.""" shape = tensor.shape.as_list() for i in range(1, len(shape) - 1): shape[i] = None return tf.TensorShape(shape)
python
def get_state_shape_invariants(tensor): """Returns the shape of the tensor but sets middle dims to None.""" shape = tensor.shape.as_list() for i in range(1, len(shape) - 1): shape[i] = None return tf.TensorShape(shape)
[ "def", "get_state_shape_invariants", "(", "tensor", ")", ":", "shape", "=", "tensor", ".", "shape", ".", "as_list", "(", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "shape", ")", "-", "1", ")", ":", "shape", "[", "i", "]", "=", "None", "return", "tf", ".", "TensorShape", "(", "shape", ")" ]
Returns the shape of the tensor but sets middle dims to None.
[ "Returns", "the", "shape", "of", "the", "tensor", "but", "sets", "middle", "dims", "to", "None", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L85-L90
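The same middle-dims-to-None trick in plain Python; TensorFlow accepts such shapes as tf.while_loop invariants so that time-like middle dimensions can grow across decoding steps.

shape = [4, 10, 8]
for i in range(1, len(shape) - 1):
  shape[i] = None
print(shape)  # [4, None, 8]: only the middle dimension is left unconstrained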
22,745
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
compute_batch_indices
def compute_batch_indices(batch_size, beam_size): """Computes the i'th coordinate that contains the batch index for gathers. Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which batch the beam item is in. This will create the i of the i,j coordinate needed for the gather. Args: batch_size: Batch size beam_size: Size of the beam. Returns: batch_pos: [batch_size, beam_size] tensor of ids """ batch_pos = tf.range(batch_size * beam_size) // beam_size batch_pos = tf.reshape(batch_pos, [batch_size, beam_size]) return batch_pos
python
def compute_batch_indices(batch_size, beam_size): """Computes the i'th coordinate that contains the batch index for gathers. Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which batch the beam item is in. This will create the i of the i,j coordinate needed for the gather. Args: batch_size: Batch size beam_size: Size of the beam. Returns: batch_pos: [batch_size, beam_size] tensor of ids """ batch_pos = tf.range(batch_size * beam_size) // beam_size batch_pos = tf.reshape(batch_pos, [batch_size, beam_size]) return batch_pos
[ "def", "compute_batch_indices", "(", "batch_size", ",", "beam_size", ")", ":", "batch_pos", "=", "tf", ".", "range", "(", "batch_size", "*", "beam_size", ")", "//", "beam_size", "batch_pos", "=", "tf", ".", "reshape", "(", "batch_pos", ",", "[", "batch_size", ",", "beam_size", "]", ")", "return", "batch_pos" ]
Computes the i'th coordinate that contains the batch index for gathers. Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which batch the beam item is in. This will create the i of the i,j coordinate needed for the gather. Args: batch_size: Batch size beam_size: Size of the beam. Returns: batch_pos: [batch_size, beam_size] tensor of ids
[ "Computes", "the", "i", "th", "coordinate", "that", "contains", "the", "batch", "index", "for", "gathers", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L93-L108
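A NumPy rendering of compute_batch_indices for batch_size=2 and beam_size=3.

import numpy as np

batch_size, beam_size = 2, 3
batch_pos = np.arange(batch_size * beam_size) // beam_size
print(batch_pos.reshape(batch_size, beam_size))
# [[0 0 0]
#  [1 1 1]]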
22,746
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
fast_tpu_gather
def fast_tpu_gather(params, indices, name=None): """Fast gather implementation for models running on TPU. This function uses one_hot and batch matmul to do the gather, which is faster than gather_nd on TPU. For params that have dtype of int32 (sequences to gather from), batch_gather is used to keep accuracy. Args: params: A tensor from which to gather values. [batch_size, original_size, ...] indices: A tensor used as the index to gather values. [batch_size, selected_size]. name: A string, name of the operation (optional). Returns: gather_result: A tensor that has the same rank as params. [batch_size, selected_size, ...] """ with tf.name_scope(name): dtype = params.dtype def _gather(params, indices): """Fast gather using one_hot and batch matmul.""" if dtype != tf.float32: params = tf.to_float(params) shape = common_layers.shape_list(params) indices_shape = common_layers.shape_list(indices) ndims = params.shape.ndims # Adjust the shape of params to match one-hot indices, which is the # requirement of Batch MatMul. if ndims == 2: params = tf.expand_dims(params, axis=-1) if ndims > 3: params = tf.reshape(params, [shape[0], shape[1], -1]) gather_result = tf.matmul( tf.one_hot(indices, shape[1], dtype=params.dtype), params) if ndims == 2: gather_result = tf.squeeze(gather_result, axis=-1) if ndims > 3: shape[1] = indices_shape[1] gather_result = tf.reshape(gather_result, shape) if dtype != tf.float32: gather_result = tf.cast(gather_result, dtype) return gather_result # If the dtype is int, use the gather instead of one_hot matmul to avoid # precision loss. The max int value that can be represented by bfloat16 in the # MXU is 256, which is smaller than the possible id values. Encoding/decoding # can potentially be used to make it work, but the benefit is small right now. if dtype.is_integer: gather_result = tf.batch_gather(params, indices) else: gather_result = _gather(params, indices) return gather_result
python
def fast_tpu_gather(params, indices, name=None): """Fast gather implementation for models running on TPU. This function uses one_hot and batch matmul to do the gather, which is faster than gather_nd on TPU. For params that have dtype of int32 (sequences to gather from), batch_gather is used to keep accuracy. Args: params: A tensor from which to gather values. [batch_size, original_size, ...] indices: A tensor used as the index to gather values. [batch_size, selected_size]. name: A string, name of the operation (optional). Returns: gather_result: A tensor that has the same rank as params. [batch_size, selected_size, ...] """ with tf.name_scope(name): dtype = params.dtype def _gather(params, indices): """Fast gather using one_hot and batch matmul.""" if dtype != tf.float32: params = tf.to_float(params) shape = common_layers.shape_list(params) indices_shape = common_layers.shape_list(indices) ndims = params.shape.ndims # Adjust the shape of params to match one-hot indices, which is the # requirement of Batch MatMul. if ndims == 2: params = tf.expand_dims(params, axis=-1) if ndims > 3: params = tf.reshape(params, [shape[0], shape[1], -1]) gather_result = tf.matmul( tf.one_hot(indices, shape[1], dtype=params.dtype), params) if ndims == 2: gather_result = tf.squeeze(gather_result, axis=-1) if ndims > 3: shape[1] = indices_shape[1] gather_result = tf.reshape(gather_result, shape) if dtype != tf.float32: gather_result = tf.cast(gather_result, dtype) return gather_result # If the dtype is int, use the gather instead of one_hot matmul to avoid # precision loss. The max int value that can be represented by bfloat16 in the # MXU is 256, which is smaller than the possible id values. Encoding/decoding # can potentially be used to make it work, but the benefit is small right now. if dtype.is_integer: gather_result = tf.batch_gather(params, indices) else: gather_result = _gather(params, indices) return gather_result
[ "def", "fast_tpu_gather", "(", "params", ",", "indices", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "dtype", "=", "params", ".", "dtype", "def", "_gather", "(", "params", ",", "indices", ")", ":", "\"\"\"Fast gather using one_hot and batch matmul.\"\"\"", "if", "dtype", "!=", "tf", ".", "float32", ":", "params", "=", "tf", ".", "to_float", "(", "params", ")", "shape", "=", "common_layers", ".", "shape_list", "(", "params", ")", "indices_shape", "=", "common_layers", ".", "shape_list", "(", "indices", ")", "ndims", "=", "params", ".", "shape", ".", "ndims", "# Adjust the shape of params to match one-hot indices, which is the", "# requirement of Batch MatMul.", "if", "ndims", "==", "2", ":", "params", "=", "tf", ".", "expand_dims", "(", "params", ",", "axis", "=", "-", "1", ")", "if", "ndims", ">", "3", ":", "params", "=", "tf", ".", "reshape", "(", "params", ",", "[", "shape", "[", "0", "]", ",", "shape", "[", "1", "]", ",", "-", "1", "]", ")", "gather_result", "=", "tf", ".", "matmul", "(", "tf", ".", "one_hot", "(", "indices", ",", "shape", "[", "1", "]", ",", "dtype", "=", "params", ".", "dtype", ")", ",", "params", ")", "if", "ndims", "==", "2", ":", "gather_result", "=", "tf", ".", "squeeze", "(", "gather_result", ",", "axis", "=", "-", "1", ")", "if", "ndims", ">", "3", ":", "shape", "[", "1", "]", "=", "indices_shape", "[", "1", "]", "gather_result", "=", "tf", ".", "reshape", "(", "gather_result", ",", "shape", ")", "if", "dtype", "!=", "tf", ".", "float32", ":", "gather_result", "=", "tf", ".", "cast", "(", "gather_result", ",", "dtype", ")", "return", "gather_result", "# If the dtype is int, use the gather instead of one_hot matmul to avoid", "# precision loss. The max int value can be represented by bfloat16 in MXU is", "# 256, which is smaller than the possible id values. Encoding/decoding can", "# potentially used to make it work, but the benenfit is small right now.", "if", "dtype", ".", "is_integer", ":", "gather_result", "=", "tf", ".", "batch_gather", "(", "params", ",", "indices", ")", "else", ":", "gather_result", "=", "_gather", "(", "params", ",", "indices", ")", "return", "gather_result" ]
Fast gather implementation for models running on TPU. This function uses one_hot and batch matmul to do the gather, which is faster than gather_nd on TPU. For params that have dtype of int32 (sequences to gather from), batch_gather is used to keep accuracy. Args: params: A tensor from which to gather values. [batch_size, original_size, ...] indices: A tensor used as the index to gather values. [batch_size, selected_size]. name: A string, name of the operation (optional). Returns: gather_result: A tensor that has the same rank as params. [batch_size, selected_size, ...]
[ "Fast", "gather", "implementation", "for", "models", "running", "on", "TPU", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L111-L165
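A NumPy sketch of the one_hot-plus-matmul gather in fast_tpu_gather for the rank-3 case, checked against a direct per-batch lookup; this illustrates the math, not the TPU code path itself.

import numpy as np

params = np.random.rand(2, 5, 4).astype(np.float32)  # [batch, original_size, d]
indices = np.array([[3, 0], [1, 4]])                 # [batch, selected_size]
one_hot = (indices[..., None] == np.arange(5)).astype(np.float32)
gathered = np.matmul(one_hot, params)                # [batch, selected_size, d]
direct = np.take_along_axis(params, indices[..., None], axis=1)
print(np.allclose(gathered, direct))  # True: the matmul performs the gather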
22,747
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
_create_make_unique
def _create_make_unique(inputs): """Replaces the lower bits of each element with iota. The iota is used to derive the index, and also serves the purpose to make each element unique to break ties. Args: inputs: A tensor with rank of 2 and dtype of tf.float32. [batch_size, original_size]. Returns: A tensor after element wise transformation, with dtype the same as inputs. [batch_size, original_size]. Raises: ValueError: If the rank of the input tensor does not equal 2. """ if inputs.shape.ndims != 2: raise ValueError("Input of top_k_with_unique must be rank-2 " "but got: %s" % inputs.shape) height = inputs.shape[0] width = inputs.shape[1] zeros = tf.zeros([height, width], dtype=tf.int32) # Count_mask is used to mask away the low order bits to ensure that every # element is distinct. log2_ceiling = int(math.ceil(math.log(int(width), 2))) next_power_of_two = 1 << log2_ceiling count_mask = ~(next_power_of_two - 1) count_mask_r0 = tf.constant(count_mask) count_mask_r2 = tf.fill([height, width], count_mask_r0) # Smallest_normal is the bit representation of the smallest positive normal # floating point number. The sign is zero, exponent is one, and the fraction # is zero. smallest_normal = 1 << 23 smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32) smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0) # Low_bit_mask is used to mask away the sign bit when computing the absolute # value. low_bit_mask = ~(1 << 31) low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32) low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0) iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0), [height, 1]) # Compare the absolute value with positive zero to handle negative zero. input_r2 = tf.bitcast(inputs, tf.int32) abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2) if_zero_r2 = tf.equal(abs_r2, zeros) smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or( input_r2, smallest_normal_r2) input_no_zeros_r2 = tf.where( if_zero_r2, smallest_normal_preserving_sign_r2, input_r2) # Discard the low-order bits and replace with iota. and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2) or_r2 = tf.bitwise.bitwise_or(and_r2, iota) return tf.bitcast(or_r2, tf.float32)
python
def _create_make_unique(inputs): """Replaces the lower bits of each element with iota. The iota is used to derive the index, and also serves the purpose to make each element unique to break ties. Args: inputs: A tensor with rank of 2 and dtype of tf.float32. [batch_size, original_size]. Returns: A tensor after element wise transformation, with dtype the same as inputs. [batch_size, original_size]. Raises: ValueError: If the rank of the input tensor does not equal 2. """ if inputs.shape.ndims != 2: raise ValueError("Input of top_k_with_unique must be rank-2 " "but got: %s" % inputs.shape) height = inputs.shape[0] width = inputs.shape[1] zeros = tf.zeros([height, width], dtype=tf.int32) # Count_mask is used to mask away the low order bits to ensure that every # element is distinct. log2_ceiling = int(math.ceil(math.log(int(width), 2))) next_power_of_two = 1 << log2_ceiling count_mask = ~(next_power_of_two - 1) count_mask_r0 = tf.constant(count_mask) count_mask_r2 = tf.fill([height, width], count_mask_r0) # Smallest_normal is the bit representation of the smallest positive normal # floating point number. The sign is zero, exponent is one, and the fraction # is zero. smallest_normal = 1 << 23 smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32) smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0) # Low_bit_mask is used to mask away the sign bit when computing the absolute # value. low_bit_mask = ~(1 << 31) low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32) low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0) iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0), [height, 1]) # Compare the absolute value with positive zero to handle negative zero. input_r2 = tf.bitcast(inputs, tf.int32) abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2) if_zero_r2 = tf.equal(abs_r2, zeros) smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or( input_r2, smallest_normal_r2) input_no_zeros_r2 = tf.where( if_zero_r2, smallest_normal_preserving_sign_r2, input_r2) # Discard the low-order bits and replace with iota. and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2) or_r2 = tf.bitwise.bitwise_or(and_r2, iota) return tf.bitcast(or_r2, tf.float32)
[ "def", "_create_make_unique", "(", "inputs", ")", ":", "if", "inputs", ".", "shape", ".", "ndims", "!=", "2", ":", "raise", "ValueError", "(", "\"Input of top_k_with_unique must be rank-2 \"", "\"but got: %s\"", "%", "inputs", ".", "shape", ")", "height", "=", "inputs", ".", "shape", "[", "0", "]", "width", "=", "inputs", ".", "shape", "[", "1", "]", "zeros", "=", "tf", ".", "zeros", "(", "[", "height", ",", "width", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "# Count_mask is used to mask away the low order bits to ensure that every", "# element is distinct.", "log2_ceiling", "=", "int", "(", "math", ".", "ceil", "(", "math", ".", "log", "(", "int", "(", "width", ")", ",", "2", ")", ")", ")", "next_power_of_two", "=", "1", "<<", "log2_ceiling", "count_mask", "=", "~", "(", "next_power_of_two", "-", "1", ")", "count_mask_r0", "=", "tf", ".", "constant", "(", "count_mask", ")", "count_mask_r2", "=", "tf", ".", "fill", "(", "[", "height", ",", "width", "]", ",", "count_mask_r0", ")", "# Smallest_normal is the bit representation of the smallest positive normal", "# floating point number. The sign is zero, exponent is one, and the fraction", "# is zero.", "smallest_normal", "=", "1", "<<", "23", "smallest_normal_r0", "=", "tf", ".", "constant", "(", "smallest_normal", ",", "dtype", "=", "tf", ".", "int32", ")", "smallest_normal_r2", "=", "tf", ".", "fill", "(", "[", "height", ",", "width", "]", ",", "smallest_normal_r0", ")", "# Low_bit_mask is used to mask away the sign bit when computing the absolute", "# value.", "low_bit_mask", "=", "~", "(", "1", "<<", "31", ")", "low_bit_mask_r0", "=", "tf", ".", "constant", "(", "low_bit_mask", ",", "dtype", "=", "tf", ".", "int32", ")", "low_bit_mask_r2", "=", "tf", ".", "fill", "(", "[", "height", ",", "width", "]", ",", "low_bit_mask_r0", ")", "iota", "=", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "tf", ".", "range", "(", "width", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "0", ")", ",", "[", "height", ",", "1", "]", ")", "# Compare the absolute value with positive zero to handle negative zero.", "input_r2", "=", "tf", ".", "bitcast", "(", "inputs", ",", "tf", ".", "int32", ")", "abs_r2", "=", "tf", ".", "bitwise", ".", "bitwise_and", "(", "input_r2", ",", "low_bit_mask_r2", ")", "if_zero_r2", "=", "tf", ".", "equal", "(", "abs_r2", ",", "zeros", ")", "smallest_normal_preserving_sign_r2", "=", "tf", ".", "bitwise", ".", "bitwise_or", "(", "input_r2", ",", "smallest_normal_r2", ")", "input_no_zeros_r2", "=", "tf", ".", "where", "(", "if_zero_r2", ",", "smallest_normal_preserving_sign_r2", ",", "input_r2", ")", "# Discard the low-order bits and replace with iota.", "and_r2", "=", "tf", ".", "bitwise", ".", "bitwise_and", "(", "input_no_zeros_r2", ",", "count_mask_r2", ")", "or_r2", "=", "tf", ".", "bitwise", ".", "bitwise_or", "(", "and_r2", ",", "iota", ")", "return", "tf", ".", "bitcast", "(", "or_r2", ",", "tf", ".", "float32", ")" ]
Replaces the lower bits of each element with iota.

  The iota is used to derive the index, and it also makes each element
  unique so that ties can be broken.

  Args:
    inputs: A tensor with rank of 2 and dtype of tf.float32.
      [batch_size, original_size].

  Returns:
    A tensor after element-wise transformation, with dtype the same as inputs.
    [batch_size, original_size].

  Raises:
    ValueError: If the rank of the input tensor does not equal 2.
[ "Replaces", "the", "lower", "bits", "of", "each", "element", "with", "iota", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L168-L229
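A minimal NumPy sketch of the bit trick above (not code from the repository; the array values are invented). It keeps only the high-order bits of each float32 and ORs the column index (iota) into the freed low bits, so equal entries become distinct:

import numpy as np

x = np.array([[0.5, 0.5, 0.25, 0.5]], dtype=np.float32)
width = x.shape[1]
low_bits = int(np.ceil(np.log2(width)))          # 2 bits for width=4
count_mask = ~((1 << low_bits) - 1)              # zero out the low 2 bits
iota = np.arange(width, dtype=np.int32)
unique = ((x.view(np.int32) & count_mask) | iota).view(np.float32)
# The three 0.5 entries now differ in their lowest mantissa bits, so a
# later top-k can break the tie deterministically by column position.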
22,748
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
_create_topk_unique
def _create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
  """
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  # Select the current largest value k times and keep them in topk_r2. The
  # selected largest values are marked as the smallest value to avoid being
  # selected again.
  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
python
def _create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
  """
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  # Select the current largest value k times and keep them in topk_r2. The
  # selected largest values are marked as the smallest value to avoid being
  # selected again.
  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
[ "def", "_create_topk_unique", "(", "inputs", ",", "k", ")", ":", "height", "=", "inputs", ".", "shape", "[", "0", "]", "width", "=", "inputs", ".", "shape", "[", "1", "]", "neg_inf_r0", "=", "tf", ".", "constant", "(", "-", "np", ".", "inf", ",", "dtype", "=", "tf", ".", "float32", ")", "ones", "=", "tf", ".", "ones", "(", "[", "height", ",", "width", "]", ",", "dtype", "=", "tf", ".", "float32", ")", "neg_inf_r2", "=", "ones", "*", "neg_inf_r0", "inputs", "=", "tf", ".", "where", "(", "tf", ".", "is_nan", "(", "inputs", ")", ",", "neg_inf_r2", ",", "inputs", ")", "# Select the current largest value k times and keep them in topk_r2. The", "# selected largest values are marked as the smallest value to avoid being", "# selected again.", "tmp", "=", "inputs", "topk_r2", "=", "tf", ".", "zeros", "(", "[", "height", ",", "k", "]", ",", "dtype", "=", "tf", ".", "float32", ")", "for", "i", "in", "range", "(", "k", ")", ":", "kth_order_statistic", "=", "tf", ".", "reduce_max", "(", "tmp", ",", "axis", "=", "1", ",", "keepdims", "=", "True", ")", "k_mask", "=", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "tf", ".", "equal", "(", "tf", ".", "range", "(", "k", ")", ",", "tf", ".", "fill", "(", "[", "k", "]", ",", "i", ")", ")", ",", "0", ")", ",", "[", "height", ",", "1", "]", ")", "topk_r2", "=", "tf", ".", "where", "(", "k_mask", ",", "tf", ".", "tile", "(", "kth_order_statistic", ",", "[", "1", ",", "k", "]", ")", ",", "topk_r2", ")", "ge_r2", "=", "tf", ".", "greater_equal", "(", "inputs", ",", "tf", ".", "tile", "(", "kth_order_statistic", ",", "[", "1", ",", "width", "]", ")", ")", "tmp", "=", "tf", ".", "where", "(", "ge_r2", ",", "neg_inf_r2", ",", "inputs", ")", "log2_ceiling", "=", "int", "(", "math", ".", "ceil", "(", "math", ".", "log", "(", "float", "(", "int", "(", "width", ")", ")", ",", "2", ")", ")", ")", "next_power_of_two", "=", "1", "<<", "log2_ceiling", "count_mask", "=", "next_power_of_two", "-", "1", "mask_r0", "=", "tf", ".", "constant", "(", "count_mask", ")", "mask_r2", "=", "tf", ".", "fill", "(", "[", "height", ",", "k", "]", ",", "mask_r0", ")", "topk_r2_s32", "=", "tf", ".", "bitcast", "(", "topk_r2", ",", "tf", ".", "int32", ")", "topk_indices_r2", "=", "tf", ".", "bitwise", ".", "bitwise_and", "(", "topk_r2_s32", ",", "mask_r2", ")", "return", "topk_r2", ",", "topk_indices_r2" ]
Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
[ "Creates", "the", "top", "k", "values", "in", "sorted", "order", "with", "indices", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L232-L270
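Because `_create_make_unique` stores iota in the low bits, the index of each selected value can be decoded by masking with `next_power_of_two - 1`, which is what the last lines above do. A hedged NumPy illustration (argsort stands in for the repeated-max loop just to keep the sketch short; values invented):

import numpy as np

x = np.array([[0.3, 0.9, 0.9, 0.1]], dtype=np.float32)
width = x.shape[1]
low_mask = (1 << int(np.ceil(np.log2(width)))) - 1
uniq = ((x.view(np.int32) & ~low_mask)
        | np.arange(width, dtype=np.int32)).view(np.float32)
top2 = np.sort(uniq, axis=1)[:, ::-1][:, :2].copy()  # two largest values
indices = top2.view(np.int32) & low_mask             # low bits = columns
# indices == [[2, 1]]: the tie between the two 0.9 entries is resolved.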
22,749
tensorflow/tensor2tensor
tensor2tensor/utils/beam_search.py
top_k_with_unique
def top_k_with_unique(inputs, k):
  """Finds the values and indices of the k largest entries.

  Instead of doing sort like tf.nn.top_k, this function finds the max value
  k times. The running time is proportional to k, which is faster when k
  is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element;
  this makes the selection more stable when there are equal elements.
  The overhead is that output values are approximated.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    top_values: A tensor, the k largest elements in sorted order.
      [batch_size, k].
    indices: A tensor, indices of the top_values. [batch_size, k].
  """
  unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
  top_values, indices = _create_topk_unique(unique_inputs, k)
  top_values = tf.cast(top_values, inputs.dtype)
  return top_values, indices
python
def top_k_with_unique(inputs, k):
  """Finds the values and indices of the k largest entries.

  Instead of doing sort like tf.nn.top_k, this function finds the max value
  k times. The running time is proportional to k, which is faster when k
  is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element;
  this makes the selection more stable when there are equal elements.
  The overhead is that output values are approximated.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    top_values: A tensor, the k largest elements in sorted order.
      [batch_size, k].
    indices: A tensor, indices of the top_values. [batch_size, k].
  """
  unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
  top_values, indices = _create_topk_unique(unique_inputs, k)
  top_values = tf.cast(top_values, inputs.dtype)
  return top_values, indices
[ "def", "top_k_with_unique", "(", "inputs", ",", "k", ")", ":", "unique_inputs", "=", "_create_make_unique", "(", "tf", ".", "cast", "(", "inputs", ",", "tf", ".", "float32", ")", ")", "top_values", ",", "indices", "=", "_create_topk_unique", "(", "unique_inputs", ",", "k", ")", "top_values", "=", "tf", ".", "cast", "(", "top_values", ",", "inputs", ".", "dtype", ")", "return", "top_values", ",", "indices" ]
Finds the values and indices of the k largest entries.

  Instead of doing sort like tf.nn.top_k, this function finds the max value
  k times. The running time is proportional to k, which is faster when k
  is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element;
  this makes the selection more stable when there are equal elements.
  The overhead is that output values are approximated.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    top_values: A tensor, the k largest elements in sorted order.
      [batch_size, k].
    indices: A tensor, indices of the top_values. [batch_size, k].
[ "Finds", "the", "values", "and", "indices", "of", "the", "k", "largests", "entries", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L273-L295
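A possible end-to-end check, assuming a TF1-style session and that the three functions above (from tensor2tensor/utils/beam_search.py per these entries) are in scope:

import tensorflow.compat.v1 as tf  # assumption: TF1 graph/session API

tf.disable_eager_execution()
scores = tf.constant([[0.1, 0.9, 0.9, 0.3]])
values, indices = top_k_with_unique(scores, k=2)
with tf.Session() as sess:
  v, i = sess.run([values, indices])
# i picks the tied 0.9 columns in a deterministic order, and v is only
# approximate in its low-order bits (they were replaced by iota).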
22,750
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
video_augmentation
def video_augmentation(features, hue=False, saturate=False, contrast=False):
  """Augments video with optional hue, saturation and contrast.

  Args:
    features: dict, with keys "inputs", "targets".
              features["inputs"], 4-D Tensor, shape=(THWC)
              features["targets"], 4-D Tensor, shape=(THWC)
    hue: bool, apply hue_transform.
    saturate: bool, apply saturation transform.
    contrast: bool, apply contrast transform.

  Returns:
    augment_features: dict with transformed "inputs" and "targets".
  """
  inputs, targets = features["inputs"], features["targets"]
  in_steps = common_layers.shape_list(inputs)[0]

  # makes sure that the same augmentation is applied to both input and targets.
  # if input is 4-D, then tf.image applies the same transform across the batch.
  video = tf.concat((inputs, targets), axis=0)
  if hue:
    video = tf.image.random_hue(video, max_delta=0.2)
  if saturate:
    video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
  if contrast:
    video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
  features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
  return features
python
def video_augmentation(features, hue=False, saturate=False, contrast=False):
  """Augments video with optional hue, saturation and contrast.

  Args:
    features: dict, with keys "inputs", "targets".
              features["inputs"], 4-D Tensor, shape=(THWC)
              features["targets"], 4-D Tensor, shape=(THWC)
    hue: bool, apply hue_transform.
    saturate: bool, apply saturation transform.
    contrast: bool, apply contrast transform.

  Returns:
    augment_features: dict with transformed "inputs" and "targets".
  """
  inputs, targets = features["inputs"], features["targets"]
  in_steps = common_layers.shape_list(inputs)[0]

  # makes sure that the same augmentation is applied to both input and targets.
  # if input is 4-D, then tf.image applies the same transform across the batch.
  video = tf.concat((inputs, targets), axis=0)
  if hue:
    video = tf.image.random_hue(video, max_delta=0.2)
  if saturate:
    video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
  if contrast:
    video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
  features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
  return features
[ "def", "video_augmentation", "(", "features", ",", "hue", "=", "False", ",", "saturate", "=", "False", ",", "contrast", "=", "False", ")", ":", "inputs", ",", "targets", "=", "features", "[", "\"inputs\"", "]", ",", "features", "[", "\"targets\"", "]", "in_steps", "=", "common_layers", ".", "shape_list", "(", "inputs", ")", "[", "0", "]", "# makes sure that the same augmentation is applied to both input and targets.", "# if input is 4-D, then tf.image applies the same transform across the batch.", "video", "=", "tf", ".", "concat", "(", "(", "inputs", ",", "targets", ")", ",", "axis", "=", "0", ")", "if", "hue", ":", "video", "=", "tf", ".", "image", ".", "random_hue", "(", "video", ",", "max_delta", "=", "0.2", ")", "if", "saturate", ":", "video", "=", "tf", ".", "image", ".", "random_saturation", "(", "video", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "if", "contrast", ":", "video", "=", "tf", ".", "image", ".", "random_contrast", "(", "video", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "features", "[", "\"inputs\"", "]", ",", "features", "[", "\"targets\"", "]", "=", "video", "[", ":", "in_steps", "]", ",", "video", "[", "in_steps", ":", "]", "return", "features" ]
Augments video with optional hue, saturation and contrast.

  Args:
    features: dict, with keys "inputs", "targets".
              features["inputs"], 4-D Tensor, shape=(THWC)
              features["targets"], 4-D Tensor, shape=(THWC)
    hue: bool, apply hue_transform.
    saturate: bool, apply saturation transform.
    contrast: bool, apply contrast transform.

  Returns:
    augment_features: dict with transformed "inputs" and "targets".
[ "Augments", "video", "with", "optional", "hue", "saturation", "and", "constrast", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L52-L78
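The concat-then-split trick matters because each `tf.image.random_*` call draws a single random factor and applies it to the whole 4-D tensor; stacking inputs and targets along time therefore guarantees one consistent transform for the whole clip. A minimal sketch with invented shapes:

import tensorflow.compat.v1 as tf  # assumption: TF1-style API as in the code

tf.disable_eager_execution()
inputs = tf.zeros([4, 64, 64, 3])    # 4 conditioning frames, (THWC)
targets = tf.zeros([6, 64, 64, 3])   # 6 target frames
video = tf.concat([inputs, targets], axis=0)        # one 10-frame clip
video = tf.image.random_hue(video, max_delta=0.2)   # one hue delta for all
inputs_aug, targets_aug = video[:4], video[4:]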
22,751
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
create_border
def create_border(video, color="blue", border_percent=2):
  """Creates a border around each frame to differentiate input and target.

  Args:
    video: 5-D NumPy array.
    color: string, "blue", "red" or "green".
    border_percent: Percentage of the frame covered by the border.

  Returns:
    video: 5-D NumPy array.
  """
  # Do not create border if the video is not in RGB format
  if video.shape[-1] != 3:
    return video
  color_to_axis = {"blue": 2, "red": 0, "green": 1}
  axis = color_to_axis[color]
  _, _, height, width, _ = video.shape
  border_height = np.ceil(border_percent * height / 100.0).astype(int)
  border_width = np.ceil(border_percent * width / 100.0).astype(int)
  video[:, :, :border_height, :, axis] = 255
  video[:, :, -border_height:, :, axis] = 255
  video[:, :, :, :border_width, axis] = 255
  video[:, :, :, -border_width:, axis] = 255
  return video
python
def create_border(video, color="blue", border_percent=2):
  """Creates a border around each frame to differentiate input and target.

  Args:
    video: 5-D NumPy array.
    color: string, "blue", "red" or "green".
    border_percent: Percentage of the frame covered by the border.

  Returns:
    video: 5-D NumPy array.
  """
  # Do not create border if the video is not in RGB format
  if video.shape[-1] != 3:
    return video
  color_to_axis = {"blue": 2, "red": 0, "green": 1}
  axis = color_to_axis[color]
  _, _, height, width, _ = video.shape
  border_height = np.ceil(border_percent * height / 100.0).astype(int)
  border_width = np.ceil(border_percent * width / 100.0).astype(int)
  video[:, :, :border_height, :, axis] = 255
  video[:, :, -border_height:, :, axis] = 255
  video[:, :, :, :border_width, axis] = 255
  video[:, :, :, -border_width:, axis] = 255
  return video
[ "def", "create_border", "(", "video", ",", "color", "=", "\"blue\"", ",", "border_percent", "=", "2", ")", ":", "# Do not create border if the video is not in RGB format", "if", "video", ".", "shape", "[", "-", "1", "]", "!=", "3", ":", "return", "video", "color_to_axis", "=", "{", "\"blue\"", ":", "2", ",", "\"red\"", ":", "0", ",", "\"green\"", ":", "1", "}", "axis", "=", "color_to_axis", "[", "color", "]", "_", ",", "_", ",", "height", ",", "width", ",", "_", "=", "video", ".", "shape", "border_height", "=", "np", ".", "ceil", "(", "border_percent", "*", "height", "/", "100.0", ")", ".", "astype", "(", "np", ".", "int", ")", "border_width", "=", "np", ".", "ceil", "(", "border_percent", "*", "width", "/", "100.0", ")", ".", "astype", "(", "np", ".", "int", ")", "video", "[", ":", ",", ":", ",", ":", "border_height", ",", ":", ",", "axis", "]", "=", "255", "video", "[", ":", ",", ":", ",", "-", "border_height", ":", ",", ":", ",", "axis", "]", "=", "255", "video", "[", ":", ",", ":", ",", ":", ",", ":", "border_width", ",", "axis", "]", "=", "255", "video", "[", ":", ",", ":", ",", ":", ",", "-", "border_width", ":", ",", "axis", "]", "=", "255", "return", "video" ]
Creates a border around each frame to differentiate input and target.

  Args:
    video: 5-D NumPy array.
    color: string, "blue", "red" or "green".
    border_percent: Percentage of the frame covered by the border.

  Returns:
    video: 5-D NumPy array.
[ "Creates", "a", "border", "around", "each", "frame", "to", "differentiate", "input", "and", "target", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L81-L103
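A quick check of the rounding (hypothetical shapes; assumes `create_border` above is in scope): 2% of a 64-pixel side is 1.28, which np.ceil rounds up to a 2-pixel border on each edge.

import numpy as np

video = np.zeros((1, 3, 64, 64, 3), dtype=np.uint8)  # (N, T, H, W, C)
out = create_border(video.copy(), color="blue", border_percent=2)
assert (out[:, :, :2, :, 2] == 255).all()    # top border, blue channel
assert (out[:, :, :, -2:, 2] == 255).all()   # right border, blue channel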
22,752
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
convert_videos_to_summaries
def convert_videos_to_summaries(input_videos, output_videos, target_videos,
                                tag, decode_hparams,
                                display_ground_truth=False):
  """Converts input, output and target videos into video summaries.

  Args:
    input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
    output_videos: 5-D NumPy array, (NTHWC) model predictions.
    target_videos: 5-D NumPy array, (NTHWC) target frames.
    tag: tf summary tag.
    decode_hparams: HParams.
    display_ground_truth: Whether or not to display ground truth videos.

  Returns:
    summaries: a list of tf frame-by-frame and video summaries.
  """
  fps = decode_hparams.frames_per_second
  border_percent = decode_hparams.border_percent
  max_outputs = decode_hparams.max_display_outputs
  target_steps = target_videos.shape[1]
  all_summaries = []

  input_videos = create_border(
      input_videos, color="blue", border_percent=border_percent)
  target_videos = create_border(
      target_videos, color="red", border_percent=border_percent)
  output_videos = create_border(
      output_videos, color="red", border_percent=border_percent)

  all_input = np.concatenate((input_videos, target_videos), axis=1)
  all_output = np.concatenate((input_videos, output_videos), axis=1)
  output_summ_vals, _ = common_video.py_gif_summary(
      "%s/output" % tag, all_output, max_outputs=max_outputs, fps=fps,
      return_summary_value=True)
  all_summaries.extend(output_summ_vals)

  # Optionally display ground truth.
  if display_ground_truth:
    input_summ_vals, _ = common_video.py_gif_summary(
        "%s/input" % tag, all_input, max_outputs=max_outputs, fps=fps,
        return_summary_value=True)
    all_summaries.extend(input_summ_vals)

  # Frame-by-frame summaries
  iterable = zip(output_videos[:max_outputs, :target_steps],
                 target_videos[:max_outputs])
  for ind, (output_video, target_video) in enumerate(iterable):
    t, h, w, c = output_video.shape
    # Tile vertically
    output_frames = np.reshape(output_video, (t * h, w, c))
    target_frames = np.reshape(target_video, (t * h, w, c))
    # Concat across width.
    all_frames = np.concatenate((output_frames, target_frames), axis=1)
    # Use a separate name so "tag" is not overwritten on every iteration.
    frame_tag = "input/output/%s_sample_%d" % (tag, ind)
    frame_by_frame_summ = image_utils.image_to_tf_summary_value(
        all_frames, tag=frame_tag)
    all_summaries.append(frame_by_frame_summ)
  return all_summaries
python
def convert_videos_to_summaries(input_videos, output_videos, target_videos,
                                tag, decode_hparams,
                                display_ground_truth=False):
  """Converts input, output and target videos into video summaries.

  Args:
    input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
    output_videos: 5-D NumPy array, (NTHWC) model predictions.
    target_videos: 5-D NumPy array, (NTHWC) target frames.
    tag: tf summary tag.
    decode_hparams: HParams.
    display_ground_truth: Whether or not to display ground truth videos.

  Returns:
    summaries: a list of tf frame-by-frame and video summaries.
  """
  fps = decode_hparams.frames_per_second
  border_percent = decode_hparams.border_percent
  max_outputs = decode_hparams.max_display_outputs
  target_steps = target_videos.shape[1]
  all_summaries = []

  input_videos = create_border(
      input_videos, color="blue", border_percent=border_percent)
  target_videos = create_border(
      target_videos, color="red", border_percent=border_percent)
  output_videos = create_border(
      output_videos, color="red", border_percent=border_percent)

  all_input = np.concatenate((input_videos, target_videos), axis=1)
  all_output = np.concatenate((input_videos, output_videos), axis=1)
  output_summ_vals, _ = common_video.py_gif_summary(
      "%s/output" % tag, all_output, max_outputs=max_outputs, fps=fps,
      return_summary_value=True)
  all_summaries.extend(output_summ_vals)

  # Optionally display ground truth.
  if display_ground_truth:
    input_summ_vals, _ = common_video.py_gif_summary(
        "%s/input" % tag, all_input, max_outputs=max_outputs, fps=fps,
        return_summary_value=True)
    all_summaries.extend(input_summ_vals)

  # Frame-by-frame summaries
  iterable = zip(output_videos[:max_outputs, :target_steps],
                 target_videos[:max_outputs])
  for ind, (output_video, target_video) in enumerate(iterable):
    t, h, w, c = output_video.shape
    # Tile vertically
    output_frames = np.reshape(output_video, (t * h, w, c))
    target_frames = np.reshape(target_video, (t * h, w, c))
    # Concat across width.
    all_frames = np.concatenate((output_frames, target_frames), axis=1)
    # Use a separate name so "tag" is not overwritten on every iteration.
    frame_tag = "input/output/%s_sample_%d" % (tag, ind)
    frame_by_frame_summ = image_utils.image_to_tf_summary_value(
        all_frames, tag=frame_tag)
    all_summaries.append(frame_by_frame_summ)
  return all_summaries
[ "def", "convert_videos_to_summaries", "(", "input_videos", ",", "output_videos", ",", "target_videos", ",", "tag", ",", "decode_hparams", ",", "display_ground_truth", "=", "False", ")", ":", "fps", "=", "decode_hparams", ".", "frames_per_second", "border_percent", "=", "decode_hparams", ".", "border_percent", "max_outputs", "=", "decode_hparams", ".", "max_display_outputs", "target_steps", "=", "target_videos", ".", "shape", "[", "1", "]", "all_summaries", "=", "[", "]", "input_videos", "=", "create_border", "(", "input_videos", ",", "color", "=", "\"blue\"", ",", "border_percent", "=", "border_percent", ")", "target_videos", "=", "create_border", "(", "target_videos", ",", "color", "=", "\"red\"", ",", "border_percent", "=", "border_percent", ")", "output_videos", "=", "create_border", "(", "output_videos", ",", "color", "=", "\"red\"", ",", "border_percent", "=", "border_percent", ")", "all_input", "=", "np", ".", "concatenate", "(", "(", "input_videos", ",", "target_videos", ")", ",", "axis", "=", "1", ")", "all_output", "=", "np", ".", "concatenate", "(", "(", "input_videos", ",", "output_videos", ")", ",", "axis", "=", "1", ")", "output_summ_vals", ",", "_", "=", "common_video", ".", "py_gif_summary", "(", "\"%s/output\"", "%", "tag", ",", "all_output", ",", "max_outputs", "=", "max_outputs", ",", "fps", "=", "fps", ",", "return_summary_value", "=", "True", ")", "all_summaries", ".", "extend", "(", "output_summ_vals", ")", "# Optionally display ground truth.", "if", "display_ground_truth", ":", "input_summ_vals", ",", "_", "=", "common_video", ".", "py_gif_summary", "(", "\"%s/input\"", "%", "tag", ",", "all_input", ",", "max_outputs", "=", "max_outputs", ",", "fps", "=", "fps", ",", "return_summary_value", "=", "True", ")", "all_summaries", ".", "extend", "(", "input_summ_vals", ")", "# Frame-by-frame summaries", "iterable", "=", "zip", "(", "output_videos", "[", ":", "max_outputs", ",", ":", "target_steps", "]", ",", "target_videos", "[", ":", "max_outputs", "]", ")", "for", "ind", ",", "(", "input_video", ",", "output_video", ")", "in", "enumerate", "(", "iterable", ")", ":", "t", ",", "h", ",", "w", ",", "c", "=", "input_video", ".", "shape", "# Tile vertically", "input_frames", "=", "np", ".", "reshape", "(", "input_video", ",", "(", "t", "*", "h", ",", "w", ",", "c", ")", ")", "output_frames", "=", "np", ".", "reshape", "(", "output_video", ",", "(", "t", "*", "h", ",", "w", ",", "c", ")", ")", "# Concat across width.", "all_frames", "=", "np", ".", "concatenate", "(", "(", "input_frames", ",", "output_frames", ")", ",", "axis", "=", "1", ")", "tag", "=", "\"input/output/%s_sample_%d\"", "%", "(", "tag", ",", "ind", ")", "frame_by_frame_summ", "=", "image_utils", ".", "image_to_tf_summary_value", "(", "all_frames", ",", "tag", "=", "tag", ")", "all_summaries", ".", "append", "(", "frame_by_frame_summ", ")", "return", "all_summaries" ]
Converts input, output and target videos into video summaries.

  Args:
    input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
    output_videos: 5-D NumPy array, (NTHWC) model predictions.
    target_videos: 5-D NumPy array, (NTHWC) target frames.
    tag: tf summary tag.
    decode_hparams: HParams.
    display_ground_truth: Whether or not to display ground truth videos.

  Returns:
    summaries: a list of tf frame-by-frame and video summaries.
[ "Converts", "input", "output", "and", "target", "videos", "into", "video", "summaries", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L106-L162
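The frame-by-frame layout rests on one reshape: a (T, H, W, C) clip becomes a tall (T*H, W, C) strip, and two strips concatenated along width sit side by side. A small NumPy sketch with invented shapes:

import numpy as np

t, h, w, c = 3, 2, 2, 1
clip_a = np.arange(t * h * w * c).reshape(t, h, w, c)
clip_b = clip_a * 10
strip_a = clip_a.reshape(t * h, w, c)   # frames stacked vertically
strip_b = clip_b.reshape(t * h, w, c)
side_by_side = np.concatenate((strip_a, strip_b), axis=1)
assert side_by_side.shape == (6, 4, 1)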
22,753
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
display_video_hooks
def display_video_hooks(hook_args):
  """Hooks to display videos at decode time."""
  predictions = hook_args.predictions
  max_outputs = hook_args.decode_hparams.max_display_outputs
  max_decodes = hook_args.decode_hparams.max_display_decodes

  with tf.Graph().as_default():
    _, best_decodes = video_metrics.compute_video_metrics_from_predictions(
        predictions, decode_hparams=hook_args.decode_hparams)

  all_summaries = []

  # Displays decodes corresponding to the best/worst metric.
  for metric, metric_decode_inds in best_decodes.items():
    curr_metric_inds = metric_decode_inds[:max_outputs]
    best_inputs, best_outputs, best_targets = [], [], []
    for sample_ind, decode_ind in enumerate(curr_metric_inds):
      curr_decode = predictions[decode_ind][sample_ind]
      best_inputs.append(curr_decode["inputs"])
      best_outputs.append(curr_decode["outputs"])
      best_targets.append(curr_decode["targets"])
    best_inputs = np.array(best_inputs, dtype=np.uint8)
    best_outputs = np.array(best_outputs, dtype=np.uint8)
    best_targets = np.array(best_targets, dtype=np.uint8)
    summaries = convert_videos_to_summaries(
        best_inputs, best_outputs, best_targets,
        tag=metric, decode_hparams=hook_args.decode_hparams)
    all_summaries.extend(summaries)

  # Display random decodes for ten conditioning frames.
  for decode_ind, decode in enumerate(predictions[:max_decodes]):
    target_videos = video_metrics.stack_data_given_key(decode, "targets")
    output_videos = video_metrics.stack_data_given_key(decode, "outputs")
    input_videos = video_metrics.stack_data_given_key(decode, "inputs")
    target_videos = np.asarray(target_videos, dtype=np.uint8)
    output_videos = np.asarray(output_videos, dtype=np.uint8)
    input_videos = np.asarray(input_videos, dtype=np.uint8)
    summaries = convert_videos_to_summaries(
        input_videos, output_videos, target_videos,
        tag="decode_%d" % decode_ind,
        decode_hparams=hook_args.decode_hparams,
        display_ground_truth=decode_ind == 0)
    all_summaries.extend(summaries)
  return all_summaries
python
def display_video_hooks(hook_args):
  """Hooks to display videos at decode time."""
  predictions = hook_args.predictions
  max_outputs = hook_args.decode_hparams.max_display_outputs
  max_decodes = hook_args.decode_hparams.max_display_decodes

  with tf.Graph().as_default():
    _, best_decodes = video_metrics.compute_video_metrics_from_predictions(
        predictions, decode_hparams=hook_args.decode_hparams)

  all_summaries = []

  # Displays decodes corresponding to the best/worst metric.
  for metric, metric_decode_inds in best_decodes.items():
    curr_metric_inds = metric_decode_inds[:max_outputs]
    best_inputs, best_outputs, best_targets = [], [], []
    for sample_ind, decode_ind in enumerate(curr_metric_inds):
      curr_decode = predictions[decode_ind][sample_ind]
      best_inputs.append(curr_decode["inputs"])
      best_outputs.append(curr_decode["outputs"])
      best_targets.append(curr_decode["targets"])
    best_inputs = np.array(best_inputs, dtype=np.uint8)
    best_outputs = np.array(best_outputs, dtype=np.uint8)
    best_targets = np.array(best_targets, dtype=np.uint8)
    summaries = convert_videos_to_summaries(
        best_inputs, best_outputs, best_targets,
        tag=metric, decode_hparams=hook_args.decode_hparams)
    all_summaries.extend(summaries)

  # Display random decodes for ten conditioning frames.
  for decode_ind, decode in enumerate(predictions[:max_decodes]):
    target_videos = video_metrics.stack_data_given_key(decode, "targets")
    output_videos = video_metrics.stack_data_given_key(decode, "outputs")
    input_videos = video_metrics.stack_data_given_key(decode, "inputs")
    target_videos = np.asarray(target_videos, dtype=np.uint8)
    output_videos = np.asarray(output_videos, dtype=np.uint8)
    input_videos = np.asarray(input_videos, dtype=np.uint8)
    summaries = convert_videos_to_summaries(
        input_videos, output_videos, target_videos,
        tag="decode_%d" % decode_ind,
        decode_hparams=hook_args.decode_hparams,
        display_ground_truth=decode_ind == 0)
    all_summaries.extend(summaries)
  return all_summaries
[ "def", "display_video_hooks", "(", "hook_args", ")", ":", "predictions", "=", "hook_args", ".", "predictions", "max_outputs", "=", "hook_args", ".", "decode_hparams", ".", "max_display_outputs", "max_decodes", "=", "hook_args", ".", "decode_hparams", ".", "max_display_decodes", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "_", ",", "best_decodes", "=", "video_metrics", ".", "compute_video_metrics_from_predictions", "(", "predictions", ",", "decode_hparams", "=", "hook_args", ".", "decode_hparams", ")", "all_summaries", "=", "[", "]", "# Displays decodes corresponding to the best/worst metric,", "for", "metric", ",", "metric_decode_inds", "in", "best_decodes", ".", "items", "(", ")", ":", "curr_metric_inds", "=", "metric_decode_inds", "[", ":", "max_outputs", "]", "best_inputs", ",", "best_outputs", ",", "best_targets", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "sample_ind", ",", "decode_ind", "in", "enumerate", "(", "curr_metric_inds", ")", ":", "curr_decode", "=", "predictions", "[", "decode_ind", "]", "[", "sample_ind", "]", "best_inputs", ".", "append", "(", "curr_decode", "[", "\"inputs\"", "]", ")", "best_outputs", ".", "append", "(", "curr_decode", "[", "\"outputs\"", "]", ")", "best_targets", ".", "append", "(", "curr_decode", "[", "\"targets\"", "]", ")", "best_inputs", "=", "np", ".", "array", "(", "best_inputs", ",", "dtype", "=", "np", ".", "uint8", ")", "best_outputs", "=", "np", ".", "array", "(", "best_outputs", ",", "dtype", "=", "np", ".", "uint8", ")", "best_targets", "=", "np", ".", "array", "(", "best_targets", ",", "dtype", "=", "np", ".", "uint8", ")", "summaries", "=", "convert_videos_to_summaries", "(", "best_inputs", ",", "best_outputs", ",", "best_targets", ",", "tag", "=", "metric", ",", "decode_hparams", "=", "hook_args", ".", "decode_hparams", ")", "all_summaries", ".", "extend", "(", "summaries", ")", "# Display random decodes for ten conditioning frames.", "for", "decode_ind", ",", "decode", "in", "enumerate", "(", "predictions", "[", ":", "max_decodes", "]", ")", ":", "target_videos", "=", "video_metrics", ".", "stack_data_given_key", "(", "decode", ",", "\"targets\"", ")", "output_videos", "=", "video_metrics", ".", "stack_data_given_key", "(", "decode", ",", "\"outputs\"", ")", "input_videos", "=", "video_metrics", ".", "stack_data_given_key", "(", "decode", ",", "\"inputs\"", ")", "target_videos", "=", "np", ".", "asarray", "(", "target_videos", ",", "dtype", "=", "np", ".", "uint8", ")", "output_videos", "=", "np", ".", "asarray", "(", "output_videos", ",", "dtype", "=", "np", ".", "uint8", ")", "input_videos", "=", "np", ".", "asarray", "(", "input_videos", ",", "dtype", "=", "np", ".", "uint8", ")", "summaries", "=", "convert_videos_to_summaries", "(", "input_videos", ",", "output_videos", ",", "target_videos", ",", "tag", "=", "\"decode_%d\"", "%", "decode_ind", ",", "decode_hparams", "=", "hook_args", ".", "decode_hparams", ",", "display_ground_truth", "=", "decode_ind", "==", "0", ")", "all_summaries", ".", "extend", "(", "summaries", ")", "return", "all_summaries" ]
Hooks to display videos at decode time.
[ "Hooks", "to", "display", "videos", "at", "decode", "time", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L165-L206
22,754
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
summarize_video_metrics
def summarize_video_metrics(hook_args):
  """Computes video metrics summaries using the decoder output."""
  problem_name = hook_args.problem.name
  current_problem = hook_args.problem
  hparams = hook_args.hparams
  output_dirs = hook_args.output_dirs
  predictions = hook_args.predictions
  frame_shape = [
      current_problem.frame_height, current_problem.frame_width,
      current_problem.num_channels
  ]
  metrics_graph = tf.Graph()
  with metrics_graph.as_default():
    if predictions:
      metrics_results, _ = video_metrics.compute_video_metrics_from_predictions(
          predictions, decode_hparams=hook_args.decode_hparams)
    else:
      metrics_results, _ = video_metrics.compute_video_metrics_from_png_files(
          output_dirs, problem_name, hparams.video_num_target_frames,
          frame_shape)

  summary_values = []
  for name, array in six.iteritems(metrics_results):
    for ind, val in enumerate(array):
      tag = "metric_{}/{}".format(name, ind)
      summary_values.append(tf.Summary.Value(tag=tag, simple_value=val))
  return summary_values
python
def summarize_video_metrics(hook_args):
  """Computes video metrics summaries using the decoder output."""
  problem_name = hook_args.problem.name
  current_problem = hook_args.problem
  hparams = hook_args.hparams
  output_dirs = hook_args.output_dirs
  predictions = hook_args.predictions
  frame_shape = [
      current_problem.frame_height, current_problem.frame_width,
      current_problem.num_channels
  ]
  metrics_graph = tf.Graph()
  with metrics_graph.as_default():
    if predictions:
      metrics_results, _ = video_metrics.compute_video_metrics_from_predictions(
          predictions, decode_hparams=hook_args.decode_hparams)
    else:
      metrics_results, _ = video_metrics.compute_video_metrics_from_png_files(
          output_dirs, problem_name, hparams.video_num_target_frames,
          frame_shape)

  summary_values = []
  for name, array in six.iteritems(metrics_results):
    for ind, val in enumerate(array):
      tag = "metric_{}/{}".format(name, ind)
      summary_values.append(tf.Summary.Value(tag=tag, simple_value=val))
  return summary_values
[ "def", "summarize_video_metrics", "(", "hook_args", ")", ":", "problem_name", "=", "hook_args", ".", "problem", ".", "name", "current_problem", "=", "hook_args", ".", "problem", "hparams", "=", "hook_args", ".", "hparams", "output_dirs", "=", "hook_args", ".", "output_dirs", "predictions", "=", "hook_args", ".", "predictions", "frame_shape", "=", "[", "current_problem", ".", "frame_height", ",", "current_problem", ".", "frame_width", ",", "current_problem", ".", "num_channels", "]", "metrics_graph", "=", "tf", ".", "Graph", "(", ")", "with", "metrics_graph", ".", "as_default", "(", ")", ":", "if", "predictions", ":", "metrics_results", ",", "_", "=", "video_metrics", ".", "compute_video_metrics_from_predictions", "(", "predictions", ",", "decode_hparams", "=", "hook_args", ".", "decode_hparams", ")", "else", ":", "metrics_results", ",", "_", "=", "video_metrics", ".", "compute_video_metrics_from_png_files", "(", "output_dirs", ",", "problem_name", ",", "hparams", ".", "video_num_target_frames", ",", "frame_shape", ")", "summary_values", "=", "[", "]", "for", "name", ",", "array", "in", "six", ".", "iteritems", "(", "metrics_results", ")", ":", "for", "ind", ",", "val", "in", "enumerate", "(", "array", ")", ":", "tag", "=", "\"metric_{}/{}\"", ".", "format", "(", "name", ",", "ind", ")", "summary_values", ".", "append", "(", "tf", ".", "Summary", ".", "Value", "(", "tag", "=", "tag", ",", "simple_value", "=", "val", ")", ")", "return", "summary_values" ]
Computes video metrics summaries using the decoder output.
[ "Computes", "video", "metrics", "summaries", "using", "the", "decoder", "output", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L209-L235
22,755
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
debug_video_writer_factory
def debug_video_writer_factory(output_dir):
  """Creates a VideoWriter for debug videos."""
  if FLAGS.disable_ffmpeg:
    return common_video.IndividualFrameWriter(output_dir)
  else:
    output_path = os.path.join(output_dir, "video.avi")
    return common_video.WholeVideoWriter(
        fps=10, output_path=output_path, file_format="avi")
python
def debug_video_writer_factory(output_dir):
  """Creates a VideoWriter for debug videos."""
  if FLAGS.disable_ffmpeg:
    return common_video.IndividualFrameWriter(output_dir)
  else:
    output_path = os.path.join(output_dir, "video.avi")
    return common_video.WholeVideoWriter(
        fps=10, output_path=output_path, file_format="avi")
[ "def", "debug_video_writer_factory", "(", "output_dir", ")", ":", "if", "FLAGS", ".", "disable_ffmpeg", ":", "return", "common_video", ".", "IndividualFrameWriter", "(", "output_dir", ")", "else", ":", "output_path", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"video.avi\"", ")", "return", "common_video", ".", "WholeVideoWriter", "(", "fps", "=", "10", ",", "output_path", "=", "output_path", ",", "file_format", "=", "\"avi\"", ")" ]
Creates a VideoWriter for debug videos.
[ "Creates", "a", "VideoWriter", "for", "debug", "videos", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L238-L246
22,756
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
VideoProblem.generate_encoded_samples
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
  """Generate samples of the encoded frames with possible extra data.

  By default this function just encodes the numpy array returned as "frame"
  from `self.generate_samples` into a PNG image. Override this function to
  get other encodings on disk.

  Args:
    data_dir: final data directory. Typically only used in this method to
      copy over user-supplied vocab files if there are extra fields needing
      them.
    tmp_dir: temporary directory that you can use for downloading and
      scratch.
    dataset_split: problem.DatasetSplit, which data split to generate samples
      for (for example, training and evaluation).

  Yields:
    Sample: dict<str feature_name, feature value> which is in disk encoding.

  Raises:
    ValueError: if the frame has a different number of channels than
      required.
  """
  writer = None

  with tf.Graph().as_default():
    image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
    encoded_image_t = tf.image.encode_png(image_t)
    with tf.Session() as sess:
      for features in self.generate_samples(data_dir, tmp_dir, dataset_split):
        unencoded_frame = features.pop("frame")
        self.validate_frame(unencoded_frame)
        height, width, _ = unencoded_frame.shape
        encoded_frame = sess.run(
            encoded_image_t, feed_dict={image_t: unencoded_frame})
        features["image/encoded"] = [encoded_frame]
        features["image/format"] = ["png"]
        features["image/height"] = [height]
        features["image/width"] = [width]

        has_debug_image = "image/debug" in features
        if has_debug_image:
          unencoded_debug = features.pop("image/debug")
          encoded_debug = sess.run(
              encoded_image_t, feed_dict={image_t: unencoded_debug})
          features["image/encoded_debug"] = [encoded_debug]

        if self.debug_dump_frames_path:
          # Defer creating debug writer until we know debug_dump_frames_path.
          if writer is None:
            if not tf.gfile.Exists(self.debug_dump_frames_path):
              tf.gfile.MkDir(self.debug_dump_frames_path)
            writer = debug_video_writer_factory(self.debug_dump_frames_path)
          img = unencoded_debug if has_debug_image else unencoded_frame
          encoded_img = encoded_debug if has_debug_image else encoded_frame
          writer.write(img, encoded_img)

        yield features

  if self.debug_dump_frames_path:
    writer.finish_to_disk()
python
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
  """Generate samples of the encoded frames with possible extra data.

  By default this function just encodes the numpy array returned as "frame"
  from `self.generate_samples` into a PNG image. Override this function to
  get other encodings on disk.

  Args:
    data_dir: final data directory. Typically only used in this method to
      copy over user-supplied vocab files if there are extra fields needing
      them.
    tmp_dir: temporary directory that you can use for downloading and
      scratch.
    dataset_split: problem.DatasetSplit, which data split to generate samples
      for (for example, training and evaluation).

  Yields:
    Sample: dict<str feature_name, feature value> which is in disk encoding.

  Raises:
    ValueError: if the frame has a different number of channels than
      required.
  """
  writer = None

  with tf.Graph().as_default():
    image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
    encoded_image_t = tf.image.encode_png(image_t)
    with tf.Session() as sess:
      for features in self.generate_samples(data_dir, tmp_dir, dataset_split):
        unencoded_frame = features.pop("frame")
        self.validate_frame(unencoded_frame)
        height, width, _ = unencoded_frame.shape
        encoded_frame = sess.run(
            encoded_image_t, feed_dict={image_t: unencoded_frame})
        features["image/encoded"] = [encoded_frame]
        features["image/format"] = ["png"]
        features["image/height"] = [height]
        features["image/width"] = [width]

        has_debug_image = "image/debug" in features
        if has_debug_image:
          unencoded_debug = features.pop("image/debug")
          encoded_debug = sess.run(
              encoded_image_t, feed_dict={image_t: unencoded_debug})
          features["image/encoded_debug"] = [encoded_debug]

        if self.debug_dump_frames_path:
          # Defer creating debug writer until we know debug_dump_frames_path.
          if writer is None:
            if not tf.gfile.Exists(self.debug_dump_frames_path):
              tf.gfile.MkDir(self.debug_dump_frames_path)
            writer = debug_video_writer_factory(self.debug_dump_frames_path)
          img = unencoded_debug if has_debug_image else unencoded_frame
          encoded_img = encoded_debug if has_debug_image else encoded_frame
          writer.write(img, encoded_img)

        yield features

  if self.debug_dump_frames_path:
    writer.finish_to_disk()
[ "def", "generate_encoded_samples", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "dataset_split", ")", ":", "writer", "=", "None", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "image_t", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "uint8", ",", "shape", "=", "(", "None", ",", "None", ",", "None", ")", ")", "encoded_image_t", "=", "tf", ".", "image", ".", "encode_png", "(", "image_t", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "for", "features", "in", "self", ".", "generate_samples", "(", "data_dir", ",", "tmp_dir", ",", "dataset_split", ")", ":", "unencoded_frame", "=", "features", ".", "pop", "(", "\"frame\"", ")", "self", ".", "validate_frame", "(", "unencoded_frame", ")", "height", ",", "width", ",", "_", "=", "unencoded_frame", ".", "shape", "encoded_frame", "=", "sess", ".", "run", "(", "encoded_image_t", ",", "feed_dict", "=", "{", "image_t", ":", "unencoded_frame", "}", ")", "features", "[", "\"image/encoded\"", "]", "=", "[", "encoded_frame", "]", "features", "[", "\"image/format\"", "]", "=", "[", "\"png\"", "]", "features", "[", "\"image/height\"", "]", "=", "[", "height", "]", "features", "[", "\"image/width\"", "]", "=", "[", "width", "]", "has_debug_image", "=", "\"image/debug\"", "in", "features", "if", "has_debug_image", ":", "unencoded_debug", "=", "features", ".", "pop", "(", "\"image/debug\"", ")", "encoded_debug", "=", "sess", ".", "run", "(", "encoded_image_t", ",", "feed_dict", "=", "{", "image_t", ":", "unencoded_debug", "}", ")", "features", "[", "\"image/encoded_debug\"", "]", "=", "[", "encoded_debug", "]", "if", "self", ".", "debug_dump_frames_path", ":", "# Defer creating debug writer until we know debug_dump_frames_path.", "if", "writer", "is", "None", ":", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "self", ".", "debug_dump_frames_path", ")", ":", "tf", ".", "gfile", ".", "MkDir", "(", "self", ".", "debug_dump_frames_path", ")", "writer", "=", "debug_video_writer_factory", "(", "self", ".", "debug_dump_frames_path", ")", "img", "=", "unencoded_debug", "if", "has_debug_image", "else", "unencoded_frame", "encoded_img", "=", "encoded_debug", "if", "has_debug_image", "else", "encoded_frame", "writer", ".", "write", "(", "img", ",", "encoded_img", ")", "yield", "features", "if", "self", ".", "debug_dump_frames_path", ":", "writer", ".", "finish_to_disk", "(", ")" ]
Generate samples of the encoded frames with possible extra data.

  By default this function just encodes the numpy array returned as "frame"
  from `self.generate_samples` into a PNG image. Override this function to
  get other encodings on disk.

  Args:
    data_dir: final data directory. Typically only used in this method to
      copy over user-supplied vocab files if there are extra fields needing
      them.
    tmp_dir: temporary directory that you can use for downloading and
      scratch.
    dataset_split: problem.DatasetSplit, which data split to generate samples
      for (for example, training and evaluation).

  Yields:
    Sample: dict<str feature_name, feature value> which is in disk encoding.

  Raises:
    ValueError: if the frame has a different number of channels than
      required.
[ "Generate", "samples", "of", "the", "encoded", "frames", "with", "possible", "extra", "data", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L573-L630
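The method builds the `encode_png` op once, outside the loop, and feeds every frame through the same placeholder; creating a fresh op per frame would grow the graph without bound. A stripped-down sketch of that pattern (TF1-style; the zero frame is invented):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
with tf.Graph().as_default():
  image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
  encoded_t = tf.image.encode_png(image_t)
  with tf.Session() as sess:
    for _ in range(3):  # stands in for iterating generate_samples
      frame = np.zeros((8, 8, 3), dtype=np.uint8)
      png_bytes = sess.run(encoded_t, feed_dict={image_t: frame})
      # png_bytes is a serialized PNG, ready for the "image/encoded" field.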
22,757
tensorflow/tensor2tensor
tensor2tensor/data_generators/video_utils.py
VideoProblem.generate_data
def generate_data(self, data_dir, tmp_dir, task_id=-1):
  """The function generating the data."""
  filepath_fns = {
      problem.DatasetSplit.TRAIN: self.training_filepaths,
      problem.DatasetSplit.EVAL: self.dev_filepaths,
      problem.DatasetSplit.TEST: self.test_filepaths,
  }

  # We set shuffled=True as we don't want to shuffle on disk later.
  split_paths = [(split["split"], filepath_fns[split["split"]](
      data_dir, split["shards"], shuffled=True))
                 for split in self.dataset_splits]
  all_paths = []
  for _, paths in split_paths:
    all_paths.extend(paths)

  if self.is_generate_per_split:
    for split, paths in split_paths:
      generator_utils.generate_files(
          self.generate_encoded_samples(data_dir, tmp_dir, split),
          paths,
          cycle_every_n=self.total_number_of_frames // len(paths))
  else:
    generator_utils.generate_files(
        self.generate_encoded_samples(data_dir, tmp_dir,
                                      problem.DatasetSplit.TRAIN),
        all_paths,
        cycle_every_n=self.total_number_of_frames // len(all_paths))
python
def generate_data(self, data_dir, tmp_dir, task_id=-1):
  """The function generating the data."""
  filepath_fns = {
      problem.DatasetSplit.TRAIN: self.training_filepaths,
      problem.DatasetSplit.EVAL: self.dev_filepaths,
      problem.DatasetSplit.TEST: self.test_filepaths,
  }

  # We set shuffled=True as we don't want to shuffle on disk later.
  split_paths = [(split["split"], filepath_fns[split["split"]](
      data_dir, split["shards"], shuffled=True))
                 for split in self.dataset_splits]
  all_paths = []
  for _, paths in split_paths:
    all_paths.extend(paths)

  if self.is_generate_per_split:
    for split, paths in split_paths:
      generator_utils.generate_files(
          self.generate_encoded_samples(data_dir, tmp_dir, split),
          paths,
          cycle_every_n=self.total_number_of_frames // len(paths))
  else:
    generator_utils.generate_files(
        self.generate_encoded_samples(data_dir, tmp_dir,
                                      problem.DatasetSplit.TRAIN),
        all_paths,
        cycle_every_n=self.total_number_of_frames // len(all_paths))
[ "def", "generate_data", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "task_id", "=", "-", "1", ")", ":", "filepath_fns", "=", "{", "problem", ".", "DatasetSplit", ".", "TRAIN", ":", "self", ".", "training_filepaths", ",", "problem", ".", "DatasetSplit", ".", "EVAL", ":", "self", ".", "dev_filepaths", ",", "problem", ".", "DatasetSplit", ".", "TEST", ":", "self", ".", "test_filepaths", ",", "}", "# We set shuffled=True as we don't want to shuffle on disk later.", "split_paths", "=", "[", "(", "split", "[", "\"split\"", "]", ",", "filepath_fns", "[", "split", "[", "\"split\"", "]", "]", "(", "data_dir", ",", "split", "[", "\"shards\"", "]", ",", "shuffled", "=", "True", ")", ")", "for", "split", "in", "self", ".", "dataset_splits", "]", "all_paths", "=", "[", "]", "for", "_", ",", "paths", "in", "split_paths", ":", "all_paths", ".", "extend", "(", "paths", ")", "if", "self", ".", "is_generate_per_split", ":", "for", "split", ",", "paths", "in", "split_paths", ":", "generator_utils", ".", "generate_files", "(", "self", ".", "generate_encoded_samples", "(", "data_dir", ",", "tmp_dir", ",", "split", ")", ",", "paths", ",", "cycle_every_n", "=", "self", ".", "total_number_of_frames", "//", "len", "(", "paths", ")", ")", "else", ":", "generator_utils", ".", "generate_files", "(", "self", ".", "generate_encoded_samples", "(", "data_dir", ",", "tmp_dir", ",", "problem", ".", "DatasetSplit", ".", "TRAIN", ")", ",", "all_paths", ",", "cycle_every_n", "=", "self", ".", "total_number_of_frames", "//", "len", "(", "all_paths", ")", ")" ]
The function generating the data.
[ "The", "function", "generating", "the", "data", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L632-L659
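For reference, the `dataset_splits` iterated above is a list of dicts with "split" and "shards" keys; a plausible value, inferred only from the keys used in this method (the shard counts are invented):

dataset_splits = [
    {"split": problem.DatasetSplit.TRAIN, "shards": 10},
    {"split": problem.DatasetSplit.EVAL, "shards": 1},
]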
22,758
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
_add_variable_proxy_methods
def _add_variable_proxy_methods(var, proxy_tensor):
  """Proxy methods of underlying variable.

  This enables our custom getters to still work with, e.g., batch norm.

  Args:
    var: Variable to proxy
    proxy_tensor: Tensor that is identity of var
  """
  proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)
  proxy_tensor.assign_sub = var.assign_sub
  proxy_tensor.assign = var.assign
  proxy_tensor.initialized_value = var.initialized_value
python
def _add_variable_proxy_methods(var, proxy_tensor):
  """Proxy methods of underlying variable.

  This enables our custom getters to still work with, e.g., batch norm.

  Args:
    var: Variable to proxy
    proxy_tensor: Tensor that is identity of var
  """
  proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)
  proxy_tensor.assign_sub = var.assign_sub
  proxy_tensor.assign = var.assign
  proxy_tensor.initialized_value = var.initialized_value
[ "def", "_add_variable_proxy_methods", "(", "var", ",", "proxy_tensor", ")", ":", "proxy_tensor", ".", "read_value", "=", "lambda", ":", "tf", ".", "identity", "(", "proxy_tensor", ")", "proxy_tensor", ".", "assign_sub", "=", "var", ".", "assign_sub", "proxy_tensor", ".", "assign", "=", "var", ".", "assign", "proxy_tensor", ".", "initialized_value", "=", "var", ".", "initialized_value" ]
Proxy methods of underlying variable.

  This enables our custom getters to still work with, e.g., batch norm.

  Args:
    var: Variable to proxy
    proxy_tensor: Tensor that is identity of var
[ "Proxy", "methods", "of", "underlying", "variable", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L75-L87
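A toy illustration of the monkey-patching (TF1 graph mode assumed, where setting attributes on a graph tensor is allowed; the identity proxy mimics what a custom getter might return):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
var = tf.get_variable("w", shape=[2], initializer=tf.zeros_initializer())
proxy = tf.identity(var)                  # stand-in for a custom getter
_add_variable_proxy_methods(var, proxy)   # function from the entry above
update_op = proxy.assign(tf.ones([2]))    # forwards to var.assign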
22,759
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
_rowwise_unsorted_segment_sum
def _rowwise_unsorted_segment_sum(values, indices, n):
  """UnsortedSegmentSum on each row.

  Args:
    values: a `Tensor` with shape `[batch_size, k]`.
    indices: an integer `Tensor` with shape `[batch_size, k]`.
    n: an integer.

  Returns:
    A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
  """
  batch, k = tf.unstack(tf.shape(indices), num=2)
  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
  ret_flat = tf.unsorted_segment_sum(
      tf.reshape(values, [-1]), indices_flat, batch * n)
  return tf.reshape(ret_flat, [batch, n])
python
def _rowwise_unsorted_segment_sum(values, indices, n):
  """UnsortedSegmentSum on each row.

  Args:
    values: a `Tensor` with shape `[batch_size, k]`.
    indices: an integer `Tensor` with shape `[batch_size, k]`.
    n: an integer.

  Returns:
    A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
  """
  batch, k = tf.unstack(tf.shape(indices), num=2)
  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
  ret_flat = tf.unsorted_segment_sum(
      tf.reshape(values, [-1]), indices_flat, batch * n)
  return tf.reshape(ret_flat, [batch, n])
[ "def", "_rowwise_unsorted_segment_sum", "(", "values", ",", "indices", ",", "n", ")", ":", "batch", ",", "k", "=", "tf", ".", "unstack", "(", "tf", ".", "shape", "(", "indices", ")", ",", "num", "=", "2", ")", "indices_flat", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "-", "1", "]", ")", "+", "tf", ".", "div", "(", "tf", ".", "range", "(", "batch", "*", "k", ")", ",", "k", ")", "*", "n", "ret_flat", "=", "tf", ".", "unsorted_segment_sum", "(", "tf", ".", "reshape", "(", "values", ",", "[", "-", "1", "]", ")", ",", "indices_flat", ",", "batch", "*", "n", ")", "return", "tf", ".", "reshape", "(", "ret_flat", ",", "[", "batch", ",", "n", "]", ")" ]
UnsortedSegmentSum on each row. Args: values: a `Tensor` with shape `[batch_size, k]`. indices: an integer `Tensor` with shape `[batch_size, k]`. n: an integer. Returns: A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
[ "UnsortedSegmentSum", "on", "each", "row", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L267-L281
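To make the row-wise semantics above concrete, here is a minimal NumPy sketch of what _rowwise_unsorted_segment_sum computes (NumPy stands in for TensorFlow purely for illustration; the shapes and values below are invented):

import numpy as np

def rowwise_unsorted_segment_sum(values, indices, n):
    # For each row b, out[b, j] = sum of values[b, i] where indices[b, i] == j.
    batch, k = indices.shape
    out = np.zeros((batch, n), dtype=values.dtype)
    for b in range(batch):
        for i in range(k):
            out[b, indices[b, i]] += values[b, i]
    return out

values = np.array([[0.7, 0.3], [0.5, 0.5]])
indices = np.array([[2, 0], [1, 1]])
print(rowwise_unsorted_segment_sum(values, indices, n=4))
# [[0.3 0.  0.7 0. ]
#  [0.  1.  0.  0. ]]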
22,760
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
_prob_in_top_k
def _prob_in_top_k( clean_values, noisy_values, noise_stddev, noisy_top_values, k): """Helper function to NoisyTopKGating. Computes the probability that value is in top k, given different random noise. This gives us a way of backpropagating from a loss that balances the number of times each expert is in the top k experts per example. In the case of no noise, pass in None for noise_stddev, and the result will not be differentiable. Args: clean_values: a `Tensor` of shape [batch, n]. noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus normally distributed noise with standard deviation noise_stddev. noise_stddev: a `Tensor` of shape [batch, n], or None noisy_top_values: a `Tensor` of shape [batch, m]. The "values" output of tf.top_k(noisy_values, m). m >= k+1 k: an integer. Returns: a `Tensor` of shape [batch, n]. """ batch = tf.shape(clean_values)[0] m = tf.shape(noisy_top_values)[1] top_values_flat = tf.reshape(noisy_top_values, [-1]) # we want to compute the threshold that a particular value would have to # exceed in order to make the top k. This computation differs depending # on whether the value is already in the top k. threshold_positions_if_in = tf.range(batch) * m + k threshold_if_in = tf.expand_dims( tf.gather(top_values_flat, threshold_positions_if_in), 1) is_in = tf.greater(noisy_values, threshold_if_in) if noise_stddev is None: return tf.to_float(is_in) threshold_positions_if_out = threshold_positions_if_in - 1 threshold_if_out = tf.expand_dims( tf.gather(top_values_flat, threshold_positions_if_out), 1) # is each value currently in the top k. prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in, noise_stddev) prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out, noise_stddev) prob = tf.where(is_in, prob_if_in, prob_if_out) return prob
python
def _prob_in_top_k( clean_values, noisy_values, noise_stddev, noisy_top_values, k): """Helper function to NoisyTopKGating. Computes the probability that value is in top k, given different random noise. This gives us a way of backpropagating from a loss that balances the number of times each expert is in the top k experts per example. In the case of no noise, pass in None for noise_stddev, and the result will not be differentiable. Args: clean_values: a `Tensor` of shape [batch, n]. noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus normally distributed noise with standard deviation noise_stddev. noise_stddev: a `Tensor` of shape [batch, n], or None noisy_top_values: a `Tensor` of shape [batch, m]. The "values" output of tf.top_k(noisy_values, m). m >= k+1 k: an integer. Returns: a `Tensor` of shape [batch, n]. """ batch = tf.shape(clean_values)[0] m = tf.shape(noisy_top_values)[1] top_values_flat = tf.reshape(noisy_top_values, [-1]) # we want to compute the threshold that a particular value would have to # exceed in order to make the top k. This computation differs depending # on whether the value is already in the top k. threshold_positions_if_in = tf.range(batch) * m + k threshold_if_in = tf.expand_dims( tf.gather(top_values_flat, threshold_positions_if_in), 1) is_in = tf.greater(noisy_values, threshold_if_in) if noise_stddev is None: return tf.to_float(is_in) threshold_positions_if_out = threshold_positions_if_in - 1 threshold_if_out = tf.expand_dims( tf.gather(top_values_flat, threshold_positions_if_out), 1) # is each value currently in the top k. prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in, noise_stddev) prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out, noise_stddev) prob = tf.where(is_in, prob_if_in, prob_if_out) return prob
[ "def", "_prob_in_top_k", "(", "clean_values", ",", "noisy_values", ",", "noise_stddev", ",", "noisy_top_values", ",", "k", ")", ":", "batch", "=", "tf", ".", "shape", "(", "clean_values", ")", "[", "0", "]", "m", "=", "tf", ".", "shape", "(", "noisy_top_values", ")", "[", "1", "]", "top_values_flat", "=", "tf", ".", "reshape", "(", "noisy_top_values", ",", "[", "-", "1", "]", ")", "# we want to compute the threshold that a particular value would have to", "# exceed in order to make the top k. This computation differs depending", "# on whether the value is already in the top k.", "threshold_positions_if_in", "=", "tf", ".", "range", "(", "batch", ")", "*", "m", "+", "k", "threshold_if_in", "=", "tf", ".", "expand_dims", "(", "tf", ".", "gather", "(", "top_values_flat", ",", "threshold_positions_if_in", ")", ",", "1", ")", "is_in", "=", "tf", ".", "greater", "(", "noisy_values", ",", "threshold_if_in", ")", "if", "noise_stddev", "is", "None", ":", "return", "tf", ".", "to_float", "(", "is_in", ")", "threshold_positions_if_out", "=", "threshold_positions_if_in", "-", "1", "threshold_if_out", "=", "tf", ".", "expand_dims", "(", "tf", ".", "gather", "(", "top_values_flat", ",", "threshold_positions_if_out", ")", ",", "1", ")", "# is each value currently in the top k.", "prob_if_in", "=", "_normal_distribution_cdf", "(", "clean_values", "-", "threshold_if_in", ",", "noise_stddev", ")", "prob_if_out", "=", "_normal_distribution_cdf", "(", "clean_values", "-", "threshold_if_out", ",", "noise_stddev", ")", "prob", "=", "tf", ".", "where", "(", "is_in", ",", "prob_if_in", ",", "prob_if_out", ")", "return", "prob" ]
Helper function to NoisyTopKGating. Computes the probability that value is in top k, given different random noise. This gives us a way of backpropagating from a loss that balances the number of times each expert is in the top k experts per example. In the case of no noise, pass in None for noise_stddev, and the result will not be differentiable. Args: clean_values: a `Tensor` of shape [batch, n]. noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus normally distributed noise with standard deviation noise_stddev. noise_stddev: a `Tensor` of shape [batch, n], or None noisy_top_values: a `Tensor` of shape [batch, m]. The "values" output of tf.top_k(noisy_values, m). m >= k+1 k: an integer. Returns: a `Tensor` of shape [batch, n].
[ "Helper", "function", "to", "NoisyTopKGating", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L303-L348
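The per-entry quantity computed above is the probability that a clean logit, once perturbed by Gaussian noise, clears the relevant top-k threshold. A minimal sketch of that probability using SciPy's normal CDF (the numbers are illustrative, not taken from the repository):

import numpy as np
from scipy.stats import norm

# P(clean + Normal(0, stddev) > threshold) = CDF((clean - threshold) / stddev),
# which is the quantity _prob_in_top_k evaluates per (example, expert) pair.
def prob_exceeds(clean_value, threshold, stddev):
    return norm.cdf((clean_value - threshold) / stddev)

print(prob_exceeds(clean_value=1.0, threshold=0.5, stddev=0.3))  # ~0.95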
22,761
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
cv_squared
def cv_squared(x): """The squared coefficient of variation of a sample. Useful as a loss to encourage a positive distribution to be more uniform. Epsilons added for numerical stability. Returns 0 for an empty Tensor. Args: x: a `Tensor`. Returns: a `Scalar`. """ epsilon = 1e-10 float_size = tf.to_float(tf.size(x)) + epsilon mean = tf.reduce_sum(x) / float_size variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size return variance / (tf.square(mean) + epsilon)
python
def cv_squared(x): """The squared coefficient of variation of a sample. Useful as a loss to encourage a positive distribution to be more uniform. Epsilons added for numerical stability. Returns 0 for an empty Tensor. Args: x: a `Tensor`. Returns: a `Scalar`. """ epsilon = 1e-10 float_size = tf.to_float(tf.size(x)) + epsilon mean = tf.reduce_sum(x) / float_size variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size return variance / (tf.square(mean) + epsilon)
[ "def", "cv_squared", "(", "x", ")", ":", "epsilon", "=", "1e-10", "float_size", "=", "tf", ".", "to_float", "(", "tf", ".", "size", "(", "x", ")", ")", "+", "epsilon", "mean", "=", "tf", ".", "reduce_sum", "(", "x", ")", "/", "float_size", "variance", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "squared_difference", "(", "x", ",", "mean", ")", ")", "/", "float_size", "return", "variance", "/", "(", "tf", ".", "square", "(", "mean", ")", "+", "epsilon", ")" ]
The squared coefficient of variation of a sample. Useful as a loss to encourage a positive distribution to be more uniform. Epsilons added for numerical stability. Returns 0 for an empty Tensor. Args: x: a `Tensor`. Returns: a `Scalar`.
[ "The", "squared", "coefficient", "of", "variation", "of", "a", "sample", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L351-L368
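Since cv_squared is just variance divided by squared mean (with epsilons for stability), a small NumPy check shows why it works as a load-balancing loss: a uniform load gives roughly 0, a concentrated load gives a large value (the sample values below are invented):

import numpy as np

def cv_squared(x, epsilon=1e-10):
    # Squared coefficient of variation: variance / mean^2.
    n = x.size + epsilon
    mean = x.sum() / n
    variance = ((x - mean) ** 2).sum() / n
    return variance / (mean ** 2 + epsilon)

print(cv_squared(np.array([1.0, 1.0, 1.0, 1.0])))  # ~0.0 -- perfectly balanced
print(cv_squared(np.array([4.0, 0.0, 0.0, 0.0])))  # ~3.0 -- highly imbalanced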
22,762
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
update_hparams_for_vq_gating
def update_hparams_for_vq_gating(hparams): """VQ Gating hparams.""" hparams.add_hparam("z_size", 4) hparams.add_hparam("noise_dev", 0.5) # Bottleneck kinds supported: dense, vae, dvq. hparams.add_hparam("bottleneck_kind", "dvq") hparams.add_hparam("num_blocks", 1) hparams.add_hparam("num_residuals", 1) # Reshape method for DVQ: slice, project hparams.add_hparam("beta", 0.25) hparams.add_hparam("epsilon", 1e-5) hparams.add_hparam("decay", 0.999) hparams.add_hparam("ema", False) # default is false until ema is implemented hparams.add_hparam("random_top_k", 1) hparams.add_hparam("soft_em", False) hparams.add_hparam("num_samples", 10) hparams.add_hparam("gating_type", "vq") hparams.add_hparam("use_scales", int(True)) hparams.add_hparam("residual_centroids", int(False))
python
def update_hparams_for_vq_gating(hparams): """VQ Gating hparams.""" hparams.add_hparam("z_size", 4) hparams.add_hparam("noise_dev", 0.5) # Bottleneck kinds supported: dense, vae, dvq. hparams.add_hparam("bottleneck_kind", "dvq") hparams.add_hparam("num_blocks", 1) hparams.add_hparam("num_residuals", 1) # Reshape method for DVQ: slice, project hparams.add_hparam("beta", 0.25) hparams.add_hparam("epsilon", 1e-5) hparams.add_hparam("decay", 0.999) hparams.add_hparam("ema", False) # default is false until ema is implemented hparams.add_hparam("random_top_k", 1) hparams.add_hparam("soft_em", False) hparams.add_hparam("num_samples", 10) hparams.add_hparam("gating_type", "vq") hparams.add_hparam("use_scales", int(True)) hparams.add_hparam("residual_centroids", int(False))
[ "def", "update_hparams_for_vq_gating", "(", "hparams", ")", ":", "hparams", ".", "add_hparam", "(", "\"z_size\"", ",", "4", ")", "hparams", ".", "add_hparam", "(", "\"noise_dev\"", ",", "0.5", ")", "# Bottleneck kinds supported: dense, vae, dvq.", "hparams", ".", "add_hparam", "(", "\"bottleneck_kind\"", ",", "\"dvq\"", ")", "hparams", ".", "add_hparam", "(", "\"num_blocks\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"num_residuals\"", ",", "1", ")", "# Reshape method for DVQ: slice, project", "hparams", ".", "add_hparam", "(", "\"beta\"", ",", "0.25", ")", "hparams", ".", "add_hparam", "(", "\"epsilon\"", ",", "1e-5", ")", "hparams", ".", "add_hparam", "(", "\"decay\"", ",", "0.999", ")", "hparams", ".", "add_hparam", "(", "\"ema\"", ",", "False", ")", "# default is false until ema is implemented", "hparams", ".", "add_hparam", "(", "\"random_top_k\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"soft_em\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"num_samples\"", ",", "10", ")", "hparams", ".", "add_hparam", "(", "\"gating_type\"", ",", "\"vq\"", ")", "hparams", ".", "add_hparam", "(", "\"use_scales\"", ",", "int", "(", "True", ")", ")", "hparams", ".", "add_hparam", "(", "\"residual_centroids\"", ",", "int", "(", "False", ")", ")" ]
VQ Gating hparams.
[ "VQ", "Gating", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L384-L402
22,763
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
_my_top_k
def _my_top_k(x, k): """GPU-compatible version of top-k that works for very small constant k. Calls argmax repeatedly. tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense, seems not to be, so if we use tf.nn.top_k, then both the top_k and its gradient go on cpu. Once this is not an issue, this function becomes obsolete and should be replaced by tf.nn.top_k. Args: x: a 2d Tensor. k: a small integer. Returns: values: a Tensor of shape [batch_size, k] indices: an int32 Tensor of shape [batch_size, k] """ if k > 10: return tf.nn.top_k(x, k) values = [] indices = [] depth = tf.shape(x)[1] for i in range(k): values.append(tf.reduce_max(x, 1)) argmax = tf.argmax(x, 1) indices.append(argmax) if i + 1 < k: x += tf.one_hot(argmax, depth, -1e9) return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
python
def _my_top_k(x, k): """GPU-compatible version of top-k that works for very small constant k. Calls argmax repeatedly. tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense, seems not to be, so if we use tf.nn.top_k, then both the top_k and its gradient go on cpu. Once this is not an issue, this function becomes obsolete and should be replaced by tf.nn.top_k. Args: x: a 2d Tensor. k: a small integer. Returns: values: a Tensor of shape [batch_size, k] indices: an int32 Tensor of shape [batch_size, k] """ if k > 10: return tf.nn.top_k(x, k) values = [] indices = [] depth = tf.shape(x)[1] for i in range(k): values.append(tf.reduce_max(x, 1)) argmax = tf.argmax(x, 1) indices.append(argmax) if i + 1 < k: x += tf.one_hot(argmax, depth, -1e9) return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
[ "def", "_my_top_k", "(", "x", ",", "k", ")", ":", "if", "k", ">", "10", ":", "return", "tf", ".", "nn", ".", "top_k", "(", "x", ",", "k", ")", "values", "=", "[", "]", "indices", "=", "[", "]", "depth", "=", "tf", ".", "shape", "(", "x", ")", "[", "1", "]", "for", "i", "in", "range", "(", "k", ")", ":", "values", ".", "append", "(", "tf", ".", "reduce_max", "(", "x", ",", "1", ")", ")", "argmax", "=", "tf", ".", "argmax", "(", "x", ",", "1", ")", "indices", ".", "append", "(", "argmax", ")", "if", "i", "+", "1", "<", "k", ":", "x", "+=", "tf", ".", "one_hot", "(", "argmax", ",", "depth", ",", "-", "1e9", ")", "return", "tf", ".", "stack", "(", "values", ",", "axis", "=", "1", ")", ",", "tf", ".", "to_int32", "(", "tf", ".", "stack", "(", "indices", ",", "axis", "=", "1", ")", ")" ]
GPU-compatible version of top-k that works for very small constant k. Calls argmax repeatedly. tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense, seems not to be, so if we use tf.nn.top_k, then both the top_k and its gradient go on cpu. Once this is not an issue, this function becomes obsolete and should be replaced by tf.nn.top_k. Args: x: a 2d Tensor. k: a small integer. Returns: values: a Tensor of shape [batch_size, k] indices: an int32 Tensor of shape [batch_size, k]
[ "GPU", "-", "compatible", "version", "of", "top", "-", "k", "that", "works", "for", "very", "small", "constant", "k", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L405-L434
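The iterative argmax-and-mask idea above is easy to see in a NumPy sketch of the same algorithm (invented values; NumPy is used only for illustration):

import numpy as np

def my_top_k(x, k):
    # Repeatedly take the row-wise argmax, masking out each winner with a
    # large negative value before the next round -- same trick as the TF code.
    x = x.copy()
    values, indices = [], []
    for i in range(k):
        values.append(x.max(axis=1))
        argmax = x.argmax(axis=1)
        indices.append(argmax)
        if i + 1 < k:
            x[np.arange(x.shape[0]), argmax] = -1e9
    return np.stack(values, axis=1), np.stack(indices, axis=1)

vals, idx = my_top_k(np.array([[0.1, 0.9, 0.4]]), k=2)
print(vals, idx)  # [[0.9 0.4]] [[1 2]]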
22,764
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
vq_gating
def vq_gating(x, num_experts, k, bneck, hparams=None, name="vq_gating"): """VQ gating. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer k: an integer - number of experts per example bneck: a bottleneck object hparams: optional hparams name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] extra_loss: a scalar auxiliary loss centroids: a Tensor of embedded centroids if hparams.residual_centroids is set, else None """ with tf.variable_scope(name, reuse=tf.AUTO_REUSE): if hparams.use_scales: scales = tf.get_variable( "scales", [num_experts], tf.float32, initializer=tf.ones_initializer()) scales = tf.nn.softmax(scales) hparams.scales = scales input_size = x.get_shape().as_list()[-1] batch_size = common_layers.shape_list(x)[0] if k > 1: # first project into a k-times wider dense layer, chop and discretize, and gate # TODO(avaswani): Maybe scale the embeddings flowing out of the experts. # We might want to do this to match the computation being done by topk x = tf.layers.dense(x, input_size * k) # x goes from [batch_size, input_size*k] to [batch_size*k, input_size] x = tf.reshape(x, [batch_size * k, input_size]) inputs = tf.expand_dims(x, axis=1) inputs = tf.expand_dims(inputs, axis=1) # VQ hparams hparams.z_size = int(math.log(num_experts, 2)) hparams.hidden_size = input_size hparams.top_k = k d = bneck.discrete_bottleneck(inputs) centroids = None exp_discrete = d["discrete"] embed_lookup = d["embed"] extra_loss = d["loss"] if hparams.residual_centroids: centroids = embed_lookup(exp_discrete) # gives the centroids top_k_indices = tf.squeeze(exp_discrete, axis=1) tf.summary.histogram("discrete_counts", top_k_indices) # if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1] # to [batch_size, k] if k > 1: top_k_indices = tf.reshape(top_k_indices, [batch_size, k]) # get the top k gates top_k_gates = tf.ones([batch_size, k]) # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the # positions corresponding to all but the top k experts per example. gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, num_experts) # Compute count per expert from the gates. # gates has shape [batch_size, num_experts] # count per expert has shape [num_experts, 1] count_per_expert = tf.reduce_sum(gates, axis=0) if hparams.use_scales: scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales) extra_loss += scale_loss if common_layers.should_generate_summaries(): tf.summary.histogram("vq_loss", extra_loss) if hparams.use_scales: tf.summary.histogram("scale_loss", scale_loss) return gates, extra_loss, centroids
python
def vq_gating(x, num_experts, k, bneck, hparams=None, name="vq_gating"): """VQ gating. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer k: an integer - number of experts per example bneck: a bottleneck object hparams: optional hparams name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] extra_loss: a scalar auxiliary loss centroids: a Tensor of embedded centroids if hparams.residual_centroids is set, else None """ with tf.variable_scope(name, reuse=tf.AUTO_REUSE): if hparams.use_scales: scales = tf.get_variable( "scales", [num_experts], tf.float32, initializer=tf.ones_initializer()) scales = tf.nn.softmax(scales) hparams.scales = scales input_size = x.get_shape().as_list()[-1] batch_size = common_layers.shape_list(x)[0] if k > 1: # first project into a k-times wider dense layer, chop and discretize, and gate # TODO(avaswani): Maybe scale the embeddings flowing out of the experts. # We might want to do this to match the computation being done by topk x = tf.layers.dense(x, input_size * k) # x goes from [batch_size, input_size*k] to [batch_size*k, input_size] x = tf.reshape(x, [batch_size * k, input_size]) inputs = tf.expand_dims(x, axis=1) inputs = tf.expand_dims(inputs, axis=1) # VQ hparams hparams.z_size = int(math.log(num_experts, 2)) hparams.hidden_size = input_size hparams.top_k = k d = bneck.discrete_bottleneck(inputs) centroids = None exp_discrete = d["discrete"] embed_lookup = d["embed"] extra_loss = d["loss"] if hparams.residual_centroids: centroids = embed_lookup(exp_discrete) # gives the centroids top_k_indices = tf.squeeze(exp_discrete, axis=1) tf.summary.histogram("discrete_counts", top_k_indices) # if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1] # to [batch_size, k] if k > 1: top_k_indices = tf.reshape(top_k_indices, [batch_size, k]) # get the top k gates top_k_gates = tf.ones([batch_size, k]) # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the # positions corresponding to all but the top k experts per example. gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, num_experts) # Compute count per expert from the gates. # gates has shape [batch_size, num_experts] # count per expert has shape [num_experts, 1] count_per_expert = tf.reduce_sum(gates, axis=0) if hparams.use_scales: scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales) extra_loss += scale_loss if common_layers.should_generate_summaries(): tf.summary.histogram("vq_loss", extra_loss) if hparams.use_scales: tf.summary.histogram("scale_loss", scale_loss) return gates, extra_loss, centroids
[ "def", "vq_gating", "(", "x", ",", "num_experts", ",", "k", ",", "bneck", ",", "hparams", "=", "None", ",", "name", "=", "\"vq_gating\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "if", "hparams", ".", "use_scales", ":", "scales", "=", "tf", ".", "get_variable", "(", "\"scales\"", ",", "[", "num_experts", "]", ",", "tf", ".", "float32", ",", "initializer", "=", "tf", ".", "ones_initializer", "(", ")", ")", "scales", "=", "tf", ".", "nn", ".", "softmax", "(", "scales", ")", "hparams", ".", "scales", "=", "scales", "input_size", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", "batch_size", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "0", "]", "if", "k", ">", "1", ":", "# first project into two dense layers, chop and discretize, and gate", "# TODO(avaswani): Maybe scale the embeddings flowing out of the experts.", "# We might want to do this to match the computation being done by topk", "x", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "input_size", "*", "k", ")", "# x goes from [batch_size, input_size*k] to [batch_size*k, input_size]", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch_size", "*", "k", ",", "input_size", "]", ")", "inputs", "=", "tf", ".", "expand_dims", "(", "x", ",", "axis", "=", "1", ")", "inputs", "=", "tf", ".", "expand_dims", "(", "inputs", ",", "axis", "=", "1", ")", "# VQ hparams", "hparams", ".", "z_size", "=", "int", "(", "math", ".", "log", "(", "num_experts", ",", "2", ")", ")", "hparams", ".", "hidden_size", "=", "input_size", "hparams", ".", "top_k", "=", "k", "d", "=", "bneck", ".", "discrete_bottleneck", "(", "inputs", ")", "centroids", "=", "None", "exp_discrete", "=", "d", "[", "\"discrete\"", "]", "embed_lookup", "=", "d", "[", "\"embed\"", "]", "extra_loss", "=", "d", "[", "\"loss\"", "]", "if", "hparams", ".", "residual_centroids", ":", "centroids", "=", "embed_lookup", "(", "exp_discrete", ")", "# gives the centroids", "top_k_indices", "=", "tf", ".", "squeeze", "(", "exp_discrete", ",", "axis", "=", "1", ")", "tf", ".", "summary", ".", "histogram", "(", "\"discrete_counts\"", ",", "top_k_indices", ")", "# if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1]", "# to [batch_size, k]", "if", "k", ">", "1", ":", "top_k_indices", "=", "tf", ".", "reshape", "(", "top_k_indices", ",", "[", "batch_size", ",", "k", "]", ")", "# get the top k gates", "top_k_gates", "=", "tf", ".", "ones", "(", "[", "batch_size", ",", "k", "]", ")", "# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the", "# positions corresponding to all but the top k experts per example.", "gates", "=", "_rowwise_unsorted_segment_sum", "(", "top_k_gates", ",", "top_k_indices", ",", "num_experts", ")", "# Compute count per expert from the gates.", "# gates has shape [batch_size, num_experts]", "# count per expert has shape [num_experts, 1]", "count_per_expert", "=", "tf", ".", "reduce_sum", "(", "gates", ",", "axis", "=", "0", ")", "if", "hparams", ".", "use_scales", ":", "scale_loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "to_float", "(", "count_per_expert", ")", "*", "scales", ")", "extra_loss", "+=", "scale_loss", "if", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "histogram", "(", "\"vq_loss\"", ",", "extra_loss", ")", "tf", ".", "summary", ".", "historgram", "(", "\"scale_loss\"", ",", "scale_loss", ")", "return", "gates", ",", "extra_loss", ",", 
"centroids" ]
VQ gating. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer k: an integer - number of experts per example bneck: a bottleneck object hparams: optional hparams name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] extra_loss: a scalar auxiliary loss centroids: a Tensor of embedded centroids if hparams.residual_centroids is set, else None
[ "VQ", "gating", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L437-L511
22,765
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
noisy_top_k_gating
def noisy_top_k_gating(x, num_experts, train, k=2, initializer=tf.zeros_initializer(), noisy_gating=True, noise_epsilon=1e-2, name=None): """Noisy top-k gating. See paper: https://arxiv.org/abs/1701.06538. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer train: a boolean - we only add noise at training time. k: an integer - number of experts per example initializer: an initializer noisy_gating: a boolean noise_epsilon: a float name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] load: a Tensor with shape [num_experts] """ with tf.variable_scope(name, default_name="noisy_top_k_gating"): input_size = x.get_shape().as_list()[-1] w_gate = tf.get_variable( "w_gate", [input_size, num_experts], tf.float32, initializer) if noisy_gating: w_noise = tf.get_variable("w_noise", [input_size, num_experts], tf.float32, initializer) clean_logits = tf.matmul(x, w_gate) if noisy_gating: raw_noise_stddev = tf.matmul(x, w_noise) noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + noise_epsilon) * (tf.to_float(train))) noisy_logits = clean_logits + ( tf.random_normal(tf.shape(clean_logits)) * noise_stddev) logits = noisy_logits if common_layers.should_generate_summaries(): tf.summary.histogram("noisy_logits", noisy_logits) tf.summary.histogram("noise_stddev", noise_stddev) else: logits = clean_logits top_logits, top_indices = _my_top_k(logits, min(k + 1, num_experts)) # top k logits has shape [batch, k] top_k_logits = tf.slice(top_logits, [0, 0], [-1, k]) top_k_indices = tf.slice(top_indices, [0, 0], [-1, k]) top_k_gates = tf.nn.softmax(top_k_logits) # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the # positions corresponding to all but the top k experts per example. gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, num_experts) if noisy_gating and k < num_experts: load = tf.reduce_sum( _prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits, k), 0) else: load = _gates_to_load(gates) if common_layers.should_generate_summaries(): tf.summary.histogram("importance", tf.reduce_sum(gates, 0)) tf.summary.histogram("load", load) return gates, load
python
def noisy_top_k_gating(x, num_experts, train, k=2, initializer=tf.zeros_initializer(), noisy_gating=True, noise_epsilon=1e-2, name=None): """Noisy top-k gating. See paper: https://arxiv.org/abs/1701.06538. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer train: a boolean - we only add noise at training time. k: an integer - number of experts per example initializer: an initializer noisy_gating: a boolean noise_epsilon: a float name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] load: a Tensor with shape [num_experts] """ with tf.variable_scope(name, default_name="noisy_top_k_gating"): input_size = x.get_shape().as_list()[-1] w_gate = tf.get_variable( "w_gate", [input_size, num_experts], tf.float32, initializer) if noisy_gating: w_noise = tf.get_variable("w_noise", [input_size, num_experts], tf.float32, initializer) clean_logits = tf.matmul(x, w_gate) if noisy_gating: raw_noise_stddev = tf.matmul(x, w_noise) noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + noise_epsilon) * (tf.to_float(train))) noisy_logits = clean_logits + ( tf.random_normal(tf.shape(clean_logits)) * noise_stddev) logits = noisy_logits if common_layers.should_generate_summaries(): tf.summary.histogram("noisy_logits", noisy_logits) tf.summary.histogram("noise_stddev", noise_stddev) else: logits = clean_logits top_logits, top_indices = _my_top_k(logits, min(k + 1, num_experts)) # top k logits has shape [batch, k] top_k_logits = tf.slice(top_logits, [0, 0], [-1, k]) top_k_indices = tf.slice(top_indices, [0, 0], [-1, k]) top_k_gates = tf.nn.softmax(top_k_logits) # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the # positions corresponding to all but the top k experts per example. gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, num_experts) if noisy_gating and k < num_experts: load = tf.reduce_sum( _prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits, k), 0) else: load = _gates_to_load(gates) if common_layers.should_generate_summaries(): tf.summary.histogram("importance", tf.reduce_sum(gates, 0)) tf.summary.histogram("load", load) return gates, load
[ "def", "noisy_top_k_gating", "(", "x", ",", "num_experts", ",", "train", ",", "k", "=", "2", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "noisy_gating", "=", "True", ",", "noise_epsilon", "=", "1e-2", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"noisy_top_k_gating\"", ")", ":", "input_size", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", "w_gate", "=", "tf", ".", "get_variable", "(", "\"w_gate\"", ",", "[", "input_size", ",", "num_experts", "]", ",", "tf", ".", "float32", ",", "initializer", ")", "if", "noisy_gating", ":", "w_noise", "=", "tf", ".", "get_variable", "(", "\"w_noise\"", ",", "[", "input_size", ",", "num_experts", "]", ",", "tf", ".", "float32", ",", "initializer", ")", "clean_logits", "=", "tf", ".", "matmul", "(", "x", ",", "w_gate", ")", "if", "noisy_gating", ":", "raw_noise_stddev", "=", "tf", ".", "matmul", "(", "x", ",", "w_noise", ")", "noise_stddev", "=", "(", "(", "tf", ".", "nn", ".", "softplus", "(", "raw_noise_stddev", ")", "+", "noise_epsilon", ")", "*", "(", "tf", ".", "to_float", "(", "train", ")", ")", ")", "noisy_logits", "=", "clean_logits", "+", "(", "tf", ".", "random_normal", "(", "tf", ".", "shape", "(", "clean_logits", ")", ")", "*", "noise_stddev", ")", "logits", "=", "noisy_logits", "if", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "histogram", "(", "\"noisy_logits\"", ",", "noisy_logits", ")", "tf", ".", "summary", ".", "histogram", "(", "\"noise_stddev\"", ",", "noise_stddev", ")", "else", ":", "logits", "=", "clean_logits", "top_logits", ",", "top_indices", "=", "_my_top_k", "(", "logits", ",", "min", "(", "k", "+", "1", ",", "num_experts", ")", ")", "# top k logits has shape [batch, k]", "top_k_logits", "=", "tf", ".", "slice", "(", "top_logits", ",", "[", "0", ",", "0", "]", ",", "[", "-", "1", ",", "k", "]", ")", "top_k_indices", "=", "tf", ".", "slice", "(", "top_indices", ",", "[", "0", ",", "0", "]", ",", "[", "-", "1", ",", "k", "]", ")", "top_k_gates", "=", "tf", ".", "nn", ".", "softmax", "(", "top_k_logits", ")", "# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the", "# positions corresponding to all but the top k experts per example.", "gates", "=", "_rowwise_unsorted_segment_sum", "(", "top_k_gates", ",", "top_k_indices", ",", "num_experts", ")", "if", "noisy_gating", "and", "k", "<", "num_experts", ":", "load", "=", "tf", ".", "reduce_sum", "(", "_prob_in_top_k", "(", "clean_logits", ",", "noisy_logits", ",", "noise_stddev", ",", "top_logits", ",", "k", ")", ",", "0", ")", "else", ":", "load", "=", "_gates_to_load", "(", "gates", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "histogram", "(", "\"importance\"", ",", "tf", ".", "reduce_sum", "(", "gates", ",", "0", ")", ")", "tf", ".", "summary", ".", "histogram", "(", "\"load\"", ",", "load", ")", "return", "gates", ",", "load" ]
Noisy top-k gating. See paper: https://arxiv.org/abs/1701.06538. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer train: a boolean - we only add noise at training time. k: an integer - number of experts per example initializer: an initializer noisy_gating: a boolean noise_epsilon: a float name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] load: a Tensor with shape [num_experts]
[ "Noisy", "top", "-", "k", "gating", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L514-L579
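A sketch of how noisy_top_k_gating might be wired into a load-balancing loss, mirroring the topk branch of local_moe further down; it assumes a TF 1.x graph context, and the batch/expert sizes and loss coefficient are invented for illustration:

import tensorflow as tf  # assumes TF 1.x, matching this file's API usage

x = tf.random_normal([32, 128])        # [batch_size, input_size], invented shapes
gates, load = noisy_top_k_gating(x, num_experts=16, train=True, k=2)
importance = tf.reduce_sum(gates, 0)   # total gate mass received by each expert
balance_loss = 1e-2 * (cv_squared(importance) + cv_squared(load))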
22,766
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
map_ids
def map_ids(x, indices, map_fn): """Apply a function to each coordinate id of a multidimensional tensor. This allows each sequence of a batch to be processed independently. This is similar to tf.map_fn, but for a tensor whose batch dim has been flattened. Warning: The indices ids have to be contiguous and ordered in memory, as the output vectors for the ids are simply concatenated after being processed. Ex: if your indices are [0,2,2,1,2,0], the output will contain the processed rows in the following order: [0,0,1,2,2,2] Args: x (Tensor): The tensor to be dispatched, of shape [length,...] indices (Tensor): An int32 tensor of size [length, 1] containing the batch coordinate of x map_fn (fct): Function called for every id of the original tensor. Takes as input a tensor of the same rank as x, of shape [length_id,...] with length_id <= length. Isn't called if length_id == 0 Returns: a tensor of the same shape as x, where each element has been processed """ indices = tf.reshape(indices, [-1]) t_i = tf.constant(0) # batch_coordinates start at 0 t_batch_size = tf.reduce_max(indices) + 1 # ta_stack_out will store the intermediate results for each individual id # As alternative to tf.TensorArray, scatter_update could potentially be used # but that would require an additional mutable tensor. ta_stack_out = tf.TensorArray( x.dtype, size=t_batch_size, ) # Then we iterate over each sequence individually and compute the # transformation for each id while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size) def body(t_i, ta_stack_out): """Loop body.""" # Gather the ids current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i))) t_row = tf.gather_nd(x, indices=current_ids) # TODO(epot): Should not call map_fn if t_row size is 0 # Apply transformation to each id # Restore batch_dim=1 as most function expect [batch_dim, length, ...] as # input t_row = tf.expand_dims(t_row, axis=0) t_row = map_fn(t_row) t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation ta_stack_out = ta_stack_out.write(t_i, t_row) return [tf.add(t_i, 1), ta_stack_out] # ++i # Run the loop, equivalent to: # stack_out = [] # while i < batch_size: # stack_out.expand(map_fn(x[indices==i])) _, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out]) # Merge all results return ta_stack_out.concat()
python
def map_ids(x, indices, map_fn): """Apply a function to each coordinate id of a multidimensional tensor. This allows each sequence of a batch to be processed independently. This is similar to tf.map_fn, but for a tensor whose batch dim has been flattened. Warning: The indices ids have to be contiguous and ordered in memory, as the output vectors for the ids are simply concatenated after being processed. Ex: if your indices are [0,2,2,1,2,0], the output will contain the processed rows in the following order: [0,0,1,2,2,2] Args: x (Tensor): The tensor to be dispatched, of shape [length,...] indices (Tensor): An int32 tensor of size [length, 1] containing the batch coordinate of x map_fn (fct): Function called for every id of the original tensor. Takes as input a tensor of the same rank as x, of shape [length_id,...] with length_id <= length. Isn't called if length_id == 0 Returns: a tensor of the same shape as x, where each element has been processed """ indices = tf.reshape(indices, [-1]) t_i = tf.constant(0) # batch_coordinates start at 0 t_batch_size = tf.reduce_max(indices) + 1 # ta_stack_out will store the intermediate results for each individual id # As alternative to tf.TensorArray, scatter_update could potentially be used # but that would require an additional mutable tensor. ta_stack_out = tf.TensorArray( x.dtype, size=t_batch_size, ) # Then we iterate over each sequence individually and compute the # transformation for each id while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size) def body(t_i, ta_stack_out): """Loop body.""" # Gather the ids current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i))) t_row = tf.gather_nd(x, indices=current_ids) # TODO(epot): Should not call map_fn if t_row size is 0 # Apply transformation to each id # Restore batch_dim=1 as most function expect [batch_dim, length, ...] as # input t_row = tf.expand_dims(t_row, axis=0) t_row = map_fn(t_row) t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation ta_stack_out = ta_stack_out.write(t_i, t_row) return [tf.add(t_i, 1), ta_stack_out] # ++i # Run the loop, equivalent to: # stack_out = [] # while i < batch_size: # stack_out.expand(map_fn(x[indices==i])) _, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out]) # Merge all results return ta_stack_out.concat()
[ "def", "map_ids", "(", "x", ",", "indices", ",", "map_fn", ")", ":", "indices", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "-", "1", "]", ")", "t_i", "=", "tf", ".", "constant", "(", "0", ")", "# batch_coordinates start at 0", "t_batch_size", "=", "tf", ".", "reduce_max", "(", "indices", ")", "+", "1", "# ta_stack_out will store the intermediate results for each individual id", "# As alternative to tf.TensorArray, scatter_update could potentially be used", "# but that would require an additional mutable tensor.", "ta_stack_out", "=", "tf", ".", "TensorArray", "(", "x", ".", "dtype", ",", "size", "=", "t_batch_size", ",", ")", "# Then we iterate over each sequence individually and compute the", "# transformation for each id", "while_condition", "=", "lambda", "t_i", ",", "*", "args", ":", "tf", ".", "less", "(", "t_i", ",", "t_batch_size", ")", "def", "body", "(", "t_i", ",", "ta_stack_out", ")", ":", "\"\"\"Loop body.\"\"\"", "# Gather the ids", "current_ids", "=", "tf", ".", "to_int32", "(", "tf", ".", "where", "(", "tf", ".", "equal", "(", "indices", ",", "t_i", ")", ")", ")", "t_row", "=", "tf", ".", "gather_nd", "(", "x", ",", "indices", "=", "current_ids", ")", "# TODO(epot): Should not call map_fn if t_row size is 0", "# Apply transformation to each id", "# Restore batch_dim=1 as most function expect [batch_dim, length, ...] as", "# input", "t_row", "=", "tf", ".", "expand_dims", "(", "t_row", ",", "axis", "=", "0", ")", "t_row", "=", "map_fn", "(", "t_row", ")", "t_row", "=", "tf", ".", "squeeze", "(", "t_row", ",", "axis", "=", "0", ")", "# Squeeze for concatenation", "ta_stack_out", "=", "ta_stack_out", ".", "write", "(", "t_i", ",", "t_row", ")", "return", "[", "tf", ".", "add", "(", "t_i", ",", "1", ")", ",", "ta_stack_out", "]", "# ++i", "# Run the loop, equivalent to:", "# stack_out = []", "# while i < batch_size:", "# stack_out.expand(map_fn(x[indices==i]))", "_", ",", "ta_stack_out", "=", "tf", ".", "while_loop", "(", "while_condition", ",", "body", ",", "[", "t_i", ",", "ta_stack_out", "]", ")", "# Merge all results", "return", "ta_stack_out", ".", "concat", "(", ")" ]
Apply a function to each coordinate id of a multidimensional tensor. This allows each sequence of a batch to be processed independently. This is similar to tf.map_fn, but for a tensor whose batch dim has been flattened. Warning: The indices ids have to be contiguous and ordered in memory, as the output vectors for the ids are simply concatenated after being processed. Ex: if your indices are [0,2,2,1,2,0], the output will contain the processed rows in the following order: [0,0,1,2,2,2] Args: x (Tensor): The tensor to be dispatched, of shape [length,...] indices (Tensor): An int32 tensor of size [length, 1] containing the batch coordinate of x map_fn (fct): Function called for every id of the original tensor. Takes as input a tensor of the same rank as x, of shape [length_id,...] with length_id <= length. Isn't called if length_id == 0 Returns: a tensor of the same shape as x, where each element has been processed
[ "Apply", "a", "function", "to", "each", "coordinate", "ids", "of", "a", "multidimensional", "tensor", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L665-L730
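The docstring's [0,2,2,1,2,0] example is easy to reproduce with a NumPy rendering of the same dispatch-process-concatenate logic (NumPy is used only for illustration; the values are invented):

import numpy as np

def map_ids_np(x, indices, map_fn):
    # Process the rows belonging to each id independently, then concatenate
    # the results in id order -- the same contract map_ids documents.
    out = []
    for i in range(indices.max() + 1):
        out.append(map_fn(x[indices == i]))
    return np.concatenate(out, axis=0)

x = np.arange(6.0).reshape(6, 1)
indices = np.array([0, 2, 2, 1, 2, 0])
print(map_ids_np(x, indices, lambda rows: rows * 10))
# rows regrouped as ids [0,0,1,2,2,2]: [[0.],[50.],[30.],[10.],[20.],[40.]]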
22,767
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
ffn_expert_fn
def ffn_expert_fn(input_size, hidden_sizes, output_size, hidden_activation=tf.nn.relu): """Returns a function that creates a feed-forward network. Use this function to create the expert_fn argument to distributed_moe. Args: input_size: an integer hidden_sizes: a list of integers output_size: an integer hidden_activation: a unary function. Returns: a unary function """ def my_fn(x): layer_sizes = [input_size] + hidden_sizes + [output_size] for i in range(1 + len(hidden_sizes)): w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2], tf.float32) x = tf.matmul(x, w) if i < len(hidden_sizes): x = hidden_activation(x) if layer_sizes[i] != input_size: x *= (layer_sizes[i] / float(input_size))**-0.5 return x return my_fn
python
def ffn_expert_fn(input_size, hidden_sizes, output_size, hidden_activation=tf.nn.relu): """Returns a function that creates a feed-forward network. Use this function to create the expert_fn argument to distributed_moe. Args: input_size: an integer hidden_sizes: a list of integers output_size: an integer hidden_activation: a unary function. Returns: a unary function """ def my_fn(x): layer_sizes = [input_size] + hidden_sizes + [output_size] for i in range(1 + len(hidden_sizes)): w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2], tf.float32) x = tf.matmul(x, w) if i < len(hidden_sizes): x = hidden_activation(x) if layer_sizes[i] != input_size: x *= (layer_sizes[i] / float(input_size))**-0.5 return x return my_fn
[ "def", "ffn_expert_fn", "(", "input_size", ",", "hidden_sizes", ",", "output_size", ",", "hidden_activation", "=", "tf", ".", "nn", ".", "relu", ")", ":", "def", "my_fn", "(", "x", ")", ":", "layer_sizes", "=", "[", "input_size", "]", "+", "hidden_sizes", "+", "[", "output_size", "]", "for", "i", "in", "range", "(", "1", "+", "len", "(", "hidden_sizes", ")", ")", ":", "w", "=", "tf", ".", "get_variable", "(", "\"w_%d\"", "%", "i", ",", "layer_sizes", "[", "i", ":", "i", "+", "2", "]", ",", "tf", ".", "float32", ")", "x", "=", "tf", ".", "matmul", "(", "x", ",", "w", ")", "if", "i", "<", "len", "(", "hidden_sizes", ")", ":", "x", "=", "hidden_activation", "(", "x", ")", "if", "layer_sizes", "[", "i", "]", "!=", "input_size", ":", "x", "*=", "(", "layer_sizes", "[", "i", "]", "/", "float", "(", "input_size", ")", ")", "**", "-", "0.5", "return", "x", "return", "my_fn" ]
Returns a function that creates a feed-forward network. Use this function to create the expert_fn argument to distributed_moe. Args: input_size: an integer hidden_sizes: a list of integers output_size: an integer hidden_activation: a unary function. Returns: a unary function
[ "Returns", "a", "function", "that", "creates", "a", "feed", "-", "forward", "network", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L956-L983
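A short usage sketch: the returned closure creates its weights lazily via tf.get_variable, so it is meant to be called by local_moe under per-expert variable scopes (the sizes below are invented):

# Two-layer feed-forward expert: 128 -> 512 -> 128 (hypothetical sizes).
expert_fn = ffn_expert_fn(input_size=128, hidden_sizes=[512], output_size=128)
# local_moe (below) calls expert_fn once per expert on dispatched rows of
# shape [expert_batch, 128], each call under its own variable scope.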
22,768
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
flatten_all_but_last
def flatten_all_but_last(a): """Flatten all dimensions of a except the last.""" ret = tf.reshape(a, [-1, tf.shape(a)[-1]]) if not tf.executing_eagerly(): ret.set_shape([None] + a.get_shape().as_list()[-1:]) return ret
python
def flatten_all_but_last(a): """Flatten all dimensions of a except the last.""" ret = tf.reshape(a, [-1, tf.shape(a)[-1]]) if not tf.executing_eagerly(): ret.set_shape([None] + a.get_shape().as_list()[-1:]) return ret
[ "def", "flatten_all_but_last", "(", "a", ")", ":", "ret", "=", "tf", ".", "reshape", "(", "a", ",", "[", "-", "1", ",", "tf", ".", "shape", "(", "a", ")", "[", "-", "1", "]", "]", ")", "if", "not", "tf", ".", "executing_eagerly", "(", ")", ":", "ret", ".", "set_shape", "(", "[", "None", "]", "+", "a", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", ":", "]", ")", "return", "ret" ]
Flatten all dimensions of a except the last.
[ "Flatten", "all", "dimensions", "of", "a", "except", "the", "last", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L986-L991
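A shape-only illustration of flatten_all_but_last, using NumPy in place of TensorFlow:

import numpy as np

a = np.zeros((2, 3, 4))
print(a.reshape(-1, a.shape[-1]).shape)  # (6, 4): all leading dims collapsed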
22,769
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
local_moe
def local_moe(x, train, expert_fn, num_experts, k=1, loss_coef=1e-2, hparams=None, pass_x=True, pass_gates=False, additional_dispatch_params=None, name=None): """Call a local mixture of experts. Args: x: a tensor with shape [... , input_size] train: a boolean scalar. expert_fn: a function. num_experts: an integer - number of experts k: an integer - how many experts to use for each batch element loss_coef: a scalar - multiplier on load-balancing losses hparams: optional hparams for vq gating pass_x: a boolean. If true, x will also be dispatched to the experts. pass_gates: a boolean. If true, gates will be passed to experts. Might be necessary when dealing with sparse encoder-encoder decoder attention additional_dispatch_params: The extra tensors that need to be sent to each expert. Examples include batch coordinates (see common_attention.local_expert_attention) name: a string Returns: y: a tensor. Has the same shape as x, except for the last dimension, which is output_size. extra_training_loss: a scalar. This should be added into the overall training loss of the model. The backpropagation of this loss encourages all experts to be approximately equally used across a batch. """ bneck = DiscreteBottleneck(hparams) with tf.variable_scope(name, default_name="local_moe"): centroids = None x_flat = flatten_all_but_last(x) if hparams.gating_type == "topk": tf.logging.info("Using noisy top_k with k = {}".format(k)) # The gates indicate which batch elements go to which tensors. # load is a measure of approximately how many examples go to each expert gates, load = noisy_top_k_gating( x_flat, num_experts, train, k, initializer=tf.zeros_initializer(), noisy_gating=True, noise_epsilon=1e-2) importance = tf.reduce_sum(gates, 0) loss = loss_coef * (cv_squared(importance) + cv_squared(load)) else: assert hparams.gating_type == "vq" tf.logging.info("Using VQ gating") gates, loss, centroids = vq_gating( x_flat, num_experts, k, bneck, hparams=hparams) loss *= loss_coef # Shuffle data between datashards and experts. dispatcher = SparseDispatcher(num_experts, gates) # Set up expert_fn arguments expert_kwargs = {} if pass_x: expert_kwargs["x"] = dispatcher.dispatch(x_flat) if pass_gates: expert_kwargs["gates"] = dispatcher.expert_to_gates() for key, val in six.iteritems(additional_dispatch_params or {}): val = flatten_all_but_last(val) expert_kwargs[key] = dispatcher.dispatch(val) ep = Parallelism([DEFAULT_DEV_STRING] * num_experts, reuse=None) expert_outputs = ep(expert_fn, **expert_kwargs) y_flat = dispatcher.combine(expert_outputs) if centroids is not None: centroids = tf.squeeze(centroids, axis=[1, 2]) y_flat += centroids y = common_layers.reshape_like(y_flat, x) return y, loss
python
def local_moe(x, train, expert_fn, num_experts, k=1, loss_coef=1e-2, hparams=None, pass_x=True, pass_gates=False, additional_dispatch_params=None, name=None): """Call a local mixture of experts. Args: x: a tensor with shape [... , input_size] train: a boolean scalar. expert_fn: a function. num_experts: an integer - number of experts k: an integer - how many experts to use for each batch element loss_coef: a scalar - multiplier on load-balancing losses hparams: optional hparams for vq gating pass_x: a boolean. If true, x will also be dispatched to the experts. pass_gates: a boolean. If true, gates will be passed to experts. Might be necessary when dealing with sparse encoder-encoder decoder attention additional_dispatch_params: The extra tensors that need to be sent to each expert. Examples include batch coordinates (see common_attention.local_expert_attention) name: a string Returns: y: a tensor. Has the same shape as x, except for the last dimension, which is output_size. extra_training_loss: a scalar. This should be added into the overall training loss of the model. The backpropagation of this loss encourages all experts to be approximately equally used across a batch. """ bneck = DiscreteBottleneck(hparams) with tf.variable_scope(name, default_name="local_moe"): centroids = None x_flat = flatten_all_but_last(x) if hparams.gating_type == "topk": tf.logging.info("Using noisy top_k with k = {}".format(k)) # The gates indicate which batch elements go to which tensors. # load is a measure of approximately how many examples go to each expert gates, load = noisy_top_k_gating( x_flat, num_experts, train, k, initializer=tf.zeros_initializer(), noisy_gating=True, noise_epsilon=1e-2) importance = tf.reduce_sum(gates, 0) loss = loss_coef * (cv_squared(importance) + cv_squared(load)) else: assert hparams.gating_type == "vq" tf.logging.info("Using VQ gating") gates, loss, centroids = vq_gating( x_flat, num_experts, k, bneck, hparams=hparams) loss *= loss_coef # Shuffle data between datashards and experts. dispatcher = SparseDispatcher(num_experts, gates) # Set up expert_fn arguments expert_kwargs = {} if pass_x: expert_kwargs["x"] = dispatcher.dispatch(x_flat) if pass_gates: expert_kwargs["gates"] = dispatcher.expert_to_gates() for key, val in six.iteritems(additional_dispatch_params or {}): val = flatten_all_but_last(val) expert_kwargs[key] = dispatcher.dispatch(val) ep = Parallelism([DEFAULT_DEV_STRING] * num_experts, reuse=None) expert_outputs = ep(expert_fn, **expert_kwargs) y_flat = dispatcher.combine(expert_outputs) if centroids is not None: centroids = tf.squeeze(centroids, axis=[1, 2]) y_flat += centroids y = common_layers.reshape_like(y_flat, x) return y, loss
[ "def", "local_moe", "(", "x", ",", "train", ",", "expert_fn", ",", "num_experts", ",", "k", "=", "1", ",", "loss_coef", "=", "1e-2", ",", "hparams", "=", "None", ",", "pass_x", "=", "True", ",", "pass_gates", "=", "False", ",", "additional_dispatch_params", "=", "None", ",", "name", "=", "None", ")", ":", "bneck", "=", "DiscreteBottleneck", "(", "hparams", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"local_moe\"", ")", ":", "centroids", "=", "None", "x_flat", "=", "flatten_all_but_last", "(", "x", ")", "if", "hparams", ".", "gating_type", "==", "\"topk\"", ":", "tf", ".", "logging", ".", "info", "(", "\"Using noisy top_k with k = {}\"", ".", "format", "(", "k", ")", ")", "# The gates indicate which batch elements go to which tensors.", "# load is a measure of approximately how many examples go to each expert", "gates", ",", "load", "=", "noisy_top_k_gating", "(", "x_flat", ",", "num_experts", ",", "train", ",", "k", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "noisy_gating", "=", "True", ",", "noise_epsilon", "=", "1e-2", ")", "importance", "=", "tf", ".", "reduce_sum", "(", "gates", ",", "0", ")", "loss", "=", "loss_coef", "*", "(", "cv_squared", "(", "importance", ")", "+", "cv_squared", "(", "load", ")", ")", "else", ":", "assert", "hparams", ".", "gating_type", "==", "\"vq\"", "tf", ".", "logging", ".", "info", "(", "\"Using VQ gating\"", ")", "gates", ",", "loss", ",", "centroids", "=", "vq_gating", "(", "x_flat", ",", "num_experts", ",", "k", ",", "bneck", ",", "hparams", "=", "hparams", ")", "loss", "*=", "loss_coef", "# Shuffle data between datashards and experts.", "dispatcher", "=", "SparseDispatcher", "(", "num_experts", ",", "gates", ")", "# Set up expert_fn arguments", "expert_kwargs", "=", "{", "}", "if", "pass_x", ":", "expert_kwargs", "[", "\"x\"", "]", "=", "dispatcher", ".", "dispatch", "(", "x_flat", ")", "if", "pass_gates", ":", "expert_kwargs", "[", "\"gates\"", "]", "=", "dispatcher", ".", "expert_to_gates", "(", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "additional_dispatch_params", "or", "{", "}", ")", ":", "val", "=", "flatten_all_but_last", "(", "val", ")", "expert_kwargs", "[", "key", "]", "=", "dispatcher", ".", "dispatch", "(", "val", ")", "ep", "=", "Parallelism", "(", "[", "DEFAULT_DEV_STRING", "]", "*", "num_experts", ",", "reuse", "=", "None", ")", "expert_outputs", "=", "ep", "(", "expert_fn", ",", "*", "*", "expert_kwargs", ")", "y_flat", "=", "dispatcher", ".", "combine", "(", "expert_outputs", ")", "if", "centroids", "is", "not", "None", ":", "centroids", "=", "tf", ".", "squeeze", "(", "centroids", ",", "axis", "=", "[", "1", ",", "2", "]", ")", "y_flat", "+=", "centroids", "y", "=", "common_layers", ".", "reshape_like", "(", "y_flat", ",", "x", ")", "return", "y", ",", "loss" ]
Call a local mixture of experts. Args: x: a tensor with shape [... , input_size] train: a boolean scalar. expert_fn: a function. num_experts: an integer - number of experts k: an integer - how many experts to use for each batch element loss_coef: a scalar - multiplier on load-balancing losses hparams: optional hparams for vq gating pass_x: a boolean. If true, x will also be dispatched to the experts. pass_gates: a boolean. If true, gates will be passed to experts. Might be necessary when dealing with sparse encoder-encoder decoder attention additional_dispatch_params: The extra tensors that need to be sent to each expert. Examples include batch coordinates (see common_attention.local_expert_attention) name: a string Returns: y: a tensor. Has the same shape as x, except for the last dimension, which is output_size. extra_training_loss: a scalar. This should be added into the overall training loss of the model. The backpropagation of this loss encourages all experts to be approximately equally used across a batch.
[ "Call", "a", "local", "mixture", "of", "experts", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L994-L1074
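A minimal sketch of calling local_moe with the ffn expert above, assuming a TF 1.x graph context; the shapes are invented, and moe_hparams is a hypothetical HParams object (it must carry gating_type="topk" plus whatever fields DiscreteBottleneck expects; it is not defined in this file):

import tensorflow as tf  # assumes TF 1.x

x = tf.random_normal([8, 100, 128])   # [batch, length, input_size], invented
expert_fn = ffn_expert_fn(128, [512], 128)
# moe_hparams: hypothetical HParams with gating_type="topk" (see above).
y, extra_loss = local_moe(
    x, train=True, expert_fn=expert_fn, num_experts=16, k=2,
    loss_coef=1e-2, hparams=moe_hparams, name="moe_layer")
# y has shape [8, 100, 128]; extra_loss is added to the training loss.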
22,770
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
reduce_by_device
def reduce_by_device(parallelism, data, reduce_fn): """Reduces data per device. This can be useful, for example, if we want to all-reduce n tensors on k<n devices (like during eval when we have only one device). We call reduce_by_device() to first sum the tensors per device, then call our usual all-reduce operation to create one sum per device, followed by expand_by_device, to create the appropriate number of pointers to these results. See all_reduce_ring() below for an example of how this is used. Args: parallelism: an expert_utils.Parallelism object data: a list of Tensors with length parallelism.n reduce_fn: a function taking a list of Tensors. e.g. tf.add_n Returns: device_parallelism: a Parallelism object with each device listed only once. reduced_data: A list of Tensors, one per device. """ unique_devices = [] device_to_data = {} for dev, datum in zip(parallelism.devices, data): if dev not in device_to_data: unique_devices.append(dev) device_to_data[dev] = [datum] else: device_to_data[dev].append(datum) device_parallelism = Parallelism(unique_devices) grouped_data = [device_to_data[dev] for dev in unique_devices] return device_parallelism, device_parallelism(reduce_fn, grouped_data)
python
def reduce_by_device(parallelism, data, reduce_fn): """Reduces data per device. This can be useful, for example, if we want to all-reduce n tensors on k<n devices (like during eval when we have only one device). We call reduce_by_device() to first sum the tensors per device, then call our usual all-reduce operation to create one sum per device, followed by expand_by_device, to create the appropriate number of pointers to these results. See all_reduce_ring() below for an example of how this is used. Args: parallelism: an expert_utils.Parallelism object data: a list of Tensors with length parallelism.n reduce_fn: a function taking a list of Tensors. e.g. tf.add_n Returns: device_parallelism: a Parallelism object with each device listed only once. reduced_data: A list of Tensors, one per device. """ unique_devices = [] device_to_data = {} for dev, datum in zip(parallelism.devices, data): if dev not in device_to_data: unique_devices.append(dev) device_to_data[dev] = [datum] else: device_to_data[dev].append(datum) device_parallelism = Parallelism(unique_devices) grouped_data = [device_to_data[dev] for dev in unique_devices] return device_parallelism, device_parallelism(reduce_fn, grouped_data)
[ "def", "reduce_by_device", "(", "parallelism", ",", "data", ",", "reduce_fn", ")", ":", "unique_devices", "=", "[", "]", "device_to_data", "=", "{", "}", "for", "dev", ",", "datum", "in", "zip", "(", "parallelism", ".", "devices", ",", "data", ")", ":", "if", "dev", "not", "in", "device_to_data", ":", "unique_devices", ".", "append", "(", "dev", ")", "device_to_data", "[", "dev", "]", "=", "[", "datum", "]", "else", ":", "device_to_data", "[", "dev", "]", ".", "append", "(", "datum", ")", "device_parallelism", "=", "Parallelism", "(", "unique_devices", ")", "grouped_data", "=", "[", "device_to_data", "[", "dev", "]", "for", "dev", "in", "unique_devices", "]", "return", "device_parallelism", ",", "device_parallelism", "(", "reduce_fn", ",", "grouped_data", ")" ]
Reduces data per device. This can be useful, for example, if we want to all-reduce n tensors on k<n devices (like during eval when we have only one device). We call reduce_by_device() to first sum the tensors per device, then call our usual all-reduce operation to create one sum per device, followed by expand_by_device, to create the appropriate number of pointers to these results. See all_reduce_ring() below for an example of how this is used. Args: parallelism: an expert_utils.Parallelism object data: a list of Tensors with length parallelism.n reduce_fn: a function taking a list of Tensors, e.g. tf.add_n Returns: device_parallelism: a Parallelism object with each device listed only once. reduced_data: A list of Tensors, one per device.
[ "Reduces", "data", "per", "device", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1414-L1443
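A minimal pure-Python sketch of the per-device grouping that reduce_by_device performs before applying reduce_fn; the device strings and scalar data below are hypothetical stand-ins for Tensors, so this runs without TensorFlow:

def group_by_device(devices, data):
    # Mirror reduce_by_device: keep first-seen device order, bucket the data.
    unique_devices, device_to_data = [], {}
    for dev, datum in zip(devices, data):
        if dev not in device_to_data:
            unique_devices.append(dev)
            device_to_data[dev] = [datum]
        else:
            device_to_data[dev].append(datum)
    return unique_devices, [device_to_data[d] for d in unique_devices]

devices = ["/gpu:0", "/gpu:0", "/gpu:1"]
unique, grouped = group_by_device(devices, [1.0, 2.0, 3.0])
assert unique == ["/gpu:0", "/gpu:1"]
assert [sum(g) for g in grouped] == [3.0, 3.0]  # reduce_fn = sum per device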
22,771
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
all_reduce_ring
def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True): """Compute the sum of all Tensors and put the result everywhere. Assumes that the devices are connected in a ring. Args: x: a list of Tensors with length parallelism.n parallelism: a expert_utils.Parallelism object. maybe_reduce: a boolean - first reduce per device. use_bfloat16: a boolean - saves bandwidth but loses precision Returns: a list of Tensors with length parallelism.n """ if parallelism.n == 1: return x if maybe_reduce: original_parallelism = parallelism parallelism, x = reduce_by_device(parallelism, x, tf.add_n) if parallelism.n == 1: y = x else: # first shard the input: x_flat = parallelism(tf.reshape, x, [[-1]] * parallelism.n) # [device, shard] x_split = parallelism( common_layers.approximate_split, x_flat, parallelism.n, 0) def _step(source_replica, target_replica, x_split, op="plus_eq"): """Helper function - one step of summing or copying. If op == "plus_eq", then adds source_replica into target_replica If op == "copy", then copies source_replica onto target_replica These operations happen for all shards. The replica numbers are offset by the shard numbers to keep all physical links busy. Args: source_replica: an integer target_replica: an integer x_split: a list of lists of tensors op: a string """ for shard in range(parallelism.n): source_device = (shard + source_replica) % parallelism.n target_device = (shard + target_replica) % parallelism.n source = x_split[source_device][shard] if use_bfloat16: with tf.device(parallelism.devices[source_device]): source = tf.to_bfloat16(source) with tf.device(parallelism.devices[target_device]): source = tf.to_float(source) if op == "plus_eq": x_split[target_device][shard] += source else: assert op == "copy" x_split[target_device][shard] = tf.identity(source) center = parallelism.n // 2 # accumulate everything towards the center. for i in reversed(range(center, parallelism.n - 1)): _step(i + 1, i, x_split, op="plus_eq") for i in range(center): _step(i, i + 1, x_split, op="plus_eq") # copy everything away from the center. for i in range(center, parallelism.n - 1): _step(i, i + 1, x_split, op="copy") for i in reversed(range(center)): _step(i + 1, i, x_split, op="copy") x_concat = parallelism(tf.concat, x_split, 0) y = parallelism(common_layers.reshape_like_all_dims, x_concat, x) if maybe_reduce: y = expand_by_device(original_parallelism, parallelism, y) return y
python
def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True): """Compute the sum of all Tensors and put the result everywhere. Assumes that the devices are connected in a ring. Args: x: a list of Tensors with length parallelism.n parallelism: a expert_utils.Parallelism object. maybe_reduce: a boolean - first reduce per device. use_bfloat16: a boolean - saves bandwidth but loses precision Returns: a list of Tensors with length parallelism.n """ if parallelism.n == 1: return x if maybe_reduce: original_parallelism = parallelism parallelism, x = reduce_by_device(parallelism, x, tf.add_n) if parallelism.n == 1: y = x else: # first shard the input: x_flat = parallelism(tf.reshape, x, [[-1]] * parallelism.n) # [device, shard] x_split = parallelism( common_layers.approximate_split, x_flat, parallelism.n, 0) def _step(source_replica, target_replica, x_split, op="plus_eq"): """Helper function - one step of summing or copying. If op == "plus_eq", then adds source_replica into target_replica If op == "copy", then copies source_replica onto target_replica These operations happen for all shards. The replica numbers are offset by the shard numbers to keep all physical links busy. Args: source_replica: an integer target_replica: an integer x_split: a list of lists of tensors op: a string """ for shard in range(parallelism.n): source_device = (shard + source_replica) % parallelism.n target_device = (shard + target_replica) % parallelism.n source = x_split[source_device][shard] if use_bfloat16: with tf.device(parallelism.devices[source_device]): source = tf.to_bfloat16(source) with tf.device(parallelism.devices[target_device]): source = tf.to_float(source) if op == "plus_eq": x_split[target_device][shard] += source else: assert op == "copy" x_split[target_device][shard] = tf.identity(source) center = parallelism.n // 2 # accumulate everything towards the center. for i in reversed(range(center, parallelism.n - 1)): _step(i + 1, i, x_split, op="plus_eq") for i in range(center): _step(i, i + 1, x_split, op="plus_eq") # copy everything away from the center. for i in range(center, parallelism.n - 1): _step(i, i + 1, x_split, op="copy") for i in reversed(range(center)): _step(i + 1, i, x_split, op="copy") x_concat = parallelism(tf.concat, x_split, 0) y = parallelism(common_layers.reshape_like_all_dims, x_concat, x) if maybe_reduce: y = expand_by_device(original_parallelism, parallelism, y) return y
[ "def", "all_reduce_ring", "(", "x", ",", "parallelism", ",", "maybe_reduce", "=", "True", ",", "use_bfloat16", "=", "True", ")", ":", "if", "parallelism", ".", "n", "==", "1", ":", "return", "x", "if", "maybe_reduce", ":", "original_parallelism", "=", "parallelism", "parallelism", ",", "x", "=", "reduce_by_device", "(", "parallelism", ",", "x", ",", "tf", ".", "add_n", ")", "if", "parallelism", ".", "n", "==", "1", ":", "y", "=", "x", "else", ":", "# first shard the input:", "x_flat", "=", "parallelism", "(", "tf", ".", "reshape", ",", "x", ",", "[", "[", "-", "1", "]", "]", "*", "parallelism", ".", "n", ")", "# [device, shard]", "x_split", "=", "parallelism", "(", "common_layers", ".", "approximate_split", ",", "x_flat", ",", "parallelism", ".", "n", ",", "0", ")", "def", "_step", "(", "source_replica", ",", "target_replica", ",", "x_split", ",", "op", "=", "\"plus_eq\"", ")", ":", "\"\"\"Helper function - one step of summing or copying.\n\n If op == \"plus_eq\", then adds source_replica into target_replica\n If op == \"copy\", then copies source_replica onto target_replica\n\n These operations happen for all shards. The replica numbers are offset\n by the shard numbers to keep all physical links busy.\n\n Args:\n source_replica: an integer\n target_replica: an integer\n x_split: a list of lists of tensors\n op: a string\n \"\"\"", "for", "shard", "in", "range", "(", "parallelism", ".", "n", ")", ":", "source_device", "=", "(", "shard", "+", "source_replica", ")", "%", "parallelism", ".", "n", "target_device", "=", "(", "shard", "+", "target_replica", ")", "%", "parallelism", ".", "n", "source", "=", "x_split", "[", "source_device", "]", "[", "shard", "]", "if", "use_bfloat16", ":", "with", "tf", ".", "device", "(", "parallelism", ".", "devices", "[", "source_device", "]", ")", ":", "source", "=", "tf", ".", "to_bfloat16", "(", "source", ")", "with", "tf", ".", "device", "(", "parallelism", ".", "devices", "[", "target_device", "]", ")", ":", "source", "=", "tf", ".", "to_float", "(", "source", ")", "if", "op", "==", "\"plus_eq\"", ":", "x_split", "[", "target_device", "]", "[", "shard", "]", "+=", "source", "else", ":", "assert", "op", "==", "\"copy\"", "x_split", "[", "target_device", "]", "[", "shard", "]", "=", "tf", ".", "identity", "(", "source", ")", "center", "=", "parallelism", ".", "n", "//", "2", "# accumulate everything towards the center.", "for", "i", "in", "reversed", "(", "range", "(", "center", ",", "parallelism", ".", "n", "-", "1", ")", ")", ":", "_step", "(", "i", "+", "1", ",", "i", ",", "x_split", ",", "op", "=", "\"plus_eq\"", ")", "for", "i", "in", "range", "(", "center", ")", ":", "_step", "(", "i", ",", "i", "+", "1", ",", "x_split", ",", "op", "=", "\"plus_eq\"", ")", "# copy everything away from the center.", "for", "i", "in", "range", "(", "center", ",", "parallelism", ".", "n", "-", "1", ")", ":", "_step", "(", "i", ",", "i", "+", "1", ",", "x_split", ",", "op", "=", "\"copy\"", ")", "for", "i", "in", "reversed", "(", "range", "(", "center", ")", ")", ":", "_step", "(", "i", "+", "1", ",", "i", ",", "x_split", ",", "op", "=", "\"copy\"", ")", "x_concat", "=", "parallelism", "(", "tf", ".", "concat", ",", "x_split", ",", "0", ")", "y", "=", "parallelism", "(", "common_layers", ".", "reshape_like_all_dims", ",", "x_concat", ",", "x", ")", "if", "maybe_reduce", ":", "y", "=", "expand_by_device", "(", "original_parallelism", ",", "parallelism", ",", "y", ")", "return", "y" ]
Compute the sum of all Tensors and put the result everywhere. Assumes that the devices are connected in a ring. Args: x: a list of Tensors with length parallelism.n parallelism: an expert_utils.Parallelism object. maybe_reduce: a boolean - first reduce per device. use_bfloat16: a boolean - saves bandwidth but loses precision Returns: a list of Tensors with length parallelism.n
[ "Compute", "the", "sum", "of", "all", "Tensors", "and", "put", "the", "result", "everywhere", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1463-L1537
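The _step scheduling above is the subtle part: replica numbers are offset by shard numbers so every physical link stays busy. A pure-Python simulation of that schedule (scalars standing in for tensor shards; n and the values are hypothetical) confirms every device ends up with every shard's total:

n = 4
x_split = [[float(d * 10 + s) for s in range(n)] for d in range(n)]
expected = [sum(x_split[d][s] for d in range(n)) for s in range(n)]

def step(src, dst, op):
    # For shard s, "replica r" lives on device (s + r) % n.
    for shard in range(n):
        s_dev, t_dev = (shard + src) % n, (shard + dst) % n
        if op == "plus_eq":
            x_split[t_dev][shard] += x_split[s_dev][shard]
        else:  # "copy"
            x_split[t_dev][shard] = x_split[s_dev][shard]

center = n // 2
for i in reversed(range(center, n - 1)):  # accumulate towards the center
    step(i + 1, i, "plus_eq")
for i in range(center):
    step(i, i + 1, "plus_eq")
for i in range(center, n - 1):            # copy away from the center
    step(i, i + 1, "copy")
for i in reversed(range(center)):
    step(i + 1, i, "copy")

assert all(x_split[d] == expected for d in range(n))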
22,772
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
Parallelism._maybe_repeat
def _maybe_repeat(self, x): """Utility function for processing arguments that are singletons or lists. Args: x: either a list of self.n elements, or not a list. Returns: a list of self.n elements. """ if isinstance(x, list): assert len(x) == self.n return x else: return [x] * self.n
python
def _maybe_repeat(self, x): """Utility function for processing arguments that are singletons or lists. Args: x: either a list of self.n elements, or not a list. Returns: a list of self.n elements. """ if isinstance(x, list): assert len(x) == self.n return x else: return [x] * self.n
[ "def", "_maybe_repeat", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "x", ",", "list", ")", ":", "assert", "len", "(", "x", ")", "==", "self", ".", "n", "return", "x", "else", ":", "return", "[", "x", "]", "*", "self", ".", "n" ]
Utility function for processing arguments that are singletons or lists. Args: x: either a list of self.n elements, or not a list. Returns: a list of self.n elements.
[ "Utility", "function", "for", "processing", "arguments", "that", "are", "singletons", "or", "lists", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L251-L264
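The broadcast rule is easy to check in isolation; a hypothetical stand-in class with the same method:

class P:
    def __init__(self, n):
        self.n = n
    def _maybe_repeat(self, x):
        if isinstance(x, list):
            assert len(x) == self.n
            return x
        return [x] * self.n

p = P(3)
assert p._maybe_repeat(7) == [7, 7, 7]          # singleton broadcast to all shards
assert p._maybe_repeat([1, 2, 3]) == [1, 2, 3]  # per-shard list passed through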
22,773
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
PadRemover.remove
def remove(self, x): """Remove padding from the given tensor. Args: x (tf.Tensor): of shape [dim_origin,...] Returns: a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin """ with tf.name_scope("pad_reduce/remove"): x_shape = x.get_shape().as_list() x = tf.gather_nd( x, indices=self.nonpad_ids, ) if not tf.executing_eagerly(): # This is a hack but for some reason, gather_nd return a tensor of # undefined shape, so the shape is set up manually x.set_shape([None] + x_shape[1:]) return x
python
def remove(self, x): """Remove padding from the given tensor. Args: x (tf.Tensor): of shape [dim_origin,...] Returns: a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin """ with tf.name_scope("pad_reduce/remove"): x_shape = x.get_shape().as_list() x = tf.gather_nd( x, indices=self.nonpad_ids, ) if not tf.executing_eagerly(): # This is a hack but for some reason, gather_nd return a tensor of # undefined shape, so the shape is set up manually x.set_shape([None] + x_shape[1:]) return x
[ "def", "remove", "(", "self", ",", "x", ")", ":", "with", "tf", ".", "name_scope", "(", "\"pad_reduce/remove\"", ")", ":", "x_shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "x", "=", "tf", ".", "gather_nd", "(", "x", ",", "indices", "=", "self", ".", "nonpad_ids", ",", ")", "if", "not", "tf", ".", "executing_eagerly", "(", ")", ":", "# This is a hack but for some reason, gather_nd return a tensor of", "# undefined shape, so the shape is set up manually", "x", ".", "set_shape", "(", "[", "None", "]", "+", "x_shape", "[", "1", ":", "]", ")", "return", "x" ]
Remove padding from the given tensor. Args: x (tf.Tensor): of shape [dim_origin,...] Returns: a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
[ "Remove", "padding", "from", "the", "given", "tensor", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L624-L643
22,774
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
PadRemover.restore
def restore(self, x): """Add padding back to the given tensor. Args: x (tf.Tensor): of shape [dim_compressed,...] Returns: a tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The dim is restored from the original reference tensor """ with tf.name_scope("pad_reduce/restore"): x = tf.scatter_nd( indices=self.nonpad_ids, updates=x, shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0), ) return x
python
def restore(self, x): """Add padding back to the given tensor. Args: x (tf.Tensor): of shape [dim_compressed,...] Returns: a tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The dim is restored from the original reference tensor """ with tf.name_scope("pad_reduce/restore"): x = tf.scatter_nd( indices=self.nonpad_ids, updates=x, shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0), ) return x
[ "def", "restore", "(", "self", ",", "x", ")", ":", "with", "tf", ".", "name_scope", "(", "\"pad_reduce/restore\"", ")", ":", "x", "=", "tf", ".", "scatter_nd", "(", "indices", "=", "self", ".", "nonpad_ids", ",", "updates", "=", "x", ",", "shape", "=", "tf", ".", "concat", "(", "[", "self", ".", "dim_origin", ",", "tf", ".", "shape", "(", "x", ")", "[", "1", ":", "]", "]", ",", "axis", "=", "0", ")", ",", ")", "return", "x" ]
Add padding back to the given tensor. Args: x (tf.Tensor): of shape [dim_compressed,...] Returns: a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The dim is restored from the original reference tensor
[ "Add", "padding", "back", "to", "the", "given", "tensor", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L645-L661
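A NumPy sketch of the remove/restore round trip, with fancy indexing standing in for tf.gather_nd and tf.scatter_nd; the pad_mask threshold mirrors the one PadRemover is assumed to use, and the data is made up:

import numpy as np

x = np.array([[1., 1.], [0., 0.], [2., 2.], [0., 0.]])  # rows 1 and 3 are padding
pad_mask = np.array([0., 1., 0., 1.])
nonpad_ids = np.where(pad_mask < 1e-9)[0]               # -> [0, 2]

compressed = x[nonpad_ids]                 # remove(): [dim_compressed, ...]
restored = np.zeros_like(x)
restored[nonpad_ids] = compressed          # restore(): padding comes back as zeros
assert (restored == x).all()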
22,775
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
SparseDispatcher.combine
def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed as the sum over all experts `i` of the expert output, weighted by the corresponding gate values. If `multiply_by_gates` is set to False, the gate values are ignored. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean Returns: a `Tensor` with shape `[batch_size, <extra_output_dims>]`. """ # see comments on convert_gradient_to_tensor stitched = common_layers.convert_gradient_to_tensor( tf.concat(expert_out, 0)) if multiply_by_gates: stitched *= tf.expand_dims(self._nonzero_gates, 1) combined = tf.unsorted_segment_sum(stitched, self._batch_index, tf.shape(self._gates)[0]) return combined
python
def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed as the sum over all experts `i` of the expert output, weighted by the corresponding gate values. If `multiply_by_gates` is set to False, the gate values are ignored. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean Returns: a `Tensor` with shape `[batch_size, <extra_output_dims>]`. """ # see comments on convert_gradient_to_tensor stitched = common_layers.convert_gradient_to_tensor( tf.concat(expert_out, 0)) if multiply_by_gates: stitched *= tf.expand_dims(self._nonzero_gates, 1) combined = tf.unsorted_segment_sum(stitched, self._batch_index, tf.shape(self._gates)[0]) return combined
[ "def", "combine", "(", "self", ",", "expert_out", ",", "multiply_by_gates", "=", "True", ")", ":", "# see comments on convert_gradient_to_tensor", "stitched", "=", "common_layers", ".", "convert_gradient_to_tensor", "(", "tf", ".", "concat", "(", "expert_out", ",", "0", ")", ")", "if", "multiply_by_gates", ":", "stitched", "*=", "tf", ".", "expand_dims", "(", "self", ".", "_nonzero_gates", ",", "1", ")", "combined", "=", "tf", ".", "unsorted_segment_sum", "(", "stitched", ",", "self", ".", "_batch_index", ",", "tf", ".", "shape", "(", "self", ".", "_gates", ")", "[", "0", "]", ")", "return", "combined" ]
Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed as the sum over all experts `i` of the expert output, weighted by the corresponding gate values. If `multiply_by_gates` is set to False, the gate values are ignored. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean Returns: a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
[ "Sum", "together", "the", "expert", "output", "weighted", "by", "the", "gates", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L810-L833
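A NumPy sketch of the gate-weighted stitch-back; np.add.at plays the role of tf.unsorted_segment_sum, and the expert outputs, batch indices, and gate values are hypothetical:

import numpy as np

expert_out = np.array([[1., 1.],           # concatenated per-expert output rows
                       [2., 2.],
                       [4., 4.]])
batch_index = np.array([0, 1, 0])          # each row's originating batch element
nonzero_gates = np.array([0.5, 1.0, 0.25])

stitched = expert_out * nonzero_gates[:, None]
combined = np.zeros((2, 2))
np.add.at(combined, batch_index, stitched)  # segment-sum back to batch positions
assert np.allclose(combined, [[1.5, 1.5], [2.0, 2.0]])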
22,776
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
SparseDispatcher.expert_to_batch_indices
def expert_to_batch_indices(self): """Batch indices corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64` and shapes `[expert_batch_size_i]` """ return tf.split( self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts)
python
def expert_to_batch_indices(self): """Batch indices corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64` and shapes `[expert_batch_size_i]` """ return tf.split( self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts)
[ "def", "expert_to_batch_indices", "(", "self", ")", ":", "return", "tf", ".", "split", "(", "self", ".", "_batch_index", ",", "self", ".", "_part_sizes_tensor", ",", "0", ",", "num", "=", "self", ".", "_num_experts", ")" ]
Batch indices corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64` and shapes `[expert_batch_size_i]`
[ "Batch", "indices", "corresponding", "to", "the", "examples", "in", "the", "per", "-", "expert", "Tensor", "s", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L845-L853
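The split-by-part-sizes semantics can be mimicked with np.split; the batch_index and per-expert part sizes below are hypothetical:

import numpy as np

batch_index = np.array([0, 0, 1, 2, 2, 2])
part_sizes = [2, 1, 3]                                   # examples per expert
splits = np.split(batch_index, np.cumsum(part_sizes)[:-1])
assert [s.tolist() for s in splits] == [[0, 0], [1], [2, 2, 2]]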
22,777
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
DistributedSparseDispatcher.combine
def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`. """ expert_part_sizes = tf.unstack( tf.stack([d.part_sizes for d in self._dispatchers]), num=self._ep.n, axis=1) # list of lists of shape [num_experts][num_datashards] expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes) expert_output_parts_t = transpose_list_of_lists(expert_output_parts) def my_combine(dispatcher, parts): return dispatcher.combine( common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)), multiply_by_gates=multiply_by_gates) return self._dp(my_combine, self._dispatchers, expert_output_parts_t)
python
def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`. """ expert_part_sizes = tf.unstack( tf.stack([d.part_sizes for d in self._dispatchers]), num=self._ep.n, axis=1) # list of lists of shape [num_experts][num_datashards] expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes) expert_output_parts_t = transpose_list_of_lists(expert_output_parts) def my_combine(dispatcher, parts): return dispatcher.combine( common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)), multiply_by_gates=multiply_by_gates) return self._dp(my_combine, self._dispatchers, expert_output_parts_t)
[ "def", "combine", "(", "self", ",", "expert_out", ",", "multiply_by_gates", "=", "True", ")", ":", "expert_part_sizes", "=", "tf", ".", "unstack", "(", "tf", ".", "stack", "(", "[", "d", ".", "part_sizes", "for", "d", "in", "self", ".", "_dispatchers", "]", ")", ",", "num", "=", "self", ".", "_ep", ".", "n", ",", "axis", "=", "1", ")", "# list of lists of shape [num_experts][num_datashards]", "expert_output_parts", "=", "self", ".", "_ep", "(", "tf", ".", "split", ",", "expert_out", ",", "expert_part_sizes", ")", "expert_output_parts_t", "=", "transpose_list_of_lists", "(", "expert_output_parts", ")", "def", "my_combine", "(", "dispatcher", ",", "parts", ")", ":", "return", "dispatcher", ".", "combine", "(", "common_layers", ".", "convert_gradient_to_tensor", "(", "tf", ".", "concat", "(", "parts", ",", "0", ")", ")", ",", "multiply_by_gates", "=", "multiply_by_gates", ")", "return", "self", ".", "_dp", "(", "my_combine", ",", "self", ".", "_dispatchers", ",", "expert_output_parts_t", ")" ]
Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`.
[ "Sum", "together", "the", "expert", "output", "multiplied", "by", "the", "corresponding", "gates", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L907-L930
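A pure-Python sketch of the transpose_list_of_lists step used above, which turns the [expert][datashard] nesting into [datashard][expert]; the string labels are placeholders for tensor parts:

parts = [["e0d0", "e0d1"],                 # [expert][datashard]
         ["e1d0", "e1d1"]]
transposed = [list(row) for row in zip(*parts)]
assert transposed == [["e0d0", "e1d0"],    # [datashard][expert]
                      ["e0d1", "e1d1"]]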
22,778
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
TruncatingDispatcher.dispatch
def dispatch(self, inp): """Send the inputs to the experts. Args: inp: a `Tensor` of shape "[batch, length, depth]` Returns: a tensor with shape [batch, num_experts, expert_capacity, depth] """ inp = tf.reshape(inp, [self._batch * self._length, -1]) # [batch, num_experts, expert_capacity, depth] ret = tf.gather(inp, self._flat_indices) return ret
python
def dispatch(self, inp): """Send the inputs to the experts. Args: inp: a `Tensor` of shape "[batch, length, depth]` Returns: a tensor with shape [batch, num_experts, expert_capacity, depth] """ inp = tf.reshape(inp, [self._batch * self._length, -1]) # [batch, num_experts, expert_capacity, depth] ret = tf.gather(inp, self._flat_indices) return ret
[ "def", "dispatch", "(", "self", ",", "inp", ")", ":", "inp", "=", "tf", ".", "reshape", "(", "inp", ",", "[", "self", ".", "_batch", "*", "self", ".", "_length", ",", "-", "1", "]", ")", "# [batch, num_experts, expert_capacity, depth]", "ret", "=", "tf", ".", "gather", "(", "inp", ",", "self", ".", "_flat_indices", ")", "return", "ret" ]
Send the inputs to the experts. Args: inp: a `Tensor` of shape `[batch, length, depth]` Returns: a tensor with shape [batch, num_experts, expert_capacity, depth]
[ "Send", "the", "inputs", "to", "the", "experts", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1158-L1169
22,779
tensorflow/tensor2tensor
tensor2tensor/utils/expert_utils.py
TruncatingDispatcher.combine
def combine(self, x): """Return the output from the experts. When one example goes to multiple experts, the outputs are summed. Args: x: a Tensor with shape [batch, num_experts, expert_capacity, depth] Returns: a `Tensor` with shape `[batch, length, depth] """ depth = tf.shape(x)[-1] x *= tf.expand_dims(self._nonpadding, -1) ret = tf.unsorted_segment_sum( x, self._flat_indices, num_segments=self._batch * self._length) ret = tf.reshape(ret, [self._batch, self._length, depth]) return ret
python
def combine(self, x): """Return the output from the experts. When one example goes to multiple experts, the outputs are summed. Args: x: a Tensor with shape [batch, num_experts, expert_capacity, depth] Returns: a `Tensor` with shape `[batch, length, depth] """ depth = tf.shape(x)[-1] x *= tf.expand_dims(self._nonpadding, -1) ret = tf.unsorted_segment_sum( x, self._flat_indices, num_segments=self._batch * self._length) ret = tf.reshape(ret, [self._batch, self._length, depth]) return ret
[ "def", "combine", "(", "self", ",", "x", ")", ":", "depth", "=", "tf", ".", "shape", "(", "x", ")", "[", "-", "1", "]", "x", "*=", "tf", ".", "expand_dims", "(", "self", ".", "_nonpadding", ",", "-", "1", ")", "ret", "=", "tf", ".", "unsorted_segment_sum", "(", "x", ",", "self", ".", "_flat_indices", ",", "num_segments", "=", "self", ".", "_batch", "*", "self", ".", "_length", ")", "ret", "=", "tf", ".", "reshape", "(", "ret", ",", "[", "self", ".", "_batch", ",", "self", ".", "_length", ",", "depth", "]", ")", "return", "ret" ]
Return the output from the experts. When one example goes to multiple experts, the outputs are summed. Args: x: a Tensor with shape [batch, num_experts, expert_capacity, depth] Returns: a `Tensor` with shape `[batch, length, depth]`
[ "Return", "the", "output", "from", "the", "experts", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1172-L1188
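A NumPy round trip through dispatch and combine for one tiny batch; flat_indices maps each (expert, capacity) slot to a flattened batch*length position, and all values here are hypothetical:

import numpy as np

inp = np.arange(4, dtype=float).reshape(4, 1)   # [batch * length, depth]
flat_indices = np.array([[0, 1], [2, 3]])       # [num_experts, expert_capacity]
nonpadding = np.ones(flat_indices.shape)

dispatched = inp[flat_indices]                  # dispatch(): gather the slots
out = np.zeros_like(inp)
np.add.at(out, flat_indices.ravel(),            # combine(): segment-sum back
          (dispatched * nonpadding[..., None]).reshape(-1, 1))
assert np.allclose(out, inp)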
22,780
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
make_env
def make_env(env_type, real_env, sim_env_kwargs): """Factory function for envs.""" return { "real": lambda: real_env.new_like( # pylint: disable=g-long-lambda batch_size=sim_env_kwargs["batch_size"], store_rollouts=False, ), "simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda **sim_env_kwargs ), }[env_type]()
python
def make_env(env_type, real_env, sim_env_kwargs): """Factory function for envs.""" return { "real": lambda: real_env.new_like( # pylint: disable=g-long-lambda batch_size=sim_env_kwargs["batch_size"], store_rollouts=False, ), "simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda **sim_env_kwargs ), }[env_type]()
[ "def", "make_env", "(", "env_type", ",", "real_env", ",", "sim_env_kwargs", ")", ":", "return", "{", "\"real\"", ":", "lambda", ":", "real_env", ".", "new_like", "(", "# pylint: disable=g-long-lambda", "batch_size", "=", "sim_env_kwargs", "[", "\"batch_size\"", "]", ",", "store_rollouts", "=", "False", ",", ")", ",", "\"simulated\"", ":", "lambda", ":", "rl_utils", ".", "SimulatedBatchGymEnvWithFixedInitialFrames", "(", "# pylint: disable=g-long-lambda", "*", "*", "sim_env_kwargs", ")", ",", "}", "[", "env_type", "]", "(", ")" ]
Factory function for envs.
[ "Factory", "function", "for", "envs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L240-L250
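make_env relies on a dict-of-lambdas so that only the selected environment is ever constructed. A minimal sketch of the pattern with hypothetical constructors:

def make_thing(kind, **kwargs):
    return {
        "real": lambda: dict(kind="real", **kwargs),
        "simulated": lambda: dict(kind="simulated", **kwargs),
    }[kind]()  # only the chosen branch runs; the other lambda is never called

assert make_thing("real", batch_size=2) == {"kind": "real", "batch_size": 2}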
22,781
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
make_agent
def make_agent( agent_type, env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn=None, frame_stack_size=None, rollout_agent_type=None, batch_size=None, inner_batch_size=None, env_type=None, **planner_kwargs ): """Factory function for Agents.""" if batch_size is None: batch_size = env.batch_size return { "random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda batch_size, env.observation_space, env.action_space ), "policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda batch_size, env.observation_space, env.action_space, policy_hparams, policy_dir, sampling_temp ), "planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda batch_size, make_agent( rollout_agent_type, env, policy_hparams, policy_dir, sampling_temp, batch_size=inner_batch_size ), make_env(env_type, env.env, sim_env_kwargs_fn()), lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size), discount_factor=policy_hparams.gae_gamma, **planner_kwargs ), }[agent_type]()
python
def make_agent( agent_type, env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn=None, frame_stack_size=None, rollout_agent_type=None, batch_size=None, inner_batch_size=None, env_type=None, **planner_kwargs ): """Factory function for Agents.""" if batch_size is None: batch_size = env.batch_size return { "random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda batch_size, env.observation_space, env.action_space ), "policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda batch_size, env.observation_space, env.action_space, policy_hparams, policy_dir, sampling_temp ), "planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda batch_size, make_agent( rollout_agent_type, env, policy_hparams, policy_dir, sampling_temp, batch_size=inner_batch_size ), make_env(env_type, env.env, sim_env_kwargs_fn()), lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size), discount_factor=policy_hparams.gae_gamma, **planner_kwargs ), }[agent_type]()
[ "def", "make_agent", "(", "agent_type", ",", "env", ",", "policy_hparams", ",", "policy_dir", ",", "sampling_temp", ",", "sim_env_kwargs_fn", "=", "None", ",", "frame_stack_size", "=", "None", ",", "rollout_agent_type", "=", "None", ",", "batch_size", "=", "None", ",", "inner_batch_size", "=", "None", ",", "env_type", "=", "None", ",", "*", "*", "planner_kwargs", ")", ":", "if", "batch_size", "is", "None", ":", "batch_size", "=", "env", ".", "batch_size", "return", "{", "\"random\"", ":", "lambda", ":", "rl_utils", ".", "RandomAgent", "(", "# pylint: disable=g-long-lambda", "batch_size", ",", "env", ".", "observation_space", ",", "env", ".", "action_space", ")", ",", "\"policy\"", ":", "lambda", ":", "rl_utils", ".", "PolicyAgent", "(", "# pylint: disable=g-long-lambda", "batch_size", ",", "env", ".", "observation_space", ",", "env", ".", "action_space", ",", "policy_hparams", ",", "policy_dir", ",", "sampling_temp", ")", ",", "\"planner\"", ":", "lambda", ":", "rl_utils", ".", "PlannerAgent", "(", "# pylint: disable=g-long-lambda", "batch_size", ",", "make_agent", "(", "rollout_agent_type", ",", "env", ",", "policy_hparams", ",", "policy_dir", ",", "sampling_temp", ",", "batch_size", "=", "inner_batch_size", ")", ",", "make_env", "(", "env_type", ",", "env", ".", "env", ",", "sim_env_kwargs_fn", "(", ")", ")", ",", "lambda", "env", ":", "rl_utils", ".", "BatchStackWrapper", "(", "env", ",", "frame_stack_size", ")", ",", "discount_factor", "=", "policy_hparams", ".", "gae_gamma", ",", "*", "*", "planner_kwargs", ")", ",", "}", "[", "agent_type", "]", "(", ")" ]
Factory function for Agents.
[ "Factory", "function", "for", "Agents", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L253-L277
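Note how the "planner" branch calls make_agent recursively to build its rollout agent. A toy version of that recursion, with tuples as placeholder agents:

def make(kind, depth=0):
    return {
        "random": lambda: ("random", depth),
        "planner": lambda: ("planner", make("random", depth + 1)),
    }[kind]()

assert make("planner") == ("planner", ("random", 1))  # inner agent from the same factory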
22,782
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
collect_frames_for_random_starts
def collect_frames_for_random_starts( storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit, log_every_steps=None ): """Collects frames from real env for random starts of simulated env.""" del frame_stack_size storage_env.start_new_epoch(0) tf.logging.info( "Collecting %d frames for random starts.", random_starts_step_limit ) rl_utils.run_rollouts( stacked_env, agent, stacked_env.reset(), step_limit=random_starts_step_limit, many_rollouts_from_each_env=True, log_every_steps=log_every_steps, ) # Save unfinished rollouts to history. stacked_env.reset()
python
def collect_frames_for_random_starts( storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit, log_every_steps=None ): """Collects frames from real env for random starts of simulated env.""" del frame_stack_size storage_env.start_new_epoch(0) tf.logging.info( "Collecting %d frames for random starts.", random_starts_step_limit ) rl_utils.run_rollouts( stacked_env, agent, stacked_env.reset(), step_limit=random_starts_step_limit, many_rollouts_from_each_env=True, log_every_steps=log_every_steps, ) # Save unfinished rollouts to history. stacked_env.reset()
[ "def", "collect_frames_for_random_starts", "(", "storage_env", ",", "stacked_env", ",", "agent", ",", "frame_stack_size", ",", "random_starts_step_limit", ",", "log_every_steps", "=", "None", ")", ":", "del", "frame_stack_size", "storage_env", ".", "start_new_epoch", "(", "0", ")", "tf", ".", "logging", ".", "info", "(", "\"Collecting %d frames for random starts.\"", ",", "random_starts_step_limit", ")", "rl_utils", ".", "run_rollouts", "(", "stacked_env", ",", "agent", ",", "stacked_env", ".", "reset", "(", ")", ",", "step_limit", "=", "random_starts_step_limit", ",", "many_rollouts_from_each_env", "=", "True", ",", "log_every_steps", "=", "log_every_steps", ",", ")", "# Save unfinished rollouts to history.", "stacked_env", ".", "reset", "(", ")" ]
Collects frames from real env for random starts of simulated env.
[ "Collects", "frames", "from", "real", "env", "for", "random", "starts", "of", "simulated", "env", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L280-L297
22,783
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
make_agent_from_hparams
def make_agent_from_hparams( agent_type, base_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=() ): """Creates an Agent from hparams.""" def sim_env_kwargs_fn(): return rl.make_simulated_env_kwargs( base_env, loop_hparams, batch_size=planner_hparams.batch_size, model_dir=model_dir ) planner_kwargs = planner_hparams.values() planner_kwargs.pop("batch_size") planner_kwargs.pop("rollout_agent_type") planner_kwargs.pop("env_type") return make_agent( agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn, loop_hparams.frame_stack_size, planner_hparams.rollout_agent_type, inner_batch_size=planner_hparams.batch_size, env_type=planner_hparams.env_type, video_writers=video_writers, **planner_kwargs )
python
def make_agent_from_hparams( agent_type, base_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=() ): """Creates an Agent from hparams.""" def sim_env_kwargs_fn(): return rl.make_simulated_env_kwargs( base_env, loop_hparams, batch_size=planner_hparams.batch_size, model_dir=model_dir ) planner_kwargs = planner_hparams.values() planner_kwargs.pop("batch_size") planner_kwargs.pop("rollout_agent_type") planner_kwargs.pop("env_type") return make_agent( agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn, loop_hparams.frame_stack_size, planner_hparams.rollout_agent_type, inner_batch_size=planner_hparams.batch_size, env_type=planner_hparams.env_type, video_writers=video_writers, **planner_kwargs )
[ "def", "make_agent_from_hparams", "(", "agent_type", ",", "base_env", ",", "stacked_env", ",", "loop_hparams", ",", "policy_hparams", ",", "planner_hparams", ",", "model_dir", ",", "policy_dir", ",", "sampling_temp", ",", "video_writers", "=", "(", ")", ")", ":", "def", "sim_env_kwargs_fn", "(", ")", ":", "return", "rl", ".", "make_simulated_env_kwargs", "(", "base_env", ",", "loop_hparams", ",", "batch_size", "=", "planner_hparams", ".", "batch_size", ",", "model_dir", "=", "model_dir", ")", "planner_kwargs", "=", "planner_hparams", ".", "values", "(", ")", "planner_kwargs", ".", "pop", "(", "\"batch_size\"", ")", "planner_kwargs", ".", "pop", "(", "\"rollout_agent_type\"", ")", "planner_kwargs", ".", "pop", "(", "\"env_type\"", ")", "return", "make_agent", "(", "agent_type", ",", "stacked_env", ",", "policy_hparams", ",", "policy_dir", ",", "sampling_temp", ",", "sim_env_kwargs_fn", ",", "loop_hparams", ".", "frame_stack_size", ",", "planner_hparams", ".", "rollout_agent_type", ",", "inner_batch_size", "=", "planner_hparams", ".", "batch_size", ",", "env_type", "=", "planner_hparams", ".", "env_type", ",", "video_writers", "=", "video_writers", ",", "*", "*", "planner_kwargs", ")" ]
Creates an Agent from hparams.
[ "Creates", "an", "Agent", "from", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L300-L321
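The pop-then-forward idiom above consumes the keys handled explicitly and passes everything else through **planner_kwargs; a small illustration with a hypothetical hparams dict:

planner_kwargs = {"batch_size": 8, "rollout_agent_type": "policy",
                  "env_type": "simulated", "horizon": 4}
planner_kwargs.pop("batch_size")
planner_kwargs.pop("rollout_agent_type")
planner_kwargs.pop("env_type")
assert planner_kwargs == {"horizon": 4}  # only the extras reach **planner_kwargs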
22,784
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
make_eval_fn_with_agent
def make_eval_fn_with_agent( agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=None, video_writers=(), random_starts_step_limit=None ): """Returns an out-of-graph eval_fn using the Agent API.""" def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp): """Eval function.""" base_env = env env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size) agent = make_agent_from_hparams( agent_type, base_env, env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers ) if eval_mode == "agent_simulated": real_env = base_env.new_like(batch_size=1) stacked_env = rl_utils.BatchStackWrapper( real_env, loop_hparams.frame_stack_size ) collect_frames_for_random_starts( real_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) initial_frame_chooser = rl_utils.make_initial_frame_chooser( real_env, loop_hparams.frame_stack_size, simulation_random_starts=True, simulation_flip_first_random_for_beginning=False, split=None, ) env_fn = rl.make_simulated_env_fn_from_hparams( real_env, loop_hparams, batch_size=loop_hparams.eval_batch_size, initial_frame_chooser=initial_frame_chooser, model_dir=model_dir ) sim_env = env_fn(in_graph=False) env = rl_utils.BatchStackWrapper(sim_env, loop_hparams.frame_stack_size) kwargs = {} if not agent.records_own_videos: kwargs["video_writers"] = video_writers step_limit = base_env.rl_env_max_episode_steps if step_limit == -1: step_limit = None rl_utils.run_rollouts( env, agent, env.reset(), log_every_steps=log_every_steps, step_limit=step_limit, **kwargs ) if eval_mode == "agent_real": assert len(base_env.current_epoch_rollouts()) == env.batch_size return eval_fn
python
def make_eval_fn_with_agent( agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=None, video_writers=(), random_starts_step_limit=None ): """Returns an out-of-graph eval_fn using the Agent API.""" def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp): """Eval function.""" base_env = env env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size) agent = make_agent_from_hparams( agent_type, base_env, env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers ) if eval_mode == "agent_simulated": real_env = base_env.new_like(batch_size=1) stacked_env = rl_utils.BatchStackWrapper( real_env, loop_hparams.frame_stack_size ) collect_frames_for_random_starts( real_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) initial_frame_chooser = rl_utils.make_initial_frame_chooser( real_env, loop_hparams.frame_stack_size, simulation_random_starts=True, simulation_flip_first_random_for_beginning=False, split=None, ) env_fn = rl.make_simulated_env_fn_from_hparams( real_env, loop_hparams, batch_size=loop_hparams.eval_batch_size, initial_frame_chooser=initial_frame_chooser, model_dir=model_dir ) sim_env = env_fn(in_graph=False) env = rl_utils.BatchStackWrapper(sim_env, loop_hparams.frame_stack_size) kwargs = {} if not agent.records_own_videos: kwargs["video_writers"] = video_writers step_limit = base_env.rl_env_max_episode_steps if step_limit == -1: step_limit = None rl_utils.run_rollouts( env, agent, env.reset(), log_every_steps=log_every_steps, step_limit=step_limit, **kwargs ) if eval_mode == "agent_real": assert len(base_env.current_epoch_rollouts()) == env.batch_size return eval_fn
[ "def", "make_eval_fn_with_agent", "(", "agent_type", ",", "eval_mode", ",", "planner_hparams", ",", "model_dir", ",", "log_every_steps", "=", "None", ",", "video_writers", "=", "(", ")", ",", "random_starts_step_limit", "=", "None", ")", ":", "def", "eval_fn", "(", "env", ",", "loop_hparams", ",", "policy_hparams", ",", "policy_dir", ",", "sampling_temp", ")", ":", "\"\"\"Eval function.\"\"\"", "base_env", "=", "env", "env", "=", "rl_utils", ".", "BatchStackWrapper", "(", "env", ",", "loop_hparams", ".", "frame_stack_size", ")", "agent", "=", "make_agent_from_hparams", "(", "agent_type", ",", "base_env", ",", "env", ",", "loop_hparams", ",", "policy_hparams", ",", "planner_hparams", ",", "model_dir", ",", "policy_dir", ",", "sampling_temp", ",", "video_writers", ")", "if", "eval_mode", "==", "\"agent_simulated\"", ":", "real_env", "=", "base_env", ".", "new_like", "(", "batch_size", "=", "1", ")", "stacked_env", "=", "rl_utils", ".", "BatchStackWrapper", "(", "real_env", ",", "loop_hparams", ".", "frame_stack_size", ")", "collect_frames_for_random_starts", "(", "real_env", ",", "stacked_env", ",", "agent", ",", "loop_hparams", ".", "frame_stack_size", ",", "random_starts_step_limit", ",", "log_every_steps", ")", "initial_frame_chooser", "=", "rl_utils", ".", "make_initial_frame_chooser", "(", "real_env", ",", "loop_hparams", ".", "frame_stack_size", ",", "simulation_random_starts", "=", "True", ",", "simulation_flip_first_random_for_beginning", "=", "False", ",", "split", "=", "None", ",", ")", "env_fn", "=", "rl", ".", "make_simulated_env_fn_from_hparams", "(", "real_env", ",", "loop_hparams", ",", "batch_size", "=", "loop_hparams", ".", "eval_batch_size", ",", "initial_frame_chooser", "=", "initial_frame_chooser", ",", "model_dir", "=", "model_dir", ")", "sim_env", "=", "env_fn", "(", "in_graph", "=", "False", ")", "env", "=", "rl_utils", ".", "BatchStackWrapper", "(", "sim_env", ",", "loop_hparams", ".", "frame_stack_size", ")", "kwargs", "=", "{", "}", "if", "not", "agent", ".", "records_own_videos", ":", "kwargs", "[", "\"video_writers\"", "]", "=", "video_writers", "step_limit", "=", "base_env", ".", "rl_env_max_episode_steps", "if", "step_limit", "==", "-", "1", ":", "step_limit", "=", "None", "rl_utils", ".", "run_rollouts", "(", "env", ",", "agent", ",", "env", ".", "reset", "(", ")", ",", "log_every_steps", "=", "log_every_steps", ",", "step_limit", "=", "step_limit", ",", "*", "*", "kwargs", ")", "if", "eval_mode", "==", "\"agent_real\"", ":", "assert", "len", "(", "base_env", ".", "current_epoch_rollouts", "(", ")", ")", "==", "env", ".", "batch_size", "return", "eval_fn" ]
Returns an out-of-graph eval_fn using the Agent API.
[ "Returns", "an", "out", "-", "of", "-", "graph", "eval_fn", "using", "the", "Agent", "API", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L324-L372
22,785
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
evaluate_world_model
def evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ): """Evaluates the world model.""" if debug_video_path: debug_video_path = os.path.join(debug_video_path, "0.avi") storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) stacked_env = rl_utils.BatchStackWrapper( storage_env, loop_hparams.frame_stack_size ) policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) agent = make_agent_from_hparams( agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, # TODO(koz4k): Loop over eval_sampling_temps? sampling_temp=loop_hparams.eval_sampling_temps[0], ) collect_frames_for_random_starts( storage_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) return rl_utils.evaluate_world_model( storage_env, loop_hparams, model_dir, debug_video_path, split=None )
python
def evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ): """Evaluates the world model.""" if debug_video_path: debug_video_path = os.path.join(debug_video_path, "0.avi") storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) stacked_env = rl_utils.BatchStackWrapper( storage_env, loop_hparams.frame_stack_size ) policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) agent = make_agent_from_hparams( agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, # TODO(koz4k): Loop over eval_sampling_temps? sampling_temp=loop_hparams.eval_sampling_temps[0], ) collect_frames_for_random_starts( storage_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) return rl_utils.evaluate_world_model( storage_env, loop_hparams, model_dir, debug_video_path, split=None )
[ "def", "evaluate_world_model", "(", "agent_type", ",", "loop_hparams", ",", "planner_hparams", ",", "model_dir", ",", "policy_dir", ",", "random_starts_step_limit", ",", "debug_video_path", ",", "log_every_steps", ")", ":", "if", "debug_video_path", ":", "debug_video_path", "=", "os", ".", "path", ".", "join", "(", "debug_video_path", ",", "\"0.avi\"", ")", "storage_env", "=", "rl_utils", ".", "setup_env", "(", "loop_hparams", ",", "batch_size", "=", "1", ",", "max_num_noops", "=", "0", ")", "stacked_env", "=", "rl_utils", ".", "BatchStackWrapper", "(", "storage_env", ",", "loop_hparams", ".", "frame_stack_size", ")", "policy_hparams", "=", "trainer_lib", ".", "create_hparams", "(", "loop_hparams", ".", "base_algo_params", ")", "agent", "=", "make_agent_from_hparams", "(", "agent_type", ",", "storage_env", ",", "stacked_env", ",", "loop_hparams", ",", "policy_hparams", ",", "planner_hparams", ",", "model_dir", ",", "policy_dir", ",", "# TODO(koz4k): Loop over eval_sampling_temps?", "sampling_temp", "=", "loop_hparams", ".", "eval_sampling_temps", "[", "0", "]", ",", ")", "collect_frames_for_random_starts", "(", "storage_env", ",", "stacked_env", ",", "agent", ",", "loop_hparams", ".", "frame_stack_size", ",", "random_starts_step_limit", ",", "log_every_steps", ")", "return", "rl_utils", ".", "evaluate_world_model", "(", "storage_env", ",", "loop_hparams", ",", "model_dir", ",", "debug_video_path", ",", "split", "=", "None", ")" ]
Evaluates the world model.
[ "Evaluates", "the", "world", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L375-L400
22,786
tensorflow/tensor2tensor
tensor2tensor/envs/tic_tac_toe_env.py
get_open_spaces
def get_open_spaces(board): """Given a representation of the board, returns a list of open spaces.""" open_spaces = [] for i in range(3): for j in range(3): if board[i][j] == 0: open_spaces.append(encode_pos(i, j)) return open_spaces
python
def get_open_spaces(board): """Given a representation of the board, returns a list of open spaces.""" open_spaces = [] for i in range(3): for j in range(3): if board[i][j] == 0: open_spaces.append(encode_pos(i, j)) return open_spaces
[ "def", "get_open_spaces", "(", "board", ")", ":", "open_spaces", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "for", "j", "in", "range", "(", "3", ")", ":", "if", "board", "[", "i", "]", "[", "j", "]", "==", "0", ":", "open_spaces", ".", "append", "(", "encode_pos", "(", "i", ",", "j", ")", ")", "return", "open_spaces" ]
Given a representation of the board, returns a list of open spaces.
[ "Given", "a", "representation", "of", "the", "board", "returns", "a", "list", "of", "open", "spaces", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/tic_tac_toe_env.py#L46-L53
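A quick check of get_open_spaces; encode_pos lives elsewhere in the module, so the 3 * i + j stand-in below is an assumption made only to keep the snippet runnable:

import numpy as np

def encode_pos(i, j):       # hypothetical stand-in for the module's encoder
    return 3 * i + j

board = np.array([[1, 0, -1],
                  [0, 0, 0],
                  [0, 0, 0]])
assert get_open_spaces(board) == [1, 3, 4, 5, 6, 7, 8]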
22,787
tensorflow/tensor2tensor
tensor2tensor/envs/tic_tac_toe_env.py
get_reward_and_done
def get_reward_and_done(board): """Given a representation of the board, returns reward and done.""" # Returns (reward, done) where: # reward: -1 means lost, +1 means win, 0 means draw or continuing. # done: True if the game is over, i.e. someone won or it is a draw. # Sum all rows ... all_sums = [np.sum(board[i, :]) for i in range(3)] # ... all columns all_sums.extend([np.sum(board[:, i]) for i in range(3)]) # and both diagonals. all_sums.append(np.sum([board[i, i] for i in range(3)])) all_sums.append(np.sum([board[i, 2 - i] for i in range(3)])) if -3 in all_sums: return -1, True if 3 in all_sums: return 1, True done = True if get_open_spaces(board): done = False return 0, done
python
def get_reward_and_done(board): """Given a representation of the board, returns reward and done.""" # Returns (reward, done) where: # reward: -1 means lost, +1 means win, 0 means draw or continuing. # done: True if the game is over, i.e. someone won or it is a draw. # Sum all rows ... all_sums = [np.sum(board[i, :]) for i in range(3)] # ... all columns all_sums.extend([np.sum(board[:, i]) for i in range(3)]) # and both diagonals. all_sums.append(np.sum([board[i, i] for i in range(3)])) all_sums.append(np.sum([board[i, 2 - i] for i in range(3)])) if -3 in all_sums: return -1, True if 3 in all_sums: return 1, True done = True if get_open_spaces(board): done = False return 0, done
[ "def", "get_reward_and_done", "(", "board", ")", ":", "# Returns (reward, done) where:", "# reward: -1 means lost, +1 means win, 0 means draw or continuing.", "# done: True if the game is over, i.e. someone won or it is a draw.", "# Sum all rows ...", "all_sums", "=", "[", "np", ".", "sum", "(", "board", "[", "i", ",", ":", "]", ")", "for", "i", "in", "range", "(", "3", ")", "]", "# ... all columns", "all_sums", ".", "extend", "(", "[", "np", ".", "sum", "(", "board", "[", ":", ",", "i", "]", ")", "for", "i", "in", "range", "(", "3", ")", "]", ")", "# and both diagonals.", "all_sums", ".", "append", "(", "np", ".", "sum", "(", "[", "board", "[", "i", ",", "i", "]", "for", "i", "in", "range", "(", "3", ")", "]", ")", ")", "all_sums", ".", "append", "(", "np", ".", "sum", "(", "[", "board", "[", "i", ",", "2", "-", "i", "]", "for", "i", "in", "range", "(", "3", ")", "]", ")", ")", "if", "-", "3", "in", "all_sums", ":", "return", "-", "1", ",", "True", "if", "3", "in", "all_sums", ":", "return", "1", ",", "True", "done", "=", "True", "if", "get_open_spaces", "(", "board", ")", ":", "done", "=", "False", "return", "0", ",", "done" ]
Given a representation of the board, returns reward and done.
[ "Given", "a", "representation", "of", "the", "board", "returns", "reward", "and", "done", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/tic_tac_toe_env.py#L56-L80
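Two sanity checks on hypothetical positions; both hit the early returns, so nothing beyond NumPy and the function above is needed:

import numpy as np

win = np.array([[1, -1, 0],     # +1 holds the main diagonal
                [-1, 1, 0],
                [0, 0, 1]])
loss = np.array([[-1, 1, 0],    # -1 holds the first column
                 [-1, 0, 1],
                 [-1, 1, 0]])
assert get_reward_and_done(win) == (1, True)
assert get_reward_and_done(loss) == (-1, True)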
22,788
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
decode_hparams
def decode_hparams(overrides=""): """Hyperparameters for decoding.""" hp = hparam.HParams( save_images=False, log_results=True, extra_length=100, min_length_ratio=0.0, batch_size=0, beam_size=4, alpha=0.6, eos_penalty=0.0, block_size=0, guess_and_check_top_k=0, guess_and_check_epsilon=-1, insertion_parallel=False, return_beams=False, write_beam_scores=False, max_input_size=-1, identity_output=False, num_samples=-1, # Number of examples to decode. delimiter="\n", decode_to_file="", # str. Prefix for filename to write decodings to. decode_reference="", # str. Filename to read references from. decode_in_memory=False, # How much decode should wait for the next checkpoint decode_timeout_mins=240, summaries_log_dir="decode", # Directory to write hook summaries. shards=1, # How many shards of data to decode (treating 1 as None). shard_id=0, # Which shard are we decoding if more than 1 above. shards_start_offset=0, # Number of the first shard to decode. shard_google_format=False, # If True use Google shard naming format. num_decodes=1, # Number of times to go over the dataset. force_decode_length=False, display_decoded_images=False, # Multi-problem decoding task id. multiproblem_task_id=-1, # Used for video decoding. frames_per_second=10, skip_eos_postprocess=False, # Creates a blue/red border covering border_percent of the frame. border_percent=2, # Maximum number of videos displayed. # number of videos displayed = max_display_outputs * max_display_decodes max_display_outputs=10, max_display_decodes=5, # Used in computation of VGG feature based video metrics. # Set this to be the path to a trained VGG ckpt to output # useful metrics. vgg_ckpt_path="", # Used for MLPerf compliance logging. mlperf_decode_step=0.0, mlperf_threshold=25.0, mlperf_success=False) hp.parse(overrides) return hp
python
def decode_hparams(overrides=""): """Hyperparameters for decoding.""" hp = hparam.HParams( save_images=False, log_results=True, extra_length=100, min_length_ratio=0.0, batch_size=0, beam_size=4, alpha=0.6, eos_penalty=0.0, block_size=0, guess_and_check_top_k=0, guess_and_check_epsilon=-1, insertion_parallel=False, return_beams=False, write_beam_scores=False, max_input_size=-1, identity_output=False, num_samples=-1, # Number of examples to decode. delimiter="\n", decode_to_file="", # str. Prefix for filename to write decodings to. decode_reference="", # str. Filename to read references from. decode_in_memory=False, # How much decode should wait for the next checkpoint decode_timeout_mins=240, summaries_log_dir="decode", # Directory to write hook summaries. shards=1, # How many shards of data to decode (treating 1 as None). shard_id=0, # Which shard are we decoding if more than 1 above. shards_start_offset=0, # Number of the first shard to decode. shard_google_format=False, # If True use Google shard naming format. num_decodes=1, # Number of times to go over the dataset. force_decode_length=False, display_decoded_images=False, # Multi-problem decoding task id. multiproblem_task_id=-1, # Used for video decoding. frames_per_second=10, skip_eos_postprocess=False, # Creates a blue/red border covering border_percent of the frame. border_percent=2, # Maximum number of videos displayed. # number of videos displayed = max_display_outputs * max_display_decodes max_display_outputs=10, max_display_decodes=5, # Used in computation of VGG feature based video metrics. # Set this to be the path to a trained VGG ckpt to output # useful metrics. vgg_ckpt_path="", # Used for MLPerf compliance logging. mlperf_decode_step=0.0, mlperf_threshold=25.0, mlperf_success=False) hp.parse(overrides) return hp
[ "def", "decode_hparams", "(", "overrides", "=", "\"\"", ")", ":", "hp", "=", "hparam", ".", "HParams", "(", "save_images", "=", "False", ",", "log_results", "=", "True", ",", "extra_length", "=", "100", ",", "min_length_ratio", "=", "0.0", ",", "batch_size", "=", "0", ",", "beam_size", "=", "4", ",", "alpha", "=", "0.6", ",", "eos_penalty", "=", "0.0", ",", "block_size", "=", "0", ",", "guess_and_check_top_k", "=", "0", ",", "guess_and_check_epsilon", "=", "-", "1", ",", "insertion_parallel", "=", "False", ",", "return_beams", "=", "False", ",", "write_beam_scores", "=", "False", ",", "max_input_size", "=", "-", "1", ",", "identity_output", "=", "False", ",", "num_samples", "=", "-", "1", ",", "# Number of examples to decode.", "delimiter", "=", "\"\\n\"", ",", "decode_to_file", "=", "\"\"", ",", "# str. Prefix for filename to write decodings to.", "decode_reference", "=", "\"\"", ",", "# str. Filename to read references from.", "decode_in_memory", "=", "False", ",", "# How much decode should wait for the next checkpoint", "decode_timeout_mins", "=", "240", ",", "summaries_log_dir", "=", "\"decode\"", ",", "# Directory to write hook summaries.", "shards", "=", "1", ",", "# How many shards of data to decode (treating 1 as None).", "shard_id", "=", "0", ",", "# Which shard are we decoding if more than 1 above.", "shards_start_offset", "=", "0", ",", "# Number of the first shard to decode.", "shard_google_format", "=", "False", ",", "# If True use Google shard naming format.", "num_decodes", "=", "1", ",", "# Number of times to go over the dataset.", "force_decode_length", "=", "False", ",", "display_decoded_images", "=", "False", ",", "# Multi-problem decoding task id.", "multiproblem_task_id", "=", "-", "1", ",", "# Used for video decoding.", "frames_per_second", "=", "10", ",", "skip_eos_postprocess", "=", "False", ",", "# Creates a blue/red border covering border_percent of the frame.", "border_percent", "=", "2", ",", "# Maximum number of videos displayed.", "# number of videos displayed = max_display_outputs * max_display_decodes", "max_display_outputs", "=", "10", ",", "max_display_decodes", "=", "5", ",", "# Used in computation of VGG feature based video metrics.", "# Set this to be the path to a trained VGG ckpt to output", "# useful metrics.", "vgg_ckpt_path", "=", "\"\"", ",", "# Used for MLPerf compliance logging.", "mlperf_decode_step", "=", "0.0", ",", "mlperf_threshold", "=", "25.0", ",", "mlperf_success", "=", "False", ")", "hp", ".", "parse", "(", "overrides", ")", "return", "hp" ]
Hyperparameters for decoding.
[ "Hyperparameters", "for", "decoding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L47-L101
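The overrides string follows the usual HParams syntax of comma-separated name=value pairs. A minimal usage sketch (assuming tensor2tensor is installed and the module import path matches the file above):

from tensor2tensor.utils import decoding

# Override a few fields; everything else keeps its default from the HParams above.
dhp = decoding.decode_hparams("beam_size=8,alpha=0.8,num_samples=1000")
assert dhp.beam_size == 8        # overridden
assert dhp.extra_length == 100   # default kept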
22,789
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
log_decode_results
def log_decode_results(inputs, outputs, problem_name, prediction_idx, inputs_vocab, targets_vocab, targets=None, save_images=False, output_dir=None, identity_output=False, log_results=True, skip_eos_postprocess=False): """Log inference results.""" # TODO(lukaszkaiser) refactor this into feature_encoder is_video = "video" in problem_name or "gym" in problem_name if is_video: def fix_and_save_video(vid, prefix): save_path_template = os.path.join( output_dir, "%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx)) # this is only required for predictions if vid.shape[-1] == 1: vid = np.squeeze(vid, axis=-1) save_video(vid, save_path_template) tf.logging.info("Saving video: {}".format(prediction_idx)) fix_and_save_video(inputs, "inputs") fix_and_save_video(outputs, "outputs") fix_and_save_video(targets, "targets") is_image = "image" in problem_name is_text2class = isinstance(registry.problem(problem_name), text_problems.Text2ClassProblem) skip_eos_postprocess = is_image or is_text2class or skip_eos_postprocess decoded_inputs = None if is_image and save_images: save_path = os.path.join( output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx)) show_and_save_image(inputs / 255., save_path) elif inputs is not None and inputs_vocab: if identity_output: decoded_inputs = " ".join(map(str, inputs.flatten())) else: decoded_inputs = inputs_vocab.decode(_save_until_eos( inputs, skip_eos_postprocess)) if log_results and not is_video: tf.logging.info("Inference results INPUT: %s" % decoded_inputs) decoded_targets = None decoded_outputs = None if identity_output: decoded_outputs = " ".join(map(str, outputs.flatten())) if targets is not None: decoded_targets = " ".join(map(str, targets.flatten())) else: decoded_outputs = targets_vocab.decode(_save_until_eos( outputs, skip_eos_postprocess)) if targets is not None and log_results: decoded_targets = targets_vocab.decode(_save_until_eos( targets, skip_eos_postprocess)) if log_results and not is_video: tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) if targets is not None and log_results and not is_video: tf.logging.info("Inference results TARGET: %s" % decoded_targets) return decoded_inputs, decoded_outputs, decoded_targets
python
def log_decode_results(inputs, outputs, problem_name, prediction_idx, inputs_vocab, targets_vocab, targets=None, save_images=False, output_dir=None, identity_output=False, log_results=True, skip_eos_postprocess=False): """Log inference results.""" # TODO(lukaszkaiser) refactor this into feature_encoder is_video = "video" in problem_name or "gym" in problem_name if is_video: def fix_and_save_video(vid, prefix): save_path_template = os.path.join( output_dir, "%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx)) # this is only required for predictions if vid.shape[-1] == 1: vid = np.squeeze(vid, axis=-1) save_video(vid, save_path_template) tf.logging.info("Saving video: {}".format(prediction_idx)) fix_and_save_video(inputs, "inputs") fix_and_save_video(outputs, "outputs") fix_and_save_video(targets, "targets") is_image = "image" in problem_name is_text2class = isinstance(registry.problem(problem_name), text_problems.Text2ClassProblem) skip_eos_postprocess = is_image or is_text2class or skip_eos_postprocess decoded_inputs = None if is_image and save_images: save_path = os.path.join( output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx)) show_and_save_image(inputs / 255., save_path) elif inputs is not None and inputs_vocab: if identity_output: decoded_inputs = " ".join(map(str, inputs.flatten())) else: decoded_inputs = inputs_vocab.decode(_save_until_eos( inputs, skip_eos_postprocess)) if log_results and not is_video: tf.logging.info("Inference results INPUT: %s" % decoded_inputs) decoded_targets = None decoded_outputs = None if identity_output: decoded_outputs = " ".join(map(str, outputs.flatten())) if targets is not None: decoded_targets = " ".join(map(str, targets.flatten())) else: decoded_outputs = targets_vocab.decode(_save_until_eos( outputs, skip_eos_postprocess)) if targets is not None and log_results: decoded_targets = targets_vocab.decode(_save_until_eos( targets, skip_eos_postprocess)) if log_results and not is_video: tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) if targets is not None and log_results and not is_video: tf.logging.info("Inference results TARGET: %s" % decoded_targets) return decoded_inputs, decoded_outputs, decoded_targets
[ "def", "log_decode_results", "(", "inputs", ",", "outputs", ",", "problem_name", ",", "prediction_idx", ",", "inputs_vocab", ",", "targets_vocab", ",", "targets", "=", "None", ",", "save_images", "=", "False", ",", "output_dir", "=", "None", ",", "identity_output", "=", "False", ",", "log_results", "=", "True", ",", "skip_eos_postprocess", "=", "False", ")", ":", "# TODO(lukaszkaiser) refactor this into feature_encoder", "is_video", "=", "\"video\"", "in", "problem_name", "or", "\"gym\"", "in", "problem_name", "if", "is_video", ":", "def", "fix_and_save_video", "(", "vid", ",", "prefix", ")", ":", "save_path_template", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"%s_%s_%05d_{:05d}.png\"", "%", "(", "problem_name", ",", "prefix", ",", "prediction_idx", ")", ")", "# this is only required for predictions", "if", "vid", ".", "shape", "[", "-", "1", "]", "==", "1", ":", "vid", "=", "np", ".", "squeeze", "(", "vid", ",", "axis", "=", "-", "1", ")", "save_video", "(", "vid", ",", "save_path_template", ")", "tf", ".", "logging", ".", "info", "(", "\"Saving video: {}\"", ".", "format", "(", "prediction_idx", ")", ")", "fix_and_save_video", "(", "inputs", ",", "\"inputs\"", ")", "fix_and_save_video", "(", "outputs", ",", "\"outputs\"", ")", "fix_and_save_video", "(", "targets", ",", "\"targets\"", ")", "is_image", "=", "\"image\"", "in", "problem_name", "is_text2class", "=", "isinstance", "(", "registry", ".", "problem", "(", "problem_name", ")", ",", "text_problems", ".", "Text2ClassProblem", ")", "skip_eos_postprocess", "=", "is_image", "or", "is_text2class", "or", "skip_eos_postprocess", "decoded_inputs", "=", "None", "if", "is_image", "and", "save_images", ":", "save_path", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"%s_prediction_%d.jpg\"", "%", "(", "problem_name", ",", "prediction_idx", ")", ")", "show_and_save_image", "(", "inputs", "/", "255.", ",", "save_path", ")", "elif", "inputs", "is", "not", "None", "and", "inputs_vocab", ":", "if", "identity_output", ":", "decoded_inputs", "=", "\" \"", ".", "join", "(", "map", "(", "str", ",", "inputs", ".", "flatten", "(", ")", ")", ")", "else", ":", "decoded_inputs", "=", "inputs_vocab", ".", "decode", "(", "_save_until_eos", "(", "inputs", ",", "skip_eos_postprocess", ")", ")", "if", "log_results", "and", "not", "is_video", ":", "tf", ".", "logging", ".", "info", "(", "\"Inference results INPUT: %s\"", "%", "decoded_inputs", ")", "decoded_targets", "=", "None", "decoded_outputs", "=", "None", "if", "identity_output", ":", "decoded_outputs", "=", "\" \"", ".", "join", "(", "map", "(", "str", ",", "outputs", ".", "flatten", "(", ")", ")", ")", "if", "targets", "is", "not", "None", ":", "decoded_targets", "=", "\" \"", ".", "join", "(", "map", "(", "str", ",", "targets", ".", "flatten", "(", ")", ")", ")", "else", ":", "decoded_outputs", "=", "targets_vocab", ".", "decode", "(", "_save_until_eos", "(", "outputs", ",", "skip_eos_postprocess", ")", ")", "if", "targets", "is", "not", "None", "and", "log_results", ":", "decoded_targets", "=", "targets_vocab", ".", "decode", "(", "_save_until_eos", "(", "targets", ",", "skip_eos_postprocess", ")", ")", "if", "log_results", "and", "not", "is_video", ":", "tf", ".", "logging", ".", "info", "(", "\"Inference results OUTPUT: %s\"", "%", "decoded_outputs", ")", "if", "targets", "is", "not", "None", "and", "log_results", "and", "not", "is_video", ":", "tf", ".", "logging", ".", "info", "(", "\"Inference results TARGET: %s\"", "%", "decoded_targets", ")", "return", 
"decoded_inputs", ",", "decoded_outputs", ",", "decoded_targets" ]
Log inference results.
[ "Log", "inference", "results", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L104-L170
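A hedged call sketch for the text path of this function. `inputs_enc` and `targets_enc` are placeholders for the problem's text encoders, the token ids are invented (1 == text_encoder.EOS_ID), and the problem name is assumed to be registered (e.g. by importing tensor2tensor.problems first):

import numpy as np
from tensor2tensor.utils import decoding

# Decode and log one prediction; returns the three decoded strings.
decoded_in, decoded_out, decoded_tgt = decoding.log_decode_results(
    inputs=np.array([12, 7, 1]),      # made-up input ids ending in EOS
    outputs=np.array([45, 3, 1]),     # made-up output ids ending in EOS
    problem_name="translate_ende_wmt32k",
    prediction_idx=0,
    inputs_vocab=inputs_enc,          # placeholder encoder
    targets_vocab=targets_enc)        # placeholder encoder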
22,790
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
decode_from_dataset
def decode_from_dataset(estimator, problem_name, hparams, decode_hp, decode_to_file=None, dataset_split=None, checkpoint_path=None): """Perform decoding from dataset.""" tf.logging.info("Performing local inference from dataset for %s.", str(problem_name)) # We assume that worker_id corresponds to shard number. shard = decode_hp.shard_id if decode_hp.shards > 1 else None # Setup output directory for any artifacts that may be written out. output_dir = os.path.join(estimator.model_dir, "decode") tf.gfile.MakeDirs(output_dir) # If decode_hp.batch_size is specified, use a fixed batch size if decode_hp.batch_size: hparams.batch_size = decode_hp.batch_size hparams.use_fixed_batch_size = True dataset_kwargs = { "shard": shard, "dataset_split": dataset_split, "max_records": decode_hp.num_samples } # Build the inference input function problem = hparams.problem infer_input_fn = problem.make_estimator_input_fn( tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs) predictions, output_dirs = [], [] for decode_id in range(decode_hp.num_decodes): tf.logging.info("Decoding {}".format(decode_id)) # Create decode directory if not in-memory decoding. if not decode_hp.decode_in_memory: output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id) tf.gfile.MakeDirs(output_dir) output_dirs.append(output_dir) result = decode_once(estimator, problem_name, hparams, infer_input_fn, decode_hp, decode_to_file, output_dir, log_results=decode_hp.log_results, checkpoint_path=checkpoint_path) if decode_hp.decode_in_memory: output_dirs = [output_dir] predictions.append(result) if decode_hp.decode_to_file: decode_hp.decode_to_file = _decode_filename( decode_hp.decode_to_file, problem_name, decode_hp) run_postdecode_hooks(DecodeHookArgs( estimator=estimator, problem=problem, output_dirs=output_dirs, hparams=hparams, decode_hparams=decode_hp, predictions=predictions ), dataset_split) return predictions
python
def decode_from_dataset(estimator, problem_name, hparams, decode_hp, decode_to_file=None, dataset_split=None, checkpoint_path=None): """Perform decoding from dataset.""" tf.logging.info("Performing local inference from dataset for %s.", str(problem_name)) # We assume that worker_id corresponds to shard number. shard = decode_hp.shard_id if decode_hp.shards > 1 else None # Setup output directory for any artifacts that may be written out. output_dir = os.path.join(estimator.model_dir, "decode") tf.gfile.MakeDirs(output_dir) # If decode_hp.batch_size is specified, use a fixed batch size if decode_hp.batch_size: hparams.batch_size = decode_hp.batch_size hparams.use_fixed_batch_size = True dataset_kwargs = { "shard": shard, "dataset_split": dataset_split, "max_records": decode_hp.num_samples } # Build the inference input function problem = hparams.problem infer_input_fn = problem.make_estimator_input_fn( tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs) predictions, output_dirs = [], [] for decode_id in range(decode_hp.num_decodes): tf.logging.info("Decoding {}".format(decode_id)) # Create decode directory if not in-memory decoding. if not decode_hp.decode_in_memory: output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id) tf.gfile.MakeDirs(output_dir) output_dirs.append(output_dir) result = decode_once(estimator, problem_name, hparams, infer_input_fn, decode_hp, decode_to_file, output_dir, log_results=decode_hp.log_results, checkpoint_path=checkpoint_path) if decode_hp.decode_in_memory: output_dirs = [output_dir] predictions.append(result) if decode_hp.decode_to_file: decode_hp.decode_to_file = _decode_filename( decode_hp.decode_to_file, problem_name, decode_hp) run_postdecode_hooks(DecodeHookArgs( estimator=estimator, problem=problem, output_dirs=output_dirs, hparams=hparams, decode_hparams=decode_hp, predictions=predictions ), dataset_split) return predictions
[ "def", "decode_from_dataset", "(", "estimator", ",", "problem_name", ",", "hparams", ",", "decode_hp", ",", "decode_to_file", "=", "None", ",", "dataset_split", "=", "None", ",", "checkpoint_path", "=", "None", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Performing local inference from dataset for %s.\"", ",", "str", "(", "problem_name", ")", ")", "# We assume that worker_id corresponds to shard number.", "shard", "=", "decode_hp", ".", "shard_id", "if", "decode_hp", ".", "shards", ">", "1", "else", "None", "# Setup output directory for any artifacts that may be written out.", "output_dir", "=", "os", ".", "path", ".", "join", "(", "estimator", ".", "model_dir", ",", "\"decode\"", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "output_dir", ")", "# If decode_hp.batch_size is specified, use a fixed batch size", "if", "decode_hp", ".", "batch_size", ":", "hparams", ".", "batch_size", "=", "decode_hp", ".", "batch_size", "hparams", ".", "use_fixed_batch_size", "=", "True", "dataset_kwargs", "=", "{", "\"shard\"", ":", "shard", ",", "\"dataset_split\"", ":", "dataset_split", ",", "\"max_records\"", ":", "decode_hp", ".", "num_samples", "}", "# Build the inference input function", "problem", "=", "hparams", ".", "problem", "infer_input_fn", "=", "problem", ".", "make_estimator_input_fn", "(", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ",", "hparams", ",", "dataset_kwargs", "=", "dataset_kwargs", ")", "predictions", ",", "output_dirs", "=", "[", "]", ",", "[", "]", "for", "decode_id", "in", "range", "(", "decode_hp", ".", "num_decodes", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Decoding {}\"", ".", "format", "(", "decode_id", ")", ")", "# Create decode directory if not in-memory decoding.", "if", "not", "decode_hp", ".", "decode_in_memory", ":", "output_dir", "=", "os", ".", "path", ".", "join", "(", "estimator", ".", "model_dir", ",", "\"decode_%05d\"", "%", "decode_id", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "output_dir", ")", "output_dirs", ".", "append", "(", "output_dir", ")", "result", "=", "decode_once", "(", "estimator", ",", "problem_name", ",", "hparams", ",", "infer_input_fn", ",", "decode_hp", ",", "decode_to_file", ",", "output_dir", ",", "log_results", "=", "decode_hp", ".", "log_results", ",", "checkpoint_path", "=", "checkpoint_path", ")", "if", "decode_hp", ".", "decode_in_memory", ":", "output_dirs", "=", "[", "output_dir", "]", "predictions", ".", "append", "(", "result", ")", "if", "decode_hp", ".", "decode_to_file", ":", "decode_hp", ".", "decode_to_file", "=", "_decode_filename", "(", "decode_hp", ".", "decode_to_file", ",", "problem_name", ",", "decode_hp", ")", "run_postdecode_hooks", "(", "DecodeHookArgs", "(", "estimator", "=", "estimator", ",", "problem", "=", "problem", ",", "output_dirs", "=", "output_dirs", ",", "hparams", "=", "hparams", ",", "decode_hparams", "=", "decode_hp", ",", "predictions", "=", "predictions", ")", ",", "dataset_split", ")", "return", "predictions" ]
Perform decoding from dataset.
[ "Perform", "decoding", "from", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L173-L242
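A hedged call sketch; constructing `estimator` and `hparams` (normally done via tensor2tensor.utils.trainer_lib before decoding) is elided here:

#   dhp = decoding.decode_hparams("beam_size=4,num_samples=100")
#   predictions = decoding.decode_from_dataset(
#       estimator, "translate_ende_wmt32k", hparams, dhp, dataset_split="test")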
22,791
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
_decode_filename
def _decode_filename(base_filename, problem_name, decode_hp): """Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename. """ if decode_hp.shards > 1: base_filename = _add_shard_to_filename(base_filename, decode_hp) if ("beam{beam}.alpha{alpha}.decodes".format( beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) in base_filename): return base_filename else: return ( "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format( base=base_filename, model=FLAGS.model, hp=FLAGS.hparams_set, problem=problem_name, beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)))
python
def _decode_filename(base_filename, problem_name, decode_hp): """Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename. """ if decode_hp.shards > 1: base_filename = _add_shard_to_filename(base_filename, decode_hp) if ("beam{beam}.alpha{alpha}.decodes".format( beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) in base_filename): return base_filename else: return ( "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format( base=base_filename, model=FLAGS.model, hp=FLAGS.hparams_set, problem=problem_name, beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)))
[ "def", "_decode_filename", "(", "base_filename", ",", "problem_name", ",", "decode_hp", ")", ":", "if", "decode_hp", ".", "shards", ">", "1", ":", "base_filename", "=", "_add_shard_to_filename", "(", "base_filename", ",", "decode_hp", ")", "if", "(", "\"beam{beam}.alpha{alpha}.decodes\"", ".", "format", "(", "beam", "=", "str", "(", "decode_hp", ".", "beam_size", ")", ",", "alpha", "=", "str", "(", "decode_hp", ".", "alpha", ")", ")", "in", "base_filename", ")", ":", "return", "base_filename", "else", ":", "return", "(", "\"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes\"", ".", "format", "(", "base", "=", "base_filename", ",", "model", "=", "FLAGS", ".", "model", ",", "hp", "=", "FLAGS", ".", "hparams_set", ",", "problem", "=", "problem_name", ",", "beam", "=", "str", "(", "decode_hp", ".", "beam_size", ")", ",", "alpha", "=", "str", "(", "decode_hp", ".", "alpha", ")", ")", ")" ]
Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename.
[ "Generates", "decode", "filename", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L573-L598
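An illustration of the naming scheme; the FLAGS.model and FLAGS.hparams_set values below are assumed, not taken from the source:

# With FLAGS.model="transformer", FLAGS.hparams_set="transformer_base",
# decode_hp.beam_size=4 and decode_hp.alpha=0.6:
#
#   _decode_filename("out", "translate_ende_wmt32k", decode_hp)
#   -> "out.transformer.transformer_base.translate_ende_wmt32k.beam4.alpha0.6.decodes"
#
# A base filename that already carries the "beam{b}.alpha{a}.decodes" suffix
# is returned unchanged, so repeated calls do not stack suffixes.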
22,792
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
make_input_fn_from_generator
def make_input_fn_from_generator(gen): """Use py_func to yield elements from the given generator.""" first_ex = six.next(gen) flattened = tf.contrib.framework.nest.flatten(first_ex) types = [t.dtype for t in flattened] shapes = [[None] * len(t.shape) for t in flattened] first_ex_list = [first_ex] def py_func(): if first_ex_list: example = first_ex_list.pop() else: example = six.next(gen) return tf.contrib.framework.nest.flatten(example) def input_fn(): flat_example = tf.py_func(py_func, [], types) _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)] example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example) return example return input_fn
python
def make_input_fn_from_generator(gen): """Use py_func to yield elements from the given generator.""" first_ex = six.next(gen) flattened = tf.contrib.framework.nest.flatten(first_ex) types = [t.dtype for t in flattened] shapes = [[None] * len(t.shape) for t in flattened] first_ex_list = [first_ex] def py_func(): if first_ex_list: example = first_ex_list.pop() else: example = six.next(gen) return tf.contrib.framework.nest.flatten(example) def input_fn(): flat_example = tf.py_func(py_func, [], types) _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)] example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example) return example return input_fn
[ "def", "make_input_fn_from_generator", "(", "gen", ")", ":", "first_ex", "=", "six", ".", "next", "(", "gen", ")", "flattened", "=", "tf", ".", "contrib", ".", "framework", ".", "nest", ".", "flatten", "(", "first_ex", ")", "types", "=", "[", "t", ".", "dtype", "for", "t", "in", "flattened", "]", "shapes", "=", "[", "[", "None", "]", "*", "len", "(", "t", ".", "shape", ")", "for", "t", "in", "flattened", "]", "first_ex_list", "=", "[", "first_ex", "]", "def", "py_func", "(", ")", ":", "if", "first_ex_list", ":", "example", "=", "first_ex_list", ".", "pop", "(", ")", "else", ":", "example", "=", "six", ".", "next", "(", "gen", ")", "return", "tf", ".", "contrib", ".", "framework", ".", "nest", ".", "flatten", "(", "example", ")", "def", "input_fn", "(", ")", ":", "flat_example", "=", "tf", ".", "py_func", "(", "py_func", ",", "[", "]", ",", "types", ")", "_", "=", "[", "t", ".", "set_shape", "(", "shape", ")", "for", "t", ",", "shape", "in", "zip", "(", "flat_example", ",", "shapes", ")", "]", "example", "=", "tf", ".", "contrib", ".", "framework", ".", "nest", ".", "pack_sequence_as", "(", "first_ex", ",", "flat_example", ")", "return", "example", "return", "input_fn" ]
Use py_func to yield elements from the given generator.
[ "Use", "py_func", "to", "yield", "elements", "from", "the", "given", "generator", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L601-L622
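A minimal sketch of wrapping a Python generator of numpy feature dicts as an input function; this assumes TF1-style graph execution, since the function relies on tf.py_func and tf.contrib:

import numpy as np
from tensor2tensor.utils import decoding

def gen():
  # Endless stream of feature dicts; the first element fixes dtypes and ranks.
  while True:
    yield {"inputs": np.array([[1, 2, 3]], dtype=np.int32)}

input_fn = decoding.make_input_fn_from_generator(gen())
# Calling input_fn() builds a py_func-backed feature dict of tensors; it is
# typically handed to estimator.predict, as decode_interactively does below.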
22,793
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
decode_interactively
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None): """Interactive decoding.""" is_image = "image" in hparams.problem.name is_text2class = isinstance(hparams.problem, text_problems.Text2ClassProblem) skip_eos_postprocess = ( is_image or is_text2class or decode_hp.skip_eos_postprocess) def input_fn(): gen_fn = make_input_fn_from_generator( _interactive_input_fn(hparams, decode_hp)) example = gen_fn() example = _interactive_input_tensor_to_features_dict(example, hparams) return example result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) for result in result_iter: targets_vocab = hparams.problem_hparams.vocabulary["targets"] if decode_hp.return_beams: beams = np.split(result["outputs"], decode_hp.beam_size, axis=0) scores = None if "scores" in result: if np.isscalar(result["scores"]): result["scores"] = result["scores"].reshape(1) scores = np.split(result["scores"], decode_hp.beam_size, axis=0) for k, beam in enumerate(beams): tf.logging.info("BEAM %d:" % k) beam_string = targets_vocab.decode(_save_until_eos( beam, skip_eos_postprocess)) if scores is not None: tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k])) else: tf.logging.info("\"%s\"" % beam_string) else: if decode_hp.identity_output: tf.logging.info(" ".join(map(str, result["outputs"].flatten()))) else: tf.logging.info( targets_vocab.decode(_save_until_eos( result["outputs"], skip_eos_postprocess)))
python
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None): """Interactive decoding.""" is_image = "image" in hparams.problem.name is_text2class = isinstance(hparams.problem, text_problems.Text2ClassProblem) skip_eos_postprocess = ( is_image or is_text2class or decode_hp.skip_eos_postprocess) def input_fn(): gen_fn = make_input_fn_from_generator( _interactive_input_fn(hparams, decode_hp)) example = gen_fn() example = _interactive_input_tensor_to_features_dict(example, hparams) return example result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) for result in result_iter: targets_vocab = hparams.problem_hparams.vocabulary["targets"] if decode_hp.return_beams: beams = np.split(result["outputs"], decode_hp.beam_size, axis=0) scores = None if "scores" in result: if np.isscalar(result["scores"]): result["scores"] = result["scores"].reshape(1) scores = np.split(result["scores"], decode_hp.beam_size, axis=0) for k, beam in enumerate(beams): tf.logging.info("BEAM %d:" % k) beam_string = targets_vocab.decode(_save_until_eos( beam, skip_eos_postprocess)) if scores is not None: tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k])) else: tf.logging.info("\"%s\"" % beam_string) else: if decode_hp.identity_output: tf.logging.info(" ".join(map(str, result["outputs"].flatten()))) else: tf.logging.info( targets_vocab.decode(_save_until_eos( result["outputs"], skip_eos_postprocess)))
[ "def", "decode_interactively", "(", "estimator", ",", "hparams", ",", "decode_hp", ",", "checkpoint_path", "=", "None", ")", ":", "is_image", "=", "\"image\"", "in", "hparams", ".", "problem", ".", "name", "is_text2class", "=", "isinstance", "(", "hparams", ".", "problem", ",", "text_problems", ".", "Text2ClassProblem", ")", "skip_eos_postprocess", "=", "(", "is_image", "or", "is_text2class", "or", "decode_hp", ".", "skip_eos_postprocess", ")", "def", "input_fn", "(", ")", ":", "gen_fn", "=", "make_input_fn_from_generator", "(", "_interactive_input_fn", "(", "hparams", ",", "decode_hp", ")", ")", "example", "=", "gen_fn", "(", ")", "example", "=", "_interactive_input_tensor_to_features_dict", "(", "example", ",", "hparams", ")", "return", "example", "result_iter", "=", "estimator", ".", "predict", "(", "input_fn", ",", "checkpoint_path", "=", "checkpoint_path", ")", "for", "result", "in", "result_iter", ":", "targets_vocab", "=", "hparams", ".", "problem_hparams", ".", "vocabulary", "[", "\"targets\"", "]", "if", "decode_hp", ".", "return_beams", ":", "beams", "=", "np", ".", "split", "(", "result", "[", "\"outputs\"", "]", ",", "decode_hp", ".", "beam_size", ",", "axis", "=", "0", ")", "scores", "=", "None", "if", "\"scores\"", "in", "result", ":", "if", "np", ".", "isscalar", "(", "result", "[", "\"scores\"", "]", ")", ":", "result", "[", "\"scores\"", "]", "=", "result", "[", "\"scores\"", "]", ".", "reshape", "(", "1", ")", "scores", "=", "np", ".", "split", "(", "result", "[", "\"scores\"", "]", ",", "decode_hp", ".", "beam_size", ",", "axis", "=", "0", ")", "for", "k", ",", "beam", "in", "enumerate", "(", "beams", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"BEAM %d:\"", "%", "k", ")", "beam_string", "=", "targets_vocab", ".", "decode", "(", "_save_until_eos", "(", "beam", ",", "skip_eos_postprocess", ")", ")", "if", "scores", "is", "not", "None", ":", "tf", ".", "logging", ".", "info", "(", "\"\\\"%s\\\"\\tScore:%f\"", "%", "(", "beam_string", ",", "scores", "[", "k", "]", ")", ")", "else", ":", "tf", ".", "logging", ".", "info", "(", "\"\\\"%s\\\"\"", "%", "beam_string", ")", "else", ":", "if", "decode_hp", ".", "identity_output", ":", "tf", ".", "logging", ".", "info", "(", "\" \"", ".", "join", "(", "map", "(", "str", ",", "result", "[", "\"outputs\"", "]", ".", "flatten", "(", ")", ")", ")", ")", "else", ":", "tf", ".", "logging", ".", "info", "(", "targets_vocab", ".", "decode", "(", "_save_until_eos", "(", "result", "[", "\"outputs\"", "]", ",", "skip_eos_postprocess", ")", ")", ")" ]
Interactive decoding.
[ "Interactive", "decoding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L625-L666
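This path is usually reached through the t2t-decoder binary rather than called directly; a sketch of the invocation (flag names per the t2t CLI, paths are placeholders):

#   t2t-decoder --problem=translate_ende_wmt32k --model=transformer \
#     --hparams_set=transformer_base --output_dir=$TRAIN_DIR \
#     --decode_interactive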
22,794
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
_decode_batch_input_fn
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary, batch_size, max_input_size, task_id=-1, has_input=True): """Generator to produce batches of inputs.""" tf.logging.info(" batch %d" % num_decode_batches) for b in range(num_decode_batches): tf.logging.info("Decoding batch %d" % b) batch_length = 0 batch_inputs = [] for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]: input_ids = vocabulary.encode(inputs) if max_input_size > 0: # Subtract 1 for the EOS_ID. input_ids = input_ids[:max_input_size - 1] if has_input or task_id > -1: # Do not append EOS for pure LM tasks. final_id = text_encoder.EOS_ID if task_id < 0 else task_id input_ids.append(final_id) batch_inputs.append(input_ids) if len(input_ids) > batch_length: batch_length = len(input_ids) final_batch_inputs = [] for input_ids in batch_inputs: assert len(input_ids) <= batch_length x = input_ids + [0] * (batch_length - len(input_ids)) final_batch_inputs.append(x) yield { "inputs": np.array(final_batch_inputs).astype(np.int32), }
python
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary, batch_size, max_input_size, task_id=-1, has_input=True): """Generator to produce batches of inputs.""" tf.logging.info(" batch %d" % num_decode_batches) for b in range(num_decode_batches): tf.logging.info("Decoding batch %d" % b) batch_length = 0 batch_inputs = [] for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]: input_ids = vocabulary.encode(inputs) if max_input_size > 0: # Subtract 1 for the EOS_ID. input_ids = input_ids[:max_input_size - 1] if has_input or task_id > -1: # Do not append EOS for pure LM tasks. final_id = text_encoder.EOS_ID if task_id < 0 else task_id input_ids.append(final_id) batch_inputs.append(input_ids) if len(input_ids) > batch_length: batch_length = len(input_ids) final_batch_inputs = [] for input_ids in batch_inputs: assert len(input_ids) <= batch_length x = input_ids + [0] * (batch_length - len(input_ids)) final_batch_inputs.append(x) yield { "inputs": np.array(final_batch_inputs).astype(np.int32), }
[ "def", "_decode_batch_input_fn", "(", "num_decode_batches", ",", "sorted_inputs", ",", "vocabulary", ",", "batch_size", ",", "max_input_size", ",", "task_id", "=", "-", "1", ",", "has_input", "=", "True", ")", ":", "tf", ".", "logging", ".", "info", "(", "\" batch %d\"", "%", "num_decode_batches", ")", "for", "b", "in", "range", "(", "num_decode_batches", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Decoding batch %d\"", "%", "b", ")", "batch_length", "=", "0", "batch_inputs", "=", "[", "]", "for", "inputs", "in", "sorted_inputs", "[", "b", "*", "batch_size", ":", "(", "b", "+", "1", ")", "*", "batch_size", "]", ":", "input_ids", "=", "vocabulary", ".", "encode", "(", "inputs", ")", "if", "max_input_size", ">", "0", ":", "# Subtract 1 for the EOS_ID.", "input_ids", "=", "input_ids", "[", ":", "max_input_size", "-", "1", "]", "if", "has_input", "or", "task_id", ">", "-", "1", ":", "# Do not append EOS for pure LM tasks.", "final_id", "=", "text_encoder", ".", "EOS_ID", "if", "task_id", "<", "0", "else", "task_id", "input_ids", ".", "append", "(", "final_id", ")", "batch_inputs", ".", "append", "(", "input_ids", ")", "if", "len", "(", "input_ids", ")", ">", "batch_length", ":", "batch_length", "=", "len", "(", "input_ids", ")", "final_batch_inputs", "=", "[", "]", "for", "input_ids", "in", "batch_inputs", ":", "assert", "len", "(", "input_ids", ")", "<=", "batch_length", "x", "=", "input_ids", "+", "[", "0", "]", "*", "(", "batch_length", "-", "len", "(", "input_ids", ")", ")", "final_batch_inputs", ".", "append", "(", "x", ")", "yield", "{", "\"inputs\"", ":", "np", ".", "array", "(", "final_batch_inputs", ")", ".", "astype", "(", "np", ".", "int32", ")", ",", "}" ]
Generator to produce batches of inputs.
[ "Generator", "to", "produce", "batches", "of", "inputs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L669-L697
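A sketch of producing padded feed dicts from pre-sorted inputs; ByteTextEncoder is used here only as a convenient stand-in for the problem's real input encoder:

from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import decoding

enc = text_encoder.ByteTextEncoder()
batches = decoding._decode_batch_input_fn(
    num_decode_batches=2,
    sorted_inputs=["a longer test sentence", "hi"],
    vocabulary=enc,
    batch_size=1,
    max_input_size=-1)
for feed in batches:
  # Each feed is int32, shape (batch, longest-in-batch): ids + EOS, zero-padded.
  print(feed["inputs"].shape)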
22,795
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
_interactive_input_fn
def _interactive_input_fn(hparams, decode_hp): """Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. We yield int32 arrays with shape [const_array_size]. The format is: [num_samples, decode_length, len(input ids), <input ids>, <padding>] Args: hparams: model hparams decode_hp: decode hparams Yields: numpy arrays Raises: Exception: when `input_type` is invalid. """ num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1 decode_length = decode_hp.extra_length input_type = "text" p_hparams = hparams.problem_hparams has_input = "inputs" in p_hparams.modality vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"] # This should be longer than the longest input. const_array_size = 10000 # Import readline if available for command line editing and recall. try: import readline # pylint: disable=g-import-not-at-top,unused-variable except ImportError: pass while True: prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n" " it=<input_type> ('text' or 'image' or 'label', default: " "text)\n" " ns=<num_samples> (changes number of samples, default: 1)\n" " dl=<decode_length> (changes decode length, default: 100)\n" " <%s> (decode)\n" " q (quit)\n" ">" % (num_samples, decode_length, "source_string" if has_input else "target_prefix")) input_string = input(prompt) if input_string == "q": return elif input_string[:3] == "ns=": num_samples = int(input_string[3:]) elif input_string[:3] == "dl=": decode_length = int(input_string[3:]) elif input_string[:3] == "it=": input_type = input_string[3:] else: if input_type == "text": input_ids = vocabulary.encode(input_string) if has_input: input_ids.append(text_encoder.EOS_ID) x = [num_samples, decode_length, len(input_ids)] + input_ids assert len(x) < const_array_size x += [0] * (const_array_size - len(x)) features = { "inputs": np.array(x).astype(np.int32), } elif input_type == "image": input_path = input_string img = vocabulary.encode(input_path) features = { "inputs": img.astype(np.int32), } elif input_type == "label": input_ids = [int(input_string)] x = [num_samples, decode_length, len(input_ids)] + input_ids features = { "inputs": np.array(x).astype(np.int32), } else: raise Exception("Unsupported input type.") for k, v in six.iteritems( problem_lib.problem_hparams_to_features(p_hparams)): features[k] = np.array(v).astype(np.int32) yield features
python
def _interactive_input_fn(hparams, decode_hp): """Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. We yield int32 arrays with shape [const_array_size]. The format is: [num_samples, decode_length, len(input ids), <input ids>, <padding>] Args: hparams: model hparams decode_hp: decode hparams Yields: numpy arrays Raises: Exception: when `input_type` is invalid. """ num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1 decode_length = decode_hp.extra_length input_type = "text" p_hparams = hparams.problem_hparams has_input = "inputs" in p_hparams.modality vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"] # This should be longer than the longest input. const_array_size = 10000 # Import readline if available for command line editing and recall. try: import readline # pylint: disable=g-import-not-at-top,unused-variable except ImportError: pass while True: prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n" " it=<input_type> ('text' or 'image' or 'label', default: " "text)\n" " ns=<num_samples> (changes number of samples, default: 1)\n" " dl=<decode_length> (changes decode length, default: 100)\n" " <%s> (decode)\n" " q (quit)\n" ">" % (num_samples, decode_length, "source_string" if has_input else "target_prefix")) input_string = input(prompt) if input_string == "q": return elif input_string[:3] == "ns=": num_samples = int(input_string[3:]) elif input_string[:3] == "dl=": decode_length = int(input_string[3:]) elif input_string[:3] == "it=": input_type = input_string[3:] else: if input_type == "text": input_ids = vocabulary.encode(input_string) if has_input: input_ids.append(text_encoder.EOS_ID) x = [num_samples, decode_length, len(input_ids)] + input_ids assert len(x) < const_array_size x += [0] * (const_array_size - len(x)) features = { "inputs": np.array(x).astype(np.int32), } elif input_type == "image": input_path = input_string img = vocabulary.encode(input_path) features = { "inputs": img.astype(np.int32), } elif input_type == "label": input_ids = [int(input_string)] x = [num_samples, decode_length, len(input_ids)] + input_ids features = { "inputs": np.array(x).astype(np.int32), } else: raise Exception("Unsupported input type.") for k, v in six.iteritems( problem_lib.problem_hparams_to_features(p_hparams)): features[k] = np.array(v).astype(np.int32) yield features
[ "def", "_interactive_input_fn", "(", "hparams", ",", "decode_hp", ")", ":", "num_samples", "=", "decode_hp", ".", "num_samples", "if", "decode_hp", ".", "num_samples", ">", "0", "else", "1", "decode_length", "=", "decode_hp", ".", "extra_length", "input_type", "=", "\"text\"", "p_hparams", "=", "hparams", ".", "problem_hparams", "has_input", "=", "\"inputs\"", "in", "p_hparams", ".", "modality", "vocabulary", "=", "p_hparams", ".", "vocabulary", "[", "\"inputs\"", "if", "has_input", "else", "\"targets\"", "]", "# This should be longer than the longest input.", "const_array_size", "=", "10000", "# Import readline if available for command line editing and recall.", "try", ":", "import", "readline", "# pylint: disable=g-import-not-at-top,unused-variable", "except", "ImportError", ":", "pass", "while", "True", ":", "prompt", "=", "(", "\"INTERACTIVE MODE num_samples=%d decode_length=%d \\n\"", "\" it=<input_type> ('text' or 'image' or 'label', default: \"", "\"text)\\n\"", "\" ns=<num_samples> (changes number of samples, default: 1)\\n\"", "\" dl=<decode_length> (changes decode length, default: 100)\\n\"", "\" <%s> (decode)\\n\"", "\" q (quit)\\n\"", "\">\"", "%", "(", "num_samples", ",", "decode_length", ",", "\"source_string\"", "if", "has_input", "else", "\"target_prefix\"", ")", ")", "input_string", "=", "input", "(", "prompt", ")", "if", "input_string", "==", "\"q\"", ":", "return", "elif", "input_string", "[", ":", "3", "]", "==", "\"ns=\"", ":", "num_samples", "=", "int", "(", "input_string", "[", "3", ":", "]", ")", "elif", "input_string", "[", ":", "3", "]", "==", "\"dl=\"", ":", "decode_length", "=", "int", "(", "input_string", "[", "3", ":", "]", ")", "elif", "input_string", "[", ":", "3", "]", "==", "\"it=\"", ":", "input_type", "=", "input_string", "[", "3", ":", "]", "else", ":", "if", "input_type", "==", "\"text\"", ":", "input_ids", "=", "vocabulary", ".", "encode", "(", "input_string", ")", "if", "has_input", ":", "input_ids", ".", "append", "(", "text_encoder", ".", "EOS_ID", ")", "x", "=", "[", "num_samples", ",", "decode_length", ",", "len", "(", "input_ids", ")", "]", "+", "input_ids", "assert", "len", "(", "x", ")", "<", "const_array_size", "x", "+=", "[", "0", "]", "*", "(", "const_array_size", "-", "len", "(", "x", ")", ")", "features", "=", "{", "\"inputs\"", ":", "np", ".", "array", "(", "x", ")", ".", "astype", "(", "np", ".", "int32", ")", ",", "}", "elif", "input_type", "==", "\"image\"", ":", "input_path", "=", "input_string", "img", "=", "vocabulary", ".", "encode", "(", "input_path", ")", "features", "=", "{", "\"inputs\"", ":", "img", ".", "astype", "(", "np", ".", "int32", ")", ",", "}", "elif", "input_type", "==", "\"label\"", ":", "input_ids", "=", "[", "int", "(", "input_string", ")", "]", "x", "=", "[", "num_samples", ",", "decode_length", ",", "len", "(", "input_ids", ")", "]", "+", "input_ids", "features", "=", "{", "\"inputs\"", ":", "np", ".", "array", "(", "x", ")", ".", "astype", "(", "np", ".", "int32", ")", ",", "}", "else", ":", "raise", "Exception", "(", "\"Unsupported input type.\"", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "problem_lib", ".", "problem_hparams_to_features", "(", "p_hparams", ")", ")", ":", "features", "[", "k", "]", "=", "np", ".", "array", "(", "v", ")", ".", "astype", "(", "np", ".", "int32", ")", "yield", "features" ]
Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. We yield int32 arrays with shape [const_array_size]. The format is: [num_samples, decode_length, len(input ids), <input ids>, <padding>] Args: hparams: model hparams decode_hp: decode hparams Yields: numpy arrays Raises: Exception: when `input_type` is invalid.
[ "Generator", "that", "reads", "from", "the", "terminal", "and", "yields", "interactive", "inputs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L700-L779
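A concrete illustration of the packed text layout, with made-up token ids:

# Typing "hello" when the encoder yields ids [42, 17], with num_samples=1,
# decode_length=100 and has_input=True (so EOS id 1 is appended):
#
#   x = [1, 100, 3, 42, 17, 1, 0, 0, ...]   # zero-padded to const_array_size
#
# i.e. [num_samples, decode_length, len(ids incl. EOS)] + ids + padding.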
22,796
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
save_video
def save_video(video, save_path_template): """Save frames of the videos into files.""" try: from PIL import Image # pylint: disable=g-import-not-at-top except ImportError as e: tf.logging.warning( "Showing and saving an image requires PIL library to be " "installed: %s", e) raise NotImplementedError("Image display and save not implemented.") for i, frame in enumerate(video): save_path = save_path_template.format(i) with tf.gfile.Open(save_path, "wb") as sp: Image.fromarray(np.uint8(frame)).save(sp)
python
def save_video(video, save_path_template): """Save frames of the videos into files.""" try: from PIL import Image # pylint: disable=g-import-not-at-top except ImportError as e: tf.logging.warning( "Showing and saving an image requires PIL library to be " "installed: %s", e) raise NotImplementedError("Image display and save not implemented.") for i, frame in enumerate(video): save_path = save_path_template.format(i) with tf.gfile.Open(save_path, "wb") as sp: Image.fromarray(np.uint8(frame)).save(sp)
[ "def", "save_video", "(", "video", ",", "save_path_template", ")", ":", "try", ":", "from", "PIL", "import", "Image", "# pylint: disable=g-import-not-at-top", "except", "ImportError", "as", "e", ":", "tf", ".", "logging", ".", "warning", "(", "\"Showing and saving an image requires PIL library to be \"", "\"installed: %s\"", ",", "e", ")", "raise", "NotImplementedError", "(", "\"Image display and save not implemented.\"", ")", "for", "i", ",", "frame", "in", "enumerate", "(", "video", ")", ":", "save_path", "=", "save_path_template", ".", "format", "(", "i", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "save_path", ",", "\"wb\"", ")", "as", "sp", ":", "Image", ".", "fromarray", "(", "np", ".", "uint8", "(", "frame", ")", ")", ".", "save", "(", "sp", ")" ]
Save frames of the videos into files.
[ "Save", "frames", "of", "the", "videos", "into", "files", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L782-L795
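A small runnable sketch (requires PIL); the template receives the frame index via str.format, matching the "{:05d}" pattern fix_and_save_video builds above:

import numpy as np
from tensor2tensor.utils import decoding

# Four random 64x64 RGB frames written as demo_video_00000.png ... _00003.png.
frames = np.random.randint(0, 256, size=(4, 64, 64, 3), dtype=np.uint8)
decoding.save_video(frames, "/tmp/demo_video_{:05d}.png")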
22,797
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
show_and_save_image
def show_and_save_image(img, save_path): """Shows an image using matplotlib and saves it.""" try: import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top except ImportError as e: tf.logging.warning( "Showing and saving an image requires matplotlib to be " "installed: %s", e) raise NotImplementedError("Image display and save not implemented.") plt.imshow(img) with tf.gfile.Open(save_path, "wb") as sp: plt.savefig(sp)
python
def show_and_save_image(img, save_path): """Shows an image using matplotlib and saves it.""" try: import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top except ImportError as e: tf.logging.warning( "Showing and saving an image requires matplotlib to be " "installed: %s", e) raise NotImplementedError("Image display and save not implemented.") plt.imshow(img) with tf.gfile.Open(save_path, "wb") as sp: plt.savefig(sp)
[ "def", "show_and_save_image", "(", "img", ",", "save_path", ")", ":", "try", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "# pylint: disable=g-import-not-at-top", "except", "ImportError", "as", "e", ":", "tf", ".", "logging", ".", "warning", "(", "\"Showing and saving an image requires matplotlib to be \"", "\"installed: %s\"", ",", "e", ")", "raise", "NotImplementedError", "(", "\"Image display and save not implemented.\"", ")", "plt", ".", "imshow", "(", "img", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "save_path", ",", "\"wb\"", ")", "as", "sp", ":", "plt", ".", "savefig", "(", "sp", ")" ]
Shows an image using matplotlib and saves it.
[ "Shows", "an", "image", "using", "matplotlib", "and", "saves", "it", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L798-L809
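A small runnable sketch (requires matplotlib); note the caller above passes inputs / 255., so float pixel values in [0, 1] are the expected range:

import numpy as np
from tensor2tensor.utils import decoding

img = np.random.rand(64, 64, 3)   # float RGB in [0, 1]
decoding.show_and_save_image(img, "/tmp/demo_image.png")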
22,798
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
_get_language_modeling_inputs
def _get_language_modeling_inputs(filename, delimiter="\n", repeat=1, append_space_to_final_punctionation=True): """Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings """ with tf.gfile.Open(filename) as f: text = f.read() inputs = text.split(delimiter) if not inputs[-1]: inputs.pop() inputs *= repeat if append_space_to_final_punctionation: inputs = [ s + " " if s and s[-1] in string.punctuation else s for s in inputs] return inputs
python
def _get_language_modeling_inputs(filename, delimiter="\n", repeat=1, append_space_to_final_punctionation=True): """Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings """ with tf.gfile.Open(filename) as f: text = f.read() inputs = text.split(delimiter) if not inputs[-1]: inputs.pop() inputs *= repeat if append_space_to_final_punctionation: inputs = [ s + " " if s and s[-1] in string.punctuation else s for s in inputs] return inputs
[ "def", "_get_language_modeling_inputs", "(", "filename", ",", "delimiter", "=", "\"\\n\"", ",", "repeat", "=", "1", ",", "append_space_to_final_punctionation", "=", "True", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", "inputs", "=", "text", ".", "split", "(", "delimiter", ")", "if", "not", "inputs", "[", "-", "1", "]", ":", "inputs", ".", "pop", "(", ")", "inputs", "*=", "repeat", "if", "append_space_to_final_punctionation", ":", "inputs", "=", "[", "s", "+", "\" \"", "if", "s", "and", "s", "[", "-", "1", "]", "in", "string", ".", "punctuation", "else", "s", "for", "s", "in", "inputs", "]", "return", "inputs" ]
Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings
[ "Read", "a", "file", "of", "partial", "texts", "to", "continue", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L812-L840
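A worked example, assuming /tmp/prefixes.txt contains the two lines "The cat sat." and "Once upon a":

from tensor2tensor.utils import decoding

prefixes = decoding._get_language_modeling_inputs("/tmp/prefixes.txt", repeat=2)
# -> ["The cat sat. ", "Once upon a", "The cat sat. ", "Once upon a"]
# The trailing space after "." lets the subword tokenizer close the final token.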
22,799
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
_get_sorted_inputs
def _get_sorted_inputs(filename, delimiter="\n"): """Returning inputs sorted according to decreasing length. This causes inputs of similar lengths to be processed in the same batch, facilitating early stopping for short sequences. Longer sequences are sorted first so that if you're going to get OOMs, you'll see it in the first batch. Args: filename: path to file with inputs, 1 per line. delimiter: str, delimits records in the file. Returns: a sorted list of inputs """ tf.logging.info("Getting sorted inputs") with tf.gfile.Open(filename) as f: text = f.read() records = text.split(delimiter) inputs = [record.strip() for record in records] # Strip the last empty line. if not inputs[-1]: inputs.pop() input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)] sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1)) # We'll need the keys to rearrange the inputs back into their original order sorted_keys = {} sorted_inputs = [] for i, (index, _) in enumerate(sorted_input_lens): sorted_inputs.append(inputs[index]) sorted_keys[index] = i return sorted_inputs, sorted_keys
python
def _get_sorted_inputs(filename, delimiter="\n"): """Returning inputs sorted according to decreasing length. This causes inputs of similar lengths to be processed in the same batch, facilitating early stopping for short sequences. Longer sequences are sorted first so that if you're going to get OOMs, you'll see it in the first batch. Args: filename: path to file with inputs, 1 per line. delimiter: str, delimits records in the file. Returns: a sorted list of inputs """ tf.logging.info("Getting sorted inputs") with tf.gfile.Open(filename) as f: text = f.read() records = text.split(delimiter) inputs = [record.strip() for record in records] # Strip the last empty line. if not inputs[-1]: inputs.pop() input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)] sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1)) # We'll need the keys to rearrange the inputs back into their original order sorted_keys = {} sorted_inputs = [] for i, (index, _) in enumerate(sorted_input_lens): sorted_inputs.append(inputs[index]) sorted_keys[index] = i return sorted_inputs, sorted_keys
[ "def", "_get_sorted_inputs", "(", "filename", ",", "delimiter", "=", "\"\\n\"", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Getting sorted inputs\"", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", "records", "=", "text", ".", "split", "(", "delimiter", ")", "inputs", "=", "[", "record", ".", "strip", "(", ")", "for", "record", "in", "records", "]", "# Strip the last empty line.", "if", "not", "inputs", "[", "-", "1", "]", ":", "inputs", ".", "pop", "(", ")", "input_lens", "=", "[", "(", "i", ",", "-", "len", "(", "line", ".", "split", "(", ")", ")", ")", "for", "i", ",", "line", "in", "enumerate", "(", "inputs", ")", "]", "sorted_input_lens", "=", "sorted", "(", "input_lens", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "# We'll need the keys to rearrange the inputs back into their original order", "sorted_keys", "=", "{", "}", "sorted_inputs", "=", "[", "]", "for", "i", ",", "(", "index", ",", "_", ")", "in", "enumerate", "(", "sorted_input_lens", ")", ":", "sorted_inputs", ".", "append", "(", "inputs", "[", "index", "]", ")", "sorted_keys", "[", "index", "]", "=", "i", "return", "sorted_inputs", ",", "sorted_keys" ]
Returning inputs sorted according to decreasing length. This causes inputs of similar lengths to be processed in the same batch, facilitating early stopping for short sequences. Longer sequences are sorted first so that if you're going to get OOMs, you'll see it in the first batch. Args: filename: path to file with inputs, 1 per line. delimiter: str, delimits records in the file. Returns: a sorted list of inputs
[ "Returning", "inputs", "sorted", "according", "to", "decreasing", "length", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L843-L876
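A sketch of how the returned mapping is used: sorted_keys maps an input's original line index to its position in sorted_inputs, so decodes can be written back in the original file order. `run_model_on` below is a hypothetical decode step:

from tensor2tensor.utils import decoding

sorted_inputs, sorted_keys = decoding._get_sorted_inputs("/tmp/inputs.txt")
decodes = run_model_on(sorted_inputs)  # hypothetical: longest inputs decoded first
restored = [decodes[sorted_keys[i]] for i in range(len(decodes))]  # original order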