id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
21,900
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
RandomNormalInitializer
def RandomNormalInitializer(stddev=1e-2): """An initializer function for random normal coefficients.""" def init(shape, rng): return (stddev * backend.random.normal(rng, shape)).astype('float32') return init
python
def RandomNormalInitializer(stddev=1e-2):
  """An initializer function for random normal coefficients."""
  def init(shape, rng):
    # Draw a standard normal sample of the requested shape, scale it by
    # stddev, and cast to float32 for use as layer parameters.
    sample = backend.random.normal(rng, shape)
    return (stddev * sample).astype('float32')
  return init
[ "def", "RandomNormalInitializer", "(", "stddev", "=", "1e-2", ")", ":", "def", "init", "(", "shape", ",", "rng", ")", ":", "return", "(", "stddev", "*", "backend", ".", "random", ".", "normal", "(", "rng", ",", "shape", ")", ")", ".", "astype", "(", "'float32'", ")", "return", "init" ]
An initializer function for random normal coefficients.
[ "An", "initializer", "function", "for", "random", "normal", "coefficients", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L42-L46
21,901
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
GlorotNormalInitializer
def GlorotNormalInitializer(out_dim=0, in_dim=1, scale=onp.sqrt(2)): """An initializer function for random Glorot-scaled coefficients.""" def init(shape, rng): fan_in, fan_out = shape[in_dim], shape[out_dim] size = onp.prod(onp.delete(shape, [in_dim, out_dim])) std = scale / np.sqrt((fan_in + fan_out) / 2. * size) return (std * backend.random.normal(rng, shape)).astype('float32') return init
python
def GlorotNormalInitializer(out_dim=0, in_dim=1, scale=onp.sqrt(2)):
  """An initializer function for random Glorot-scaled coefficients."""
  def init(shape, rng):
    # Fan-in/fan-out come from the designated axes; every remaining axis
    # contributes multiplicatively to the receptive-field size.
    fan_in = shape[in_dim]
    fan_out = shape[out_dim]
    receptive_size = onp.prod(onp.delete(shape, [in_dim, out_dim]))
    std = scale / np.sqrt((fan_in + fan_out) / 2. * receptive_size)
    return (std * backend.random.normal(rng, shape)).astype('float32')
  return init
[ "def", "GlorotNormalInitializer", "(", "out_dim", "=", "0", ",", "in_dim", "=", "1", ",", "scale", "=", "onp", ".", "sqrt", "(", "2", ")", ")", ":", "def", "init", "(", "shape", ",", "rng", ")", ":", "fan_in", ",", "fan_out", "=", "shape", "[", "in_dim", "]", ",", "shape", "[", "out_dim", "]", "size", "=", "onp", ".", "prod", "(", "onp", ".", "delete", "(", "shape", ",", "[", "in_dim", ",", "out_dim", "]", ")", ")", "std", "=", "scale", "/", "np", ".", "sqrt", "(", "(", "fan_in", "+", "fan_out", ")", "/", "2.", "*", "size", ")", "return", "(", "std", "*", "backend", ".", "random", ".", "normal", "(", "rng", ",", "shape", ")", ")", ".", "astype", "(", "'float32'", ")", "return", "init" ]
An initializer function for random Glorot-scaled coefficients.
[ "An", "initializer", "function", "for", "random", "Glorot", "-", "scaled", "coefficients", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L49-L56
21,902
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
GlorotUniformInitializer
def GlorotUniformInitializer(out_dim=0, in_dim=1): """An initializer function for random uniform Glorot-scaled coefficients.""" def init(shape, rng): fan_in, fan_out = shape[in_dim], shape[out_dim] std = np.sqrt(2.0 / (fan_in + fan_out)) a = np.sqrt(3.0) * std return backend.random.uniform(rng, shape, minval=-a, maxval=a) return init
python
def GlorotUniformInitializer(out_dim=0, in_dim=1):
  """An initializer function for random uniform Glorot-scaled coefficients."""
  def init(shape, rng):
    # Match the variance of the Glorot normal initializer with a uniform
    # distribution on [-a, a), where a = sqrt(3) * std.
    fan_sum = shape[in_dim] + shape[out_dim]
    std = np.sqrt(2.0 / fan_sum)
    half_width = np.sqrt(3.0) * std
    return backend.random.uniform(rng, shape, minval=-half_width,
                                  maxval=half_width)
  return init
[ "def", "GlorotUniformInitializer", "(", "out_dim", "=", "0", ",", "in_dim", "=", "1", ")", ":", "def", "init", "(", "shape", ",", "rng", ")", ":", "fan_in", ",", "fan_out", "=", "shape", "[", "in_dim", "]", ",", "shape", "[", "out_dim", "]", "std", "=", "np", ".", "sqrt", "(", "2.0", "/", "(", "fan_in", "+", "fan_out", ")", ")", "a", "=", "np", ".", "sqrt", "(", "3.0", ")", "*", "std", "return", "backend", ".", "random", ".", "uniform", "(", "rng", ",", "shape", ",", "minval", "=", "-", "a", ",", "maxval", "=", "a", ")", "return", "init" ]
An initializer function for random uniform Glorot-scaled coefficients.
[ "An", "initializer", "function", "for", "random", "uniform", "Glorot", "-", "scaled", "coefficients", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L59-L66
21,903
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
one_hot
def one_hot(x, size, dtype=np.float32): """Make a n+1 dim one-hot array from n dim int-categorical array.""" return np.array(x[..., np.newaxis] == np.arange(size), dtype)
python
def one_hot(x, size, dtype=np.float32):
  """Make a n+1 dim one-hot array from n dim int-categorical array."""
  # Broadcasting `x[..., None]` against arange(size) yields a boolean mask
  # with a trailing axis of length `size`, which is then cast to `dtype`.
  categories = np.arange(size)
  return np.array(x[..., np.newaxis] == categories, dtype)
[ "def", "one_hot", "(", "x", ",", "size", ",", "dtype", "=", "np", ".", "float32", ")", ":", "return", "np", ".", "array", "(", "x", "[", "...", ",", "np", ".", "newaxis", "]", "==", "np", ".", "arange", "(", "size", ")", ",", "dtype", ")" ]
Make a n+1 dim one-hot array from n dim int-categorical array.
[ "Make", "a", "n", "+", "1", "dim", "one", "-", "hot", "array", "from", "n", "dim", "int", "-", "categorical", "array", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L69-L71
21,904
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
padtype_to_pads
def padtype_to_pads(in_shape, window_shape, window_strides, padding): """Convert padding string to list of pairs of pad values.""" padding = padding.upper() if padding == 'SAME': out_shape = onp.ceil( onp.true_divide(in_shape, window_strides)).astype(int) pad_sizes = [max((out_size - 1) * stride + window_shape - in_size, 0) for out_size, stride, window_shape, in_size in zip(out_shape, window_strides, window_shape, in_shape)] return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes] elif padding == 'VALID': return [(0, 0)] * len(in_shape) else: msg = 'Unknown padding type: {}.' raise TypeError(msg.format(padding))
python
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
  """Convert padding string to list of pairs of pad values.

  Args:
    in_shape: sequence of spatial input dimensions.
    window_shape: sequence of window sizes, one per spatial dimension.
    window_strides: sequence of strides, one per spatial dimension.
    padding: padding type, 'SAME' or 'VALID' (case-insensitive).

  Returns:
    A list of (low, high) pad pairs, one per spatial dimension.

  Raises:
    TypeError: if `padding` is neither 'SAME' nor 'VALID'.
  """
  padding = padding.upper()
  if padding == 'SAME':
    out_shape = onp.ceil(
        onp.true_divide(in_shape, window_strides)).astype(int)
    # NOTE: the per-dimension loop variable is `window_size` (renamed from
    # the original, which shadowed the `window_shape` parameter).
    pad_sizes = [max((out_size - 1) * stride + window_size - in_size, 0)
                 for out_size, stride, window_size, in_size
                 in zip(out_shape, window_strides, window_shape, in_shape)]
    # Split the total pad evenly, giving the extra unit to the high side.
    return [(pad_size // 2, pad_size - pad_size // 2)
            for pad_size in pad_sizes]
  elif padding == 'VALID':
    return [(0, 0)] * len(in_shape)
  else:
    msg = 'Unknown padding type: {}.'
    raise TypeError(msg.format(padding))
[ "def", "padtype_to_pads", "(", "in_shape", ",", "window_shape", ",", "window_strides", ",", "padding", ")", ":", "padding", "=", "padding", ".", "upper", "(", ")", "if", "padding", "==", "'SAME'", ":", "out_shape", "=", "onp", ".", "ceil", "(", "onp", ".", "true_divide", "(", "in_shape", ",", "window_strides", ")", ")", ".", "astype", "(", "int", ")", "pad_sizes", "=", "[", "max", "(", "(", "out_size", "-", "1", ")", "*", "stride", "+", "window_shape", "-", "in_size", ",", "0", ")", "for", "out_size", ",", "stride", ",", "window_shape", ",", "in_size", "in", "zip", "(", "out_shape", ",", "window_strides", ",", "window_shape", ",", "in_shape", ")", "]", "return", "[", "(", "pad_size", "//", "2", ",", "pad_size", "-", "pad_size", "//", "2", ")", "for", "pad_size", "in", "pad_sizes", "]", "elif", "padding", "==", "'VALID'", ":", "return", "[", "(", "0", ",", "0", ")", "]", "*", "len", "(", "in_shape", ")", "else", ":", "msg", "=", "'Unknown padding type: {}.'", "raise", "TypeError", "(", "msg", ".", "format", "(", "padding", ")", ")" ]
Convert padding string to list of pairs of pad values.
[ "Convert", "padding", "string", "to", "list", "of", "pairs", "of", "pad", "values", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L181-L196
21,905
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
_flatten_output_shape
def _flatten_output_shape(input_shape, num_axis_to_keep=1): """Output shape of a flatten layer.""" if num_axis_to_keep >= len(input_shape): raise ValueError( "num_axis_to_keep[%d] should be less than input's rank[%d]" % (num_axis_to_keep, len(input_shape))) return tuple(input_shape[:num_axis_to_keep]) + ( reduce(op.mul, input_shape[num_axis_to_keep:], 1),)
python
def _flatten_output_shape(input_shape, num_axis_to_keep=1): """Output shape of a flatten layer.""" if num_axis_to_keep >= len(input_shape): raise ValueError( "num_axis_to_keep[%d] should be less than input's rank[%d]" % (num_axis_to_keep, len(input_shape))) return tuple(input_shape[:num_axis_to_keep]) + ( reduce(op.mul, input_shape[num_axis_to_keep:], 1),)
[ "def", "_flatten_output_shape", "(", "input_shape", ",", "num_axis_to_keep", "=", "1", ")", ":", "if", "num_axis_to_keep", ">=", "len", "(", "input_shape", ")", ":", "raise", "ValueError", "(", "\"num_axis_to_keep[%d] should be less than input's rank[%d]\"", "%", "(", "num_axis_to_keep", ",", "len", "(", "input_shape", ")", ")", ")", "return", "tuple", "(", "input_shape", "[", ":", "num_axis_to_keep", "]", ")", "+", "(", "reduce", "(", "op", ".", "mul", ",", "input_shape", "[", "num_axis_to_keep", ":", "]", ",", "1", ")", ",", ")" ]
Output shape of a flatten layer.
[ "Output", "shape", "of", "a", "flatten", "layer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L304-L311
21,906
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
_batch_norm_new_params
def _batch_norm_new_params(input_shape, rng, axis=(0, 1, 2), center=True, scale=True, **kwargs): """Helper to initialize batch norm params.""" del rng, kwargs axis = (axis,) if np.isscalar(axis) else axis shape = tuple(d for i, d in enumerate(input_shape) if i not in axis) beta = np.zeros(shape, dtype='float32') if center else () gamma = np.ones(shape, dtype='float32') if scale else () return (beta, gamma)
python
def _batch_norm_new_params(input_shape, rng, axis=(0, 1, 2), center=True, scale=True, **kwargs): """Helper to initialize batch norm params.""" del rng, kwargs axis = (axis,) if np.isscalar(axis) else axis shape = tuple(d for i, d in enumerate(input_shape) if i not in axis) beta = np.zeros(shape, dtype='float32') if center else () gamma = np.ones(shape, dtype='float32') if scale else () return (beta, gamma)
[ "def", "_batch_norm_new_params", "(", "input_shape", ",", "rng", ",", "axis", "=", "(", "0", ",", "1", ",", "2", ")", ",", "center", "=", "True", ",", "scale", "=", "True", ",", "*", "*", "kwargs", ")", ":", "del", "rng", ",", "kwargs", "axis", "=", "(", "axis", ",", ")", "if", "np", ".", "isscalar", "(", "axis", ")", "else", "axis", "shape", "=", "tuple", "(", "d", "for", "i", ",", "d", "in", "enumerate", "(", "input_shape", ")", "if", "i", "not", "in", "axis", ")", "beta", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "'float32'", ")", "if", "center", "else", "(", ")", "gamma", "=", "np", ".", "ones", "(", "shape", ",", "dtype", "=", "'float32'", ")", "if", "scale", "else", "(", ")", "return", "(", "beta", ",", "gamma", ")" ]
Helper to initialize batch norm params.
[ "Helper", "to", "initialize", "batch", "norm", "params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L321-L329
21,907
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
BatchNorm
def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5, center=True, scale=True, **unused_kwargs): """Layer construction function for a batch normalization layer.""" mean = np.mean(x, axis, keepdims=True) # Fast but less numerically-stable variance calculation than np.var. m1 = np.mean(x**2, axis, keepdims=True) var = m1 - mean**2 z = (x - mean) / np.sqrt(var + epsilon) # Expand the parameters to have the right axes. beta, gamma = params # TODO(phawkins): np.expand_dims should accept an axis tuple. # (https://github.com/numpy/numpy/issues/12290) ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x))) beta = beta[ed] gamma = gamma[ed] # Return the z rescaled by the parameters if requested. if center and scale: return gamma * z + beta if center: return z + beta if scale: return gamma * z return z
python
def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5, center=True,
              scale=True, **unused_kwargs):
  """Layer construction function for a batch normalization layer."""
  beta, gamma = params
  mean = np.mean(x, axis, keepdims=True)
  # Fast but less numerically-stable variance calculation than np.var.
  second_moment = np.mean(x**2, axis, keepdims=True)
  var = second_moment - mean**2
  z = (x - mean) / np.sqrt(var + epsilon)
  # Expand the parameters to have the right axes.
  # TODO(phawkins): np.expand_dims should accept an axis tuple.
  # (https://github.com/numpy/numpy/issues/12290)
  expand = tuple(None if i in axis else slice(None)
                 for i in range(np.ndim(x)))
  beta, gamma = beta[expand], gamma[expand]
  # Return the z rescaled by the parameters if requested.
  if center and scale:
    return gamma * z + beta
  elif center:
    return z + beta
  elif scale:
    return gamma * z
  return z
[ "def", "BatchNorm", "(", "x", ",", "params", ",", "axis", "=", "(", "0", ",", "1", ",", "2", ")", ",", "epsilon", "=", "1e-5", ",", "center", "=", "True", ",", "scale", "=", "True", ",", "*", "*", "unused_kwargs", ")", ":", "mean", "=", "np", ".", "mean", "(", "x", ",", "axis", ",", "keepdims", "=", "True", ")", "# Fast but less numerically-stable variance calculation than np.var.", "m1", "=", "np", ".", "mean", "(", "x", "**", "2", ",", "axis", ",", "keepdims", "=", "True", ")", "var", "=", "m1", "-", "mean", "**", "2", "z", "=", "(", "x", "-", "mean", ")", "/", "np", ".", "sqrt", "(", "var", "+", "epsilon", ")", "# Expand the parameters to have the right axes.", "beta", ",", "gamma", "=", "params", "# TODO(phawkins): np.expand_dims should accept an axis tuple.", "# (https://github.com/numpy/numpy/issues/12290)", "ed", "=", "tuple", "(", "None", "if", "i", "in", "axis", "else", "slice", "(", "None", ")", "for", "i", "in", "range", "(", "np", ".", "ndim", "(", "x", ")", ")", ")", "beta", "=", "beta", "[", "ed", "]", "gamma", "=", "gamma", "[", "ed", "]", "# Return the z rescaled by the parameters if requested.", "if", "center", "and", "scale", ":", "return", "gamma", "*", "z", "+", "beta", "if", "center", ":", "return", "z", "+", "beta", "if", "scale", ":", "return", "gamma", "*", "z", "return", "z" ]
Layer construction function for a batch normalization layer.
[ "Layer", "construction", "function", "for", "a", "batch", "normalization", "layer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L333-L357
21,908
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
Dropout
def Dropout(x, params, rate=0.0, mode='train', rng=None, **kwargs): """Layer construction function for a dropout layer with given rate.""" del params, kwargs if rng is None: msg = ('Dropout layer requires apply_fun to be called with a rng keyword ' 'argument. That is, instead of `Dropout(params, inputs)`, call ' 'it like `Dropout(params, inputs, rng=key)`.') raise ValueError(msg) if rate >= 1.0: raise ValueError('Dropout rate (%f) must be lower than 1.' % rate) if mode == 'train' and rate > 0.0: keep = backend.random.bernoulli(rng, 1.0 - rate, x.shape) return np.where(keep, x / (1.0 - rate), 0) else: return x
python
def Dropout(x, params, rate=0.0, mode='train', rng=None, **kwargs):
  """Layer construction function for a dropout layer with given rate."""
  del params, kwargs
  if rng is None:
    msg = ('Dropout layer requires apply_fun to be called with a rng keyword '
           'argument. That is, instead of `Dropout(params, inputs)`, call '
           'it like `Dropout(params, inputs, rng=key)`.')
    raise ValueError(msg)
  if rate >= 1.0:
    raise ValueError('Dropout rate (%f) must be lower than 1.' % rate)
  # Dropout is the identity outside of training or when the rate is zero.
  if mode != 'train' or rate <= 0.0:
    return x
  keep = backend.random.bernoulli(rng, 1.0 - rate, x.shape)
  # Scale surviving activations up so the expected value is unchanged.
  return np.where(keep, x / (1.0 - rate), 0)
[ "def", "Dropout", "(", "x", ",", "params", ",", "rate", "=", "0.0", ",", "mode", "=", "'train'", ",", "rng", "=", "None", ",", "*", "*", "kwargs", ")", ":", "del", "params", ",", "kwargs", "if", "rng", "is", "None", ":", "msg", "=", "(", "'Dropout layer requires apply_fun to be called with a rng keyword '", "'argument. That is, instead of `Dropout(params, inputs)`, call '", "'it like `Dropout(params, inputs, rng=key)`.'", ")", "raise", "ValueError", "(", "msg", ")", "if", "rate", ">=", "1.0", ":", "raise", "ValueError", "(", "'Dropout rate (%f) must be lower than 1.'", "%", "rate", ")", "if", "mode", "==", "'train'", "and", "rate", ">", "0.0", ":", "keep", "=", "backend", ".", "random", ".", "bernoulli", "(", "rng", ",", "1.0", "-", "rate", ",", "x", ".", "shape", ")", "return", "np", ".", "where", "(", "keep", ",", "x", "/", "(", "1.0", "-", "rate", ")", ",", "0", ")", "else", ":", "return", "x" ]
Layer construction function for a dropout layer with given rate.
[ "Layer", "construction", "function", "for", "a", "dropout", "layer", "with", "given", "rate", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L415-L429
21,909
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
Conv._kernel_shape
def _kernel_shape(self, input_shape): """Helper to calculate the kernel shape.""" kernel_size_iter = iter(self._kernel_size) return [self._filters if c == 'O' else input_shape[self._lhs_spec.index('C')] if c == 'I' else next(kernel_size_iter) for c in self._rhs_spec]
python
def _kernel_shape(self, input_shape): """Helper to calculate the kernel shape.""" kernel_size_iter = iter(self._kernel_size) return [self._filters if c == 'O' else input_shape[self._lhs_spec.index('C')] if c == 'I' else next(kernel_size_iter) for c in self._rhs_spec]
[ "def", "_kernel_shape", "(", "self", ",", "input_shape", ")", ":", "kernel_size_iter", "=", "iter", "(", "self", ".", "_kernel_size", ")", "return", "[", "self", ".", "_filters", "if", "c", "==", "'O'", "else", "input_shape", "[", "self", ".", "_lhs_spec", ".", "index", "(", "'C'", ")", "]", "if", "c", "==", "'I'", "else", "next", "(", "kernel_size_iter", ")", "for", "c", "in", "self", ".", "_rhs_spec", "]" ]
Helper to calculate the kernel shape.
[ "Helper", "to", "calculate", "the", "kernel", "shape", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L226-L231
21,910
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
Conv._conv_shape_tuple
def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads): """Compute the shape of a conv given input shapes in canonical order.""" if isinstance(pads, str): pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads) if len(pads) != len(lhs_shape) - 2: msg = 'Wrong number of explicit pads for conv: expected {}, got {}.' raise TypeError(msg.format(len(lhs_shape) - 2, len(pads))) lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads))) out_space = onp.floor_divide( onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1 out_space = onp.maximum(0, out_space) out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space) return tuple(out_shape)
python
def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads): """Compute the shape of a conv given input shapes in canonical order.""" if isinstance(pads, str): pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads) if len(pads) != len(lhs_shape) - 2: msg = 'Wrong number of explicit pads for conv: expected {}, got {}.' raise TypeError(msg.format(len(lhs_shape) - 2, len(pads))) lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads))) out_space = onp.floor_divide( onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1 out_space = onp.maximum(0, out_space) out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space) return tuple(out_shape)
[ "def", "_conv_shape_tuple", "(", "self", ",", "lhs_shape", ",", "rhs_shape", ",", "strides", ",", "pads", ")", ":", "if", "isinstance", "(", "pads", ",", "str", ")", ":", "pads", "=", "padtype_to_pads", "(", "lhs_shape", "[", "2", ":", "]", ",", "rhs_shape", "[", "2", ":", "]", ",", "strides", ",", "pads", ")", "if", "len", "(", "pads", ")", "!=", "len", "(", "lhs_shape", ")", "-", "2", ":", "msg", "=", "'Wrong number of explicit pads for conv: expected {}, got {}.'", "raise", "TypeError", "(", "msg", ".", "format", "(", "len", "(", "lhs_shape", ")", "-", "2", ",", "len", "(", "pads", ")", ")", ")", "lhs_padded", "=", "onp", ".", "add", "(", "lhs_shape", "[", "2", ":", "]", ",", "onp", ".", "add", "(", "*", "zip", "(", "*", "pads", ")", ")", ")", "out_space", "=", "onp", ".", "floor_divide", "(", "onp", ".", "subtract", "(", "lhs_padded", ",", "rhs_shape", "[", "2", ":", "]", ")", ",", "strides", ")", "+", "1", "out_space", "=", "onp", ".", "maximum", "(", "0", ",", "out_space", ")", "out_shape", "=", "(", "lhs_shape", "[", "0", "]", ",", "rhs_shape", "[", "0", "]", ")", "+", "tuple", "(", "out_space", ")", "return", "tuple", "(", "out_shape", ")" ]
Compute the shape of a conv given input shapes in canonical order.
[ "Compute", "the", "shape", "of", "a", "conv", "given", "input", "shapes", "in", "canonical", "order", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L233-L245
21,911
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
Conv._conv_general_permutations
def _conv_general_permutations(self, dimension_numbers): """Utility for convolution dimension permutations relative to Conv HLO.""" lhs_spec, rhs_spec, out_spec = dimension_numbers lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C') charpairs = (lhs_char, rhs_char, out_char) for i, (a, b) in enumerate(charpairs): if not (dimension_numbers[i].count(a) == 1 and dimension_numbers[i].count(b) == 1): msg = ('convolution dimension_numbers[{}] must contain the characters ' '"{}" and "{}" exatly once, got {}.') raise TypeError(msg.format(i, a, b, dimension_numbers[i])) if len(dimension_numbers[i]) != len(set(dimension_numbers[i])): msg = ('convolution dimension_numbers[{}] cannot have duplicate ' 'characters, got {}.') raise TypeError(msg.format(i, dimension_numbers[i])) if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) == set(out_spec) - set(out_char)): msg = ('convolution dimension_numbers elements must each have the same ' 'set of spatial characters, got {}.') raise TypeError(msg.format(dimension_numbers)) def getperm(spec, charpair): spatial = (i for i, c in enumerate(spec) if c not in charpair) if spec is not rhs_spec: spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i])) return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial) lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs) return lhs_perm, rhs_perm, out_perm
python
def _conv_general_permutations(self, dimension_numbers): """Utility for convolution dimension permutations relative to Conv HLO.""" lhs_spec, rhs_spec, out_spec = dimension_numbers lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C') charpairs = (lhs_char, rhs_char, out_char) for i, (a, b) in enumerate(charpairs): if not (dimension_numbers[i].count(a) == 1 and dimension_numbers[i].count(b) == 1): msg = ('convolution dimension_numbers[{}] must contain the characters ' '"{}" and "{}" exatly once, got {}.') raise TypeError(msg.format(i, a, b, dimension_numbers[i])) if len(dimension_numbers[i]) != len(set(dimension_numbers[i])): msg = ('convolution dimension_numbers[{}] cannot have duplicate ' 'characters, got {}.') raise TypeError(msg.format(i, dimension_numbers[i])) if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) == set(out_spec) - set(out_char)): msg = ('convolution dimension_numbers elements must each have the same ' 'set of spatial characters, got {}.') raise TypeError(msg.format(dimension_numbers)) def getperm(spec, charpair): spatial = (i for i, c in enumerate(spec) if c not in charpair) if spec is not rhs_spec: spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i])) return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial) lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs) return lhs_perm, rhs_perm, out_perm
[ "def", "_conv_general_permutations", "(", "self", ",", "dimension_numbers", ")", ":", "lhs_spec", ",", "rhs_spec", ",", "out_spec", "=", "dimension_numbers", "lhs_char", ",", "rhs_char", ",", "out_char", "=", "(", "'N'", ",", "'C'", ")", ",", "(", "'O'", ",", "'I'", ")", ",", "(", "'N'", ",", "'C'", ")", "charpairs", "=", "(", "lhs_char", ",", "rhs_char", ",", "out_char", ")", "for", "i", ",", "(", "a", ",", "b", ")", "in", "enumerate", "(", "charpairs", ")", ":", "if", "not", "(", "dimension_numbers", "[", "i", "]", ".", "count", "(", "a", ")", "==", "1", "and", "dimension_numbers", "[", "i", "]", ".", "count", "(", "b", ")", "==", "1", ")", ":", "msg", "=", "(", "'convolution dimension_numbers[{}] must contain the characters '", "'\"{}\" and \"{}\" exatly once, got {}.'", ")", "raise", "TypeError", "(", "msg", ".", "format", "(", "i", ",", "a", ",", "b", ",", "dimension_numbers", "[", "i", "]", ")", ")", "if", "len", "(", "dimension_numbers", "[", "i", "]", ")", "!=", "len", "(", "set", "(", "dimension_numbers", "[", "i", "]", ")", ")", ":", "msg", "=", "(", "'convolution dimension_numbers[{}] cannot have duplicate '", "'characters, got {}.'", ")", "raise", "TypeError", "(", "msg", ".", "format", "(", "i", ",", "dimension_numbers", "[", "i", "]", ")", ")", "if", "not", "(", "set", "(", "lhs_spec", ")", "-", "set", "(", "lhs_char", ")", "==", "set", "(", "rhs_spec", ")", "-", "set", "(", "rhs_char", ")", "==", "set", "(", "out_spec", ")", "-", "set", "(", "out_char", ")", ")", ":", "msg", "=", "(", "'convolution dimension_numbers elements must each have the same '", "'set of spatial characters, got {}.'", ")", "raise", "TypeError", "(", "msg", ".", "format", "(", "dimension_numbers", ")", ")", "def", "getperm", "(", "spec", ",", "charpair", ")", ":", "spatial", "=", "(", "i", "for", "i", ",", "c", "in", "enumerate", "(", "spec", ")", "if", "c", "not", "in", "charpair", ")", "if", "spec", "is", "not", "rhs_spec", ":", "spatial", "=", "sorted", "(", "spatial", 
",", "key", "=", "lambda", "i", ":", "rhs_spec", ".", "index", "(", "spec", "[", "i", "]", ")", ")", "return", "(", "spec", ".", "index", "(", "charpair", "[", "0", "]", ")", ",", "spec", ".", "index", "(", "charpair", "[", "1", "]", ")", ")", "+", "tuple", "(", "spatial", ")", "lhs_perm", ",", "rhs_perm", ",", "out_perm", "=", "map", "(", "getperm", ",", "dimension_numbers", ",", "charpairs", ")", "return", "lhs_perm", ",", "rhs_perm", ",", "out_perm" ]
Utility for convolution dimension permutations relative to Conv HLO.
[ "Utility", "for", "convolution", "dimension", "permutations", "relative", "to", "Conv", "HLO", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L247-L275
21,912
tensorflow/tensor2tensor
tensor2tensor/trax/layers/core.py
Conv._conv_general_shape_tuple
def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides, padding, dimension_numbers): """Generalized computation of conv shape.""" lhs_perm, rhs_perm, out_perm = self._conv_general_permutations( dimension_numbers) lhs_trans = onp.take(lhs_shape, lhs_perm) rhs_trans = onp.take(rhs_shape, rhs_perm) out_trans = self._conv_shape_tuple( lhs_trans, rhs_trans, window_strides, padding) return tuple(onp.take(out_trans, onp.argsort(out_perm)))
python
def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides,
                              padding, dimension_numbers):
  """Generalized computation of conv shape."""
  lhs_perm, rhs_perm, out_perm = self._conv_general_permutations(
      dimension_numbers)
  # Permute both operands into canonical (batch, channel, spatial...)
  # order, compute the canonical output shape, then undo the output
  # permutation so the result matches the caller's layout.
  canonical_lhs = onp.take(lhs_shape, lhs_perm)
  canonical_rhs = onp.take(rhs_shape, rhs_perm)
  canonical_out = self._conv_shape_tuple(
      canonical_lhs, canonical_rhs, window_strides, padding)
  return tuple(onp.take(canonical_out, onp.argsort(out_perm)))
[ "def", "_conv_general_shape_tuple", "(", "self", ",", "lhs_shape", ",", "rhs_shape", ",", "window_strides", ",", "padding", ",", "dimension_numbers", ")", ":", "lhs_perm", ",", "rhs_perm", ",", "out_perm", "=", "self", ".", "_conv_general_permutations", "(", "dimension_numbers", ")", "lhs_trans", "=", "onp", ".", "take", "(", "lhs_shape", ",", "lhs_perm", ")", "rhs_trans", "=", "onp", ".", "take", "(", "rhs_shape", ",", "rhs_perm", ")", "out_trans", "=", "self", ".", "_conv_shape_tuple", "(", "lhs_trans", ",", "rhs_trans", ",", "window_strides", ",", "padding", ")", "return", "tuple", "(", "onp", ".", "take", "(", "out_trans", ",", "onp", ".", "argsort", "(", "out_perm", ")", ")", ")" ]
Generalized computation of conv shape.
[ "Generalized", "computation", "of", "conv", "shape", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/core.py#L277-L286
21,913
tensorflow/tensor2tensor
tensor2tensor/rl/dopamine_connector.py
get_create_agent
def get_create_agent(agent_kwargs): """Factory for dopamine agent initialization. Args: agent_kwargs: dict of BatchDQNAgent parameters Returns: Function(sess, environment, summary_writer) -> BatchDQNAgent instance. """ def create_agent(sess, environment, summary_writer=None): """Creates a DQN agent. Simplified version of `dopamine.discrete_domains.train.create_agent` Args: sess: a session environment: an environment summary_writer: a summary writer. Returns: a DQN agent. """ return BatchDQNAgent( env_batch_size=environment.batch_size, sess=sess, num_actions=environment.action_space.n, summary_writer=summary_writer, tf_device="/gpu:*", **agent_kwargs) return create_agent
python
def get_create_agent(agent_kwargs): """Factory for dopamine agent initialization. Args: agent_kwargs: dict of BatchDQNAgent parameters Returns: Function(sess, environment, summary_writer) -> BatchDQNAgent instance. """ def create_agent(sess, environment, summary_writer=None): """Creates a DQN agent. Simplified version of `dopamine.discrete_domains.train.create_agent` Args: sess: a session environment: an environment summary_writer: a summary writer. Returns: a DQN agent. """ return BatchDQNAgent( env_batch_size=environment.batch_size, sess=sess, num_actions=environment.action_space.n, summary_writer=summary_writer, tf_device="/gpu:*", **agent_kwargs) return create_agent
[ "def", "get_create_agent", "(", "agent_kwargs", ")", ":", "def", "create_agent", "(", "sess", ",", "environment", ",", "summary_writer", "=", "None", ")", ":", "\"\"\"Creates a DQN agent.\n\n Simplified version of `dopamine.discrete_domains.train.create_agent`\n\n Args:\n sess: a session\n environment: an environment\n summary_writer: a summary writer.\n\n Returns:\n a DQN agent.\n \"\"\"", "return", "BatchDQNAgent", "(", "env_batch_size", "=", "environment", ".", "batch_size", ",", "sess", "=", "sess", ",", "num_actions", "=", "environment", ".", "action_space", ".", "n", ",", "summary_writer", "=", "summary_writer", ",", "tf_device", "=", "\"/gpu:*\"", ",", "*", "*", "agent_kwargs", ")", "return", "create_agent" ]
Factory for dopamine agent initialization. Args: agent_kwargs: dict of BatchDQNAgent parameters Returns: Function(sess, environment, summary_writer) -> BatchDQNAgent instance.
[ "Factory", "for", "dopamine", "agent", "initialization", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/dopamine_connector.py#L274-L305
21,914
tensorflow/tensor2tensor
tensor2tensor/rl/dopamine_connector.py
get_create_batch_env_fun
def get_create_batch_env_fun(batch_env_fn, time_limit): """Factory for dopamine environment initialization function. Args: batch_env_fn: function(in_graph: bool) -> batch environment. time_limit: time steps limit for environment. Returns: function (with optional, unused parameters) initializing environment. """ def create_env_fun(game_name=None, sticky_actions=None): del game_name, sticky_actions batch_env = batch_env_fn(in_graph=False) batch_env = ResizeBatchObservation(batch_env) # pylint: disable=redefined-variable-type batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit) return batch_env return create_env_fun
python
def get_create_batch_env_fun(batch_env_fn, time_limit): """Factory for dopamine environment initialization function. Args: batch_env_fn: function(in_graph: bool) -> batch environment. time_limit: time steps limit for environment. Returns: function (with optional, unused parameters) initializing environment. """ def create_env_fun(game_name=None, sticky_actions=None): del game_name, sticky_actions batch_env = batch_env_fn(in_graph=False) batch_env = ResizeBatchObservation(batch_env) # pylint: disable=redefined-variable-type batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit) return batch_env return create_env_fun
[ "def", "get_create_batch_env_fun", "(", "batch_env_fn", ",", "time_limit", ")", ":", "def", "create_env_fun", "(", "game_name", "=", "None", ",", "sticky_actions", "=", "None", ")", ":", "del", "game_name", ",", "sticky_actions", "batch_env", "=", "batch_env_fn", "(", "in_graph", "=", "False", ")", "batch_env", "=", "ResizeBatchObservation", "(", "batch_env", ")", "# pylint: disable=redefined-variable-type", "batch_env", "=", "DopamineBatchEnv", "(", "batch_env", ",", "max_episode_steps", "=", "time_limit", ")", "return", "batch_env", "return", "create_env_fun" ]
Factory for dopamine environment initialization function. Args: batch_env_fn: function(in_graph: bool) -> batch environment. time_limit: time steps limit for environment. Returns: function (with optional, unused parameters) initializing environment.
[ "Factory", "for", "dopamine", "environment", "initialization", "function", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/dopamine_connector.py#L450-L468
21,915
tensorflow/tensor2tensor
tensor2tensor/rl/dopamine_connector.py
_parse_hparams
def _parse_hparams(hparams): """Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. """ prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] ret = [] for prefix in prefixes: ret_dict = {} for key in hparams.values(): if prefix in key: par_name = key[len(prefix):] ret_dict[par_name] = hparams.get(key) ret.append(ret_dict) return ret
python
def _parse_hparams(hparams): """Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. """ prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] ret = [] for prefix in prefixes: ret_dict = {} for key in hparams.values(): if prefix in key: par_name = key[len(prefix):] ret_dict[par_name] = hparams.get(key) ret.append(ret_dict) return ret
[ "def", "_parse_hparams", "(", "hparams", ")", ":", "prefixes", "=", "[", "\"agent_\"", ",", "\"optimizer_\"", ",", "\"runner_\"", ",", "\"replay_buffer_\"", "]", "ret", "=", "[", "]", "for", "prefix", "in", "prefixes", ":", "ret_dict", "=", "{", "}", "for", "key", "in", "hparams", ".", "values", "(", ")", ":", "if", "prefix", "in", "key", ":", "par_name", "=", "key", "[", "len", "(", "prefix", ")", ":", "]", "ret_dict", "[", "par_name", "]", "=", "hparams", ".", "get", "(", "key", ")", "ret", ".", "append", "(", "ret_dict", ")", "return", "ret" ]
Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer.
[ "Split", "hparams", "based", "on", "key", "prefixes", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/dopamine_connector.py#L471-L491
21,916
tensorflow/tensor2tensor
tensor2tensor/rl/dopamine_connector.py
_DQNAgent._build_replay_buffer
def _build_replay_buffer(self, use_staging): """Build WrappedReplayBuffer with custom OutOfGraphReplayBuffer.""" replay_buffer_kwargs = dict( observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE, stack_size=dqn_agent.NATURE_DQN_STACK_SIZE, replay_capacity=self._replay_capacity, batch_size=self._buffer_batch_size, update_horizon=self.update_horizon, gamma=self.gamma, extra_storage_types=None, observation_dtype=np.uint8, ) replay_memory = _OutOfGraphReplayBuffer( artificial_done=not self._generates_trainable_dones, **replay_buffer_kwargs) return circular_replay_buffer.WrappedReplayBuffer( wrapped_memory=replay_memory, use_staging=use_staging, **replay_buffer_kwargs)
python
def _build_replay_buffer(self, use_staging): """Build WrappedReplayBuffer with custom OutOfGraphReplayBuffer.""" replay_buffer_kwargs = dict( observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE, stack_size=dqn_agent.NATURE_DQN_STACK_SIZE, replay_capacity=self._replay_capacity, batch_size=self._buffer_batch_size, update_horizon=self.update_horizon, gamma=self.gamma, extra_storage_types=None, observation_dtype=np.uint8, ) replay_memory = _OutOfGraphReplayBuffer( artificial_done=not self._generates_trainable_dones, **replay_buffer_kwargs) return circular_replay_buffer.WrappedReplayBuffer( wrapped_memory=replay_memory, use_staging=use_staging, **replay_buffer_kwargs)
[ "def", "_build_replay_buffer", "(", "self", ",", "use_staging", ")", ":", "replay_buffer_kwargs", "=", "dict", "(", "observation_shape", "=", "dqn_agent", ".", "NATURE_DQN_OBSERVATION_SHAPE", ",", "stack_size", "=", "dqn_agent", ".", "NATURE_DQN_STACK_SIZE", ",", "replay_capacity", "=", "self", ".", "_replay_capacity", ",", "batch_size", "=", "self", ".", "_buffer_batch_size", ",", "update_horizon", "=", "self", ".", "update_horizon", ",", "gamma", "=", "self", ".", "gamma", ",", "extra_storage_types", "=", "None", ",", "observation_dtype", "=", "np", ".", "uint8", ",", ")", "replay_memory", "=", "_OutOfGraphReplayBuffer", "(", "artificial_done", "=", "not", "self", ".", "_generates_trainable_dones", ",", "*", "*", "replay_buffer_kwargs", ")", "return", "circular_replay_buffer", ".", "WrappedReplayBuffer", "(", "wrapped_memory", "=", "replay_memory", ",", "use_staging", "=", "use_staging", ",", "*", "*", "replay_buffer_kwargs", ")" ]
Build WrappedReplayBuffer with custom OutOfGraphReplayBuffer.
[ "Build", "WrappedReplayBuffer", "with", "custom", "OutOfGraphReplayBuffer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/dopamine_connector.py#L60-L79
21,917
tensorflow/tensor2tensor
tensor2tensor/models/video/next_frame_glow.py
next_frame_glow_hparams
def next_frame_glow_hparams(): """Hparams for next_frame_glow.""" hparams = glow.glow_hparams() # Possible modes are conditional and unconditional hparams.add_hparam("gen_mode", "conditional") hparams.add_hparam("learn_top_scale", False) hparams.add_hparam("condition_all_levels", True) # For each video, substitutes "num_input_frames + num_output_frames" with a # randomly sampled patch of length "num_train_frames" during training. # -1 indicates that the entire video is used for training. hparams.add_hparam("num_train_frames", -1) # The following are hparams that model the latent transitions. # Encoder that maps the latents to a Gaussian distribution. # This function is used to model the prior over z_{t}. Can be, # Pointwise -> point-wise multiplication of z_{t-1}. # conv_net -> one-layer convolution over z_{t-1} .. z_{t - num_cond_latents} # conv3d_net or conv_lstm hparams.add_hparam("latent_dist_encoder", "conv_net") # Number of latents used in the encoder above. hparams.add_hparam("num_cond_latents", 1) hparams.add_hparam("latent_architecture", "glow_resnet") hparams.add_hparam("latent_apply_dilations", False) hparams.add_hparam("latent_dilation_rates", [1, 3]) # Use latent skip connections hparams.add_hparam("model_input", False) hparams.add_hparam("cond_first_frame", False) hparams.add_hparam("latent_skip", True) hparams.add_hparam("latent_encoder_depth", 2) hparams.add_hparam("latent_encoder_width", 512) hparams.add_hparam("latent_dropout", 0.0) hparams.add_hparam("latent_pre_output_channels", 512) hparams.add_hparam("latent_activation", "relu") hparams.add_hparam("latent_noise", 0.0) # Pretrains the glow encoder for "pretrain_steps" number of steps. 
# By default, don't pretrain and learn end-to-end hparams.add_hparam("pretrain_steps", -1) hparams.bottom = { "inputs": modalities.video_raw_bottom, "targets": modalities.video_raw_targets_bottom, } hparams.loss = { "targets": modalities.video_l1_raw_loss, } hparams.top = { "targets": modalities.video_raw_top, } hparams.init_batch_size = 256 hparams.batch_size = 32 # Possible options: are prev_frame, single_conv and normal hparams.top_prior = "single_conv" return hparams
python
def next_frame_glow_hparams(): """Hparams for next_frame_glow.""" hparams = glow.glow_hparams() # Possible modes are conditional and unconditional hparams.add_hparam("gen_mode", "conditional") hparams.add_hparam("learn_top_scale", False) hparams.add_hparam("condition_all_levels", True) # For each video, substitutes "num_input_frames + num_output_frames" with a # randomly sampled patch of length "num_train_frames" during training. # -1 indicates that the entire video is used for training. hparams.add_hparam("num_train_frames", -1) # The following are hparams that model the latent transitions. # Encoder that maps the latents to a Gaussian distribution. # This function is used to model the prior over z_{t}. Can be, # Pointwise -> point-wise multiplication of z_{t-1}. # conv_net -> one-layer convolution over z_{t-1} .. z_{t - num_cond_latents} # conv3d_net or conv_lstm hparams.add_hparam("latent_dist_encoder", "conv_net") # Number of latents used in the encoder above. hparams.add_hparam("num_cond_latents", 1) hparams.add_hparam("latent_architecture", "glow_resnet") hparams.add_hparam("latent_apply_dilations", False) hparams.add_hparam("latent_dilation_rates", [1, 3]) # Use latent skip connections hparams.add_hparam("model_input", False) hparams.add_hparam("cond_first_frame", False) hparams.add_hparam("latent_skip", True) hparams.add_hparam("latent_encoder_depth", 2) hparams.add_hparam("latent_encoder_width", 512) hparams.add_hparam("latent_dropout", 0.0) hparams.add_hparam("latent_pre_output_channels", 512) hparams.add_hparam("latent_activation", "relu") hparams.add_hparam("latent_noise", 0.0) # Pretrains the glow encoder for "pretrain_steps" number of steps. 
# By default, don't pretrain and learn end-to-end hparams.add_hparam("pretrain_steps", -1) hparams.bottom = { "inputs": modalities.video_raw_bottom, "targets": modalities.video_raw_targets_bottom, } hparams.loss = { "targets": modalities.video_l1_raw_loss, } hparams.top = { "targets": modalities.video_raw_top, } hparams.init_batch_size = 256 hparams.batch_size = 32 # Possible options: are prev_frame, single_conv and normal hparams.top_prior = "single_conv" return hparams
[ "def", "next_frame_glow_hparams", "(", ")", ":", "hparams", "=", "glow", ".", "glow_hparams", "(", ")", "# Possible modes are conditional and unconditional", "hparams", ".", "add_hparam", "(", "\"gen_mode\"", ",", "\"conditional\"", ")", "hparams", ".", "add_hparam", "(", "\"learn_top_scale\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"condition_all_levels\"", ",", "True", ")", "# For each video, substitutes \"num_input_frames + num_output_frames\" with a", "# randomly sampled patch of length \"num_train_frames\" during training.", "# -1 indicates that the entire video is used for training.", "hparams", ".", "add_hparam", "(", "\"num_train_frames\"", ",", "-", "1", ")", "# The following are hparams that model the latent transitions.", "# Encoder that maps the latents to a Gaussian distribution.", "# This function is used to model the prior over z_{t}. Can be,", "# Pointwise -> point-wise multiplication of z_{t-1}.", "# conv_net -> one-layer convolution over z_{t-1} .. z_{t - num_cond_latents}", "# conv3d_net or conv_lstm", "hparams", ".", "add_hparam", "(", "\"latent_dist_encoder\"", ",", "\"conv_net\"", ")", "# Number of latents used in the encoder above.", "hparams", ".", "add_hparam", "(", "\"num_cond_latents\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"latent_architecture\"", ",", "\"glow_resnet\"", ")", "hparams", ".", "add_hparam", "(", "\"latent_apply_dilations\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"latent_dilation_rates\"", ",", "[", "1", ",", "3", "]", ")", "# Use latent skip connections", "hparams", ".", "add_hparam", "(", "\"model_input\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"cond_first_frame\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"latent_skip\"", ",", "True", ")", "hparams", ".", "add_hparam", "(", "\"latent_encoder_depth\"", ",", "2", ")", "hparams", ".", "add_hparam", "(", "\"latent_encoder_width\"", ",", "512", ")", "hparams", ".", "add_hparam", "(", 
"\"latent_dropout\"", ",", "0.0", ")", "hparams", ".", "add_hparam", "(", "\"latent_pre_output_channels\"", ",", "512", ")", "hparams", ".", "add_hparam", "(", "\"latent_activation\"", ",", "\"relu\"", ")", "hparams", ".", "add_hparam", "(", "\"latent_noise\"", ",", "0.0", ")", "# Pretrains the glow encoder for \"pretrain_steps\" number of steps.", "# By default, don't pretrain and learn end-to-end", "hparams", ".", "add_hparam", "(", "\"pretrain_steps\"", ",", "-", "1", ")", "hparams", ".", "bottom", "=", "{", "\"inputs\"", ":", "modalities", ".", "video_raw_bottom", ",", "\"targets\"", ":", "modalities", ".", "video_raw_targets_bottom", ",", "}", "hparams", ".", "loss", "=", "{", "\"targets\"", ":", "modalities", ".", "video_l1_raw_loss", ",", "}", "hparams", ".", "top", "=", "{", "\"targets\"", ":", "modalities", ".", "video_raw_top", ",", "}", "hparams", ".", "init_batch_size", "=", "256", "hparams", ".", "batch_size", "=", "32", "# Possible options: are prev_frame, single_conv and normal", "hparams", ".", "top_prior", "=", "\"single_conv\"", "return", "hparams" ]
Hparams for next_frame_glow.
[ "Hparams", "for", "next_frame_glow", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/next_frame_glow.py#L37-L87
21,918
tensorflow/tensor2tensor
tensor2tensor/models/video/next_frame_glow.py
next_frame_glow_bair_quant
def next_frame_glow_bair_quant(): """Hparams to reproduce bits-per-pixel results on BAIR action-free dataset.""" hparams = next_frame_glow_hparams() hparams.video_num_input_frames = 3 hparams.video_num_target_frames = 10 hparams.num_train_frames = 4 hparams.num_cond_latents = 3 hparams.depth = 24 hparams.latent_dist_encoder = "conv3d_net" hparams.latent_encoder_width = 256 hparams.latent_architecture = "glow_resnet" hparams.latent_encoder_depth = 5 hparams.latent_apply_dilations = True hparams.latent_activation = "gatu" hparams.activation = "gatu" hparams.learning_rate_constant = 3e-4 hparams.learning_rate_schedule = "constant*linear_warmup" hparams.learning_rate_warmup_steps = 10000 hparams.init_batch_size = 128 hparams.batch_size = 5 return hparams
python
def next_frame_glow_bair_quant(): """Hparams to reproduce bits-per-pixel results on BAIR action-free dataset.""" hparams = next_frame_glow_hparams() hparams.video_num_input_frames = 3 hparams.video_num_target_frames = 10 hparams.num_train_frames = 4 hparams.num_cond_latents = 3 hparams.depth = 24 hparams.latent_dist_encoder = "conv3d_net" hparams.latent_encoder_width = 256 hparams.latent_architecture = "glow_resnet" hparams.latent_encoder_depth = 5 hparams.latent_apply_dilations = True hparams.latent_activation = "gatu" hparams.activation = "gatu" hparams.learning_rate_constant = 3e-4 hparams.learning_rate_schedule = "constant*linear_warmup" hparams.learning_rate_warmup_steps = 10000 hparams.init_batch_size = 128 hparams.batch_size = 5 return hparams
[ "def", "next_frame_glow_bair_quant", "(", ")", ":", "hparams", "=", "next_frame_glow_hparams", "(", ")", "hparams", ".", "video_num_input_frames", "=", "3", "hparams", ".", "video_num_target_frames", "=", "10", "hparams", ".", "num_train_frames", "=", "4", "hparams", ".", "num_cond_latents", "=", "3", "hparams", ".", "depth", "=", "24", "hparams", ".", "latent_dist_encoder", "=", "\"conv3d_net\"", "hparams", ".", "latent_encoder_width", "=", "256", "hparams", ".", "latent_architecture", "=", "\"glow_resnet\"", "hparams", ".", "latent_encoder_depth", "=", "5", "hparams", ".", "latent_apply_dilations", "=", "True", "hparams", ".", "latent_activation", "=", "\"gatu\"", "hparams", ".", "activation", "=", "\"gatu\"", "hparams", ".", "learning_rate_constant", "=", "3e-4", "hparams", ".", "learning_rate_schedule", "=", "\"constant*linear_warmup\"", "hparams", ".", "learning_rate_warmup_steps", "=", "10000", "hparams", ".", "init_batch_size", "=", "128", "hparams", ".", "batch_size", "=", "5", "return", "hparams" ]
Hparams to reproduce bits-per-pixel results on BAIR action-free dataset.
[ "Hparams", "to", "reproduce", "bits", "-", "per", "-", "pixel", "results", "on", "BAIR", "action", "-", "free", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/next_frame_glow.py#L91-L111
21,919
tensorflow/tensor2tensor
tensor2tensor/models/video/next_frame_glow.py
next_frame_glow_bair_qual
def next_frame_glow_bair_qual(): """Hparams for qualitative video generation results.""" hparams = next_frame_glow_bair_quant() hparams.coupling = "additive" hparams.temperature = 0.5 hparams.coupling_width = 392 return hparams
python
def next_frame_glow_bair_qual(): """Hparams for qualitative video generation results.""" hparams = next_frame_glow_bair_quant() hparams.coupling = "additive" hparams.temperature = 0.5 hparams.coupling_width = 392 return hparams
[ "def", "next_frame_glow_bair_qual", "(", ")", ":", "hparams", "=", "next_frame_glow_bair_quant", "(", ")", "hparams", ".", "coupling", "=", "\"additive\"", "hparams", ".", "temperature", "=", "0.5", "hparams", ".", "coupling_width", "=", "392", "return", "hparams" ]
Hparams for qualitative video generation results.
[ "Hparams", "for", "qualitative", "video", "generation", "results", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/next_frame_glow.py#L115-L121
21,920
tensorflow/tensor2tensor
tensor2tensor/models/video/next_frame_glow.py
next_frame_glow_shapes
def next_frame_glow_shapes(): """Hparams for qualitative and quantitative results on shapes dataset.""" hparams = next_frame_glow_bair_quant() hparams.video_num_input_frames = 1 hparams.video_num_target_frames = 2 hparams.num_train_frames = 2 hparams.num_cond_latents = 1 hparams.coupling = "additive" hparams.coupling_width = 512 hparams.latent_encoder_depth = 10 hparams.latent_skip = False hparams.learning_rate_constant = 1e-4 hparams.batch_size = 10 return hparams
python
def next_frame_glow_shapes(): """Hparams for qualitative and quantitative results on shapes dataset.""" hparams = next_frame_glow_bair_quant() hparams.video_num_input_frames = 1 hparams.video_num_target_frames = 2 hparams.num_train_frames = 2 hparams.num_cond_latents = 1 hparams.coupling = "additive" hparams.coupling_width = 512 hparams.latent_encoder_depth = 10 hparams.latent_skip = False hparams.learning_rate_constant = 1e-4 hparams.batch_size = 10 return hparams
[ "def", "next_frame_glow_shapes", "(", ")", ":", "hparams", "=", "next_frame_glow_bair_quant", "(", ")", "hparams", ".", "video_num_input_frames", "=", "1", "hparams", ".", "video_num_target_frames", "=", "2", "hparams", ".", "num_train_frames", "=", "2", "hparams", ".", "num_cond_latents", "=", "1", "hparams", ".", "coupling", "=", "\"additive\"", "hparams", ".", "coupling_width", "=", "512", "hparams", ".", "latent_encoder_depth", "=", "10", "hparams", ".", "latent_skip", "=", "False", "hparams", ".", "learning_rate_constant", "=", "1e-4", "hparams", ".", "batch_size", "=", "10", "return", "hparams" ]
Hparams for qualitative and quantitative results on shapes dataset.
[ "Hparams", "for", "qualitative", "and", "quantitative", "results", "on", "shapes", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/next_frame_glow.py#L125-L138
21,921
tensorflow/tensor2tensor
tensor2tensor/models/basic.py
basic_fc_small
def basic_fc_small(): """Small fully connected model.""" hparams = common_hparams.basic_params1() hparams.learning_rate = 0.1 hparams.batch_size = 128 hparams.hidden_size = 256 hparams.num_hidden_layers = 2 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.dropout = 0.0 return hparams
python
def basic_fc_small(): """Small fully connected model.""" hparams = common_hparams.basic_params1() hparams.learning_rate = 0.1 hparams.batch_size = 128 hparams.hidden_size = 256 hparams.num_hidden_layers = 2 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.dropout = 0.0 return hparams
[ "def", "basic_fc_small", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "learning_rate", "=", "0.1", "hparams", ".", "batch_size", "=", "128", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "initializer", "=", "\"uniform_unit_scaling\"", "hparams", ".", "initializer_gain", "=", "1.0", "hparams", ".", "weight_decay", "=", "0.0", "hparams", ".", "dropout", "=", "0.0", "return", "hparams" ]
Small fully connected model.
[ "Small", "fully", "connected", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/basic.py#L47-L58
21,922
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
imagenet_pixelrnn_generator
def imagenet_pixelrnn_generator(tmp_dir, training, size=_IMAGENET_SMALL_IMAGE_SIZE): """Image generator for Imagenet 64x64 downsampled images. It assumes that the data has been downloaded from http://image-net.org/small/*_32x32.tar or http://image-net.org/small/*_64x64.tar into tmp_dir. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. size: image size (assumes height and width are same) Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as JPEG, * image/format: the string "jpeg" representing image format, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type. """ if size == _IMAGENET_SMALL_IMAGE_SIZE: train_prefix = _IMAGENET_SMALL_TRAIN_PREFIX eval_prefix = _IMAGENET_SMALL_EVAL_PREFIX else: train_prefix = _IMAGENET_MEDIUM_TRAIN_PREFIX eval_prefix = _IMAGENET_MEDIUM_EVAL_PREFIX prefix = train_prefix if training else eval_prefix images_filepath = os.path.join(tmp_dir, prefix) image_files = tf.gfile.Glob(images_filepath + "/*") height = size width = size const_label = 0 for filename in image_files: with tf.gfile.Open(filename, "r") as f: encoded_image = f.read() yield { "image/encoded": [encoded_image], "image/format": ["png"], "image/class/label": [const_label], "image/height": [height], "image/width": [width] }
python
def imagenet_pixelrnn_generator(tmp_dir, training, size=_IMAGENET_SMALL_IMAGE_SIZE): """Image generator for Imagenet 64x64 downsampled images. It assumes that the data has been downloaded from http://image-net.org/small/*_32x32.tar or http://image-net.org/small/*_64x64.tar into tmp_dir. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. size: image size (assumes height and width are same) Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as JPEG, * image/format: the string "jpeg" representing image format, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type. """ if size == _IMAGENET_SMALL_IMAGE_SIZE: train_prefix = _IMAGENET_SMALL_TRAIN_PREFIX eval_prefix = _IMAGENET_SMALL_EVAL_PREFIX else: train_prefix = _IMAGENET_MEDIUM_TRAIN_PREFIX eval_prefix = _IMAGENET_MEDIUM_EVAL_PREFIX prefix = train_prefix if training else eval_prefix images_filepath = os.path.join(tmp_dir, prefix) image_files = tf.gfile.Glob(images_filepath + "/*") height = size width = size const_label = 0 for filename in image_files: with tf.gfile.Open(filename, "r") as f: encoded_image = f.read() yield { "image/encoded": [encoded_image], "image/format": ["png"], "image/class/label": [const_label], "image/height": [height], "image/width": [width] }
[ "def", "imagenet_pixelrnn_generator", "(", "tmp_dir", ",", "training", ",", "size", "=", "_IMAGENET_SMALL_IMAGE_SIZE", ")", ":", "if", "size", "==", "_IMAGENET_SMALL_IMAGE_SIZE", ":", "train_prefix", "=", "_IMAGENET_SMALL_TRAIN_PREFIX", "eval_prefix", "=", "_IMAGENET_SMALL_EVAL_PREFIX", "else", ":", "train_prefix", "=", "_IMAGENET_MEDIUM_TRAIN_PREFIX", "eval_prefix", "=", "_IMAGENET_MEDIUM_EVAL_PREFIX", "prefix", "=", "train_prefix", "if", "training", "else", "eval_prefix", "images_filepath", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "prefix", ")", "image_files", "=", "tf", ".", "gfile", ".", "Glob", "(", "images_filepath", "+", "\"/*\"", ")", "height", "=", "size", "width", "=", "size", "const_label", "=", "0", "for", "filename", "in", "image_files", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "encoded_image", "=", "f", ".", "read", "(", ")", "yield", "{", "\"image/encoded\"", ":", "[", "encoded_image", "]", ",", "\"image/format\"", ":", "[", "\"png\"", "]", ",", "\"image/class/label\"", ":", "[", "const_label", "]", ",", "\"image/height\"", ":", "[", "height", "]", ",", "\"image/width\"", ":", "[", "width", "]", "}" ]
Image generator for Imagenet 64x64 downsampled images. It assumes that the data has been downloaded from http://image-net.org/small/*_32x32.tar or http://image-net.org/small/*_64x64.tar into tmp_dir. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. size: image size (assumes height and width are same) Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as JPEG, * image/format: the string "jpeg" representing image format, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type.
[ "Image", "generator", "for", "Imagenet", "64x64", "downsampled", "images", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L56-L98
21,923
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
imagenet_preprocess_example
def imagenet_preprocess_example(example, mode, resize_size=None, normalize=True): """Preprocessing used for Imagenet and similar problems.""" resize_size = resize_size or [299, 299] assert resize_size[0] == resize_size[1] image = example["inputs"] if mode == tf.estimator.ModeKeys.TRAIN: image = preprocess_for_train(image, image_size=resize_size[0], normalize=normalize) else: image = preprocess_for_eval(image, image_size=resize_size[0], normalize=normalize) example["inputs"] = image return example
python
def imagenet_preprocess_example(example, mode, resize_size=None, normalize=True): """Preprocessing used for Imagenet and similar problems.""" resize_size = resize_size or [299, 299] assert resize_size[0] == resize_size[1] image = example["inputs"] if mode == tf.estimator.ModeKeys.TRAIN: image = preprocess_for_train(image, image_size=resize_size[0], normalize=normalize) else: image = preprocess_for_eval(image, image_size=resize_size[0], normalize=normalize) example["inputs"] = image return example
[ "def", "imagenet_preprocess_example", "(", "example", ",", "mode", ",", "resize_size", "=", "None", ",", "normalize", "=", "True", ")", ":", "resize_size", "=", "resize_size", "or", "[", "299", ",", "299", "]", "assert", "resize_size", "[", "0", "]", "==", "resize_size", "[", "1", "]", "image", "=", "example", "[", "\"inputs\"", "]", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "image", "=", "preprocess_for_train", "(", "image", ",", "image_size", "=", "resize_size", "[", "0", "]", ",", "normalize", "=", "normalize", ")", "else", ":", "image", "=", "preprocess_for_eval", "(", "image", ",", "image_size", "=", "resize_size", "[", "0", "]", ",", "normalize", "=", "normalize", ")", "example", "[", "\"inputs\"", "]", "=", "image", "return", "example" ]
Preprocessing used for Imagenet and similar problems.
[ "Preprocessing", "used", "for", "Imagenet", "and", "similar", "problems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L101-L116
21,924
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
_crop
def _crop(image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: `Tensor` image of shape [height, width, channels]. offset_height: `Tensor` indicating the height offset. offset_width: `Tensor` indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."]) with tf.control_dependencies([rank_assertion]): cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) size_assertion = tf.Assert( tf.logical_and( tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ["Crop size greater than the image size."]) offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) # Use tf.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. with tf.control_dependencies([size_assertion]): image = tf.slice(image, offsets, cropped_shape) return tf.reshape(image, cropped_shape)
python
def _crop(image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: `Tensor` image of shape [height, width, channels]. offset_height: `Tensor` indicating the height offset. offset_width: `Tensor` indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."]) with tf.control_dependencies([rank_assertion]): cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) size_assertion = tf.Assert( tf.logical_and( tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ["Crop size greater than the image size."]) offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) # Use tf.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. with tf.control_dependencies([size_assertion]): image = tf.slice(image, offsets, cropped_shape) return tf.reshape(image, cropped_shape)
[ "def", "_crop", "(", "image", ",", "offset_height", ",", "offset_width", ",", "crop_height", ",", "crop_width", ")", ":", "original_shape", "=", "tf", ".", "shape", "(", "image", ")", "rank_assertion", "=", "tf", ".", "Assert", "(", "tf", ".", "equal", "(", "tf", ".", "rank", "(", "image", ")", ",", "3", ")", ",", "[", "\"Rank of image must be equal to 3.\"", "]", ")", "with", "tf", ".", "control_dependencies", "(", "[", "rank_assertion", "]", ")", ":", "cropped_shape", "=", "tf", ".", "stack", "(", "[", "crop_height", ",", "crop_width", ",", "original_shape", "[", "2", "]", "]", ")", "size_assertion", "=", "tf", ".", "Assert", "(", "tf", ".", "logical_and", "(", "tf", ".", "greater_equal", "(", "original_shape", "[", "0", "]", ",", "crop_height", ")", ",", "tf", ".", "greater_equal", "(", "original_shape", "[", "1", "]", ",", "crop_width", ")", ")", ",", "[", "\"Crop size greater than the image size.\"", "]", ")", "offsets", "=", "tf", ".", "to_int32", "(", "tf", ".", "stack", "(", "[", "offset_height", ",", "offset_width", ",", "0", "]", ")", ")", "# Use tf.slice instead of crop_to_bounding box as it accepts tensors to", "# define the crop size.", "with", "tf", ".", "control_dependencies", "(", "[", "size_assertion", "]", ")", ":", "image", "=", "tf", ".", "slice", "(", "image", ",", "offsets", ",", "cropped_shape", ")", "return", "tf", ".", "reshape", "(", "image", ",", "cropped_shape", ")" ]
Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: `Tensor` image of shape [height, width, channels]. offset_height: `Tensor` indicating the height offset. offset_width: `Tensor` indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size.
[ "Crops", "the", "given", "image", "using", "the", "provided", "offsets", "and", "sizes", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L427-L466
21,925
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
distorted_bounding_box_crop
def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using a one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: `Tensor` of image (it will be converted to floats in [0, 1]). bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: (cropped image `Tensor`, distorted bbox `Tensor`). """ with tf.name_scope(scope, default_name="distorted_bounding_box_crop", values=[image, bbox]): # Each bounding box has shape [1, num_boxes, box coords] and # the coordinates are ordered [ymin, xmin, ymax, xmax]. # A large fraction of image datasets contain a human-annotated bounding # box delineating the region of the image containing the object of interest. # We choose to create a new bounding box for the object which is a randomly # distorted version of the human-annotated bounding box that obeys an # allowed range of aspect ratios, sizes and overlap with the human-annotated # bounding box. If no box is supplied, then we assume the bounding box is # the entire image. 
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box # Crop the image to the specified bounding box. cropped_image = tf.slice(image, bbox_begin, bbox_size) return cropped_image, distort_bbox
python
def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using a one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: `Tensor` of image (it will be converted to floats in [0, 1]). bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: (cropped image `Tensor`, distorted bbox `Tensor`). """ with tf.name_scope(scope, default_name="distorted_bounding_box_crop", values=[image, bbox]): # Each bounding box has shape [1, num_boxes, box coords] and # the coordinates are ordered [ymin, xmin, ymax, xmax]. # A large fraction of image datasets contain a human-annotated bounding # box delineating the region of the image containing the object of interest. # We choose to create a new bounding box for the object which is a randomly # distorted version of the human-annotated bounding box that obeys an # allowed range of aspect ratios, sizes and overlap with the human-annotated # bounding box. If no box is supplied, then we assume the bounding box is # the entire image. 
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box # Crop the image to the specified bounding box. cropped_image = tf.slice(image, bbox_begin, bbox_size) return cropped_image, distort_bbox
[ "def", "distorted_bounding_box_crop", "(", "image", ",", "bbox", ",", "min_object_covered", "=", "0.1", ",", "aspect_ratio_range", "=", "(", "0.75", ",", "1.33", ")", ",", "area_range", "=", "(", "0.05", ",", "1.0", ")", ",", "max_attempts", "=", "100", ",", "scope", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "scope", ",", "default_name", "=", "\"distorted_bounding_box_crop\"", ",", "values", "=", "[", "image", ",", "bbox", "]", ")", ":", "# Each bounding box has shape [1, num_boxes, box coords] and", "# the coordinates are ordered [ymin, xmin, ymax, xmax].", "# A large fraction of image datasets contain a human-annotated bounding", "# box delineating the region of the image containing the object of interest.", "# We choose to create a new bounding box for the object which is a randomly", "# distorted version of the human-annotated bounding box that obeys an", "# allowed range of aspect ratios, sizes and overlap with the human-annotated", "# bounding box. If no box is supplied, then we assume the bounding box is", "# the entire image.", "sample_distorted_bounding_box", "=", "tf", ".", "image", ".", "sample_distorted_bounding_box", "(", "tf", ".", "shape", "(", "image", ")", ",", "bounding_boxes", "=", "bbox", ",", "min_object_covered", "=", "min_object_covered", ",", "aspect_ratio_range", "=", "aspect_ratio_range", ",", "area_range", "=", "area_range", ",", "max_attempts", "=", "max_attempts", ",", "use_image_if_no_bounding_boxes", "=", "True", ")", "bbox_begin", ",", "bbox_size", ",", "distort_bbox", "=", "sample_distorted_bounding_box", "# Crop the image to the specified bounding box.", "cropped_image", "=", "tf", ".", "slice", "(", "image", ",", "bbox_begin", ",", "bbox_size", ")", "return", "cropped_image", ",", "distort_bbox" ]
Generates cropped_image using a one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: `Tensor` of image (it will be converted to floats in [0, 1]). bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: (cropped image `Tensor`, distorted bbox `Tensor`).
[ "Generates", "cropped_image", "using", "a", "one", "of", "the", "bboxes", "randomly", "distorted", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L469-L524
21,926
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
_at_least_x_are_true
def _at_least_x_are_true(a, b, x): """At least `x` of `a` and `b` `Tensors` are true.""" match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x)
python
def _at_least_x_are_true(a, b, x): """At least `x` of `a` and `b` `Tensors` are true.""" match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x)
[ "def", "_at_least_x_are_true", "(", "a", ",", "b", ",", "x", ")", ":", "match", "=", "tf", ".", "equal", "(", "a", ",", "b", ")", "match", "=", "tf", ".", "cast", "(", "match", ",", "tf", ".", "int32", ")", "return", "tf", ".", "greater_equal", "(", "tf", ".", "reduce_sum", "(", "match", ")", ",", "x", ")" ]
At least `x` of `a` and `b` `Tensors` are true.
[ "At", "least", "x", "of", "a", "and", "b", "Tensors", "are", "true", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L552-L556
21,927
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
_do_scale
def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
python
def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
[ "def", "_do_scale", "(", "image", ",", "size", ")", ":", "shape", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "image", ")", ",", "tf", ".", "float32", ")", "w_greater", "=", "tf", ".", "greater", "(", "shape", "[", "0", "]", ",", "shape", "[", "1", "]", ")", "shape", "=", "tf", ".", "cond", "(", "w_greater", ",", "lambda", ":", "tf", ".", "cast", "(", "[", "shape", "[", "0", "]", "/", "shape", "[", "1", "]", "*", "size", ",", "size", "]", ",", "tf", ".", "int32", ")", ",", "lambda", ":", "tf", ".", "cast", "(", "[", "size", ",", "shape", "[", "1", "]", "/", "shape", "[", "0", "]", "*", "size", "]", ",", "tf", ".", "int32", ")", ")", "return", "tf", ".", "image", ".", "resize_bicubic", "(", "[", "image", "]", ",", "shape", ")", "[", "0", "]" ]
Rescale the image by scaling the smaller spatial dimension to `size`.
[ "Rescale", "the", "image", "by", "scaling", "the", "smaller", "spatial", "dimension", "to", "size", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L559-L567
21,928
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
_center_crop
def _center_crop(image, size): """Crops to center of image with specified `size`.""" image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = ((image_height - size) + 1) / 2 offset_width = ((image_width - size) + 1) / 2 image = _crop(image, offset_height, offset_width, size, size) return image
python
def _center_crop(image, size): """Crops to center of image with specified `size`.""" image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = ((image_height - size) + 1) / 2 offset_width = ((image_width - size) + 1) / 2 image = _crop(image, offset_height, offset_width, size, size) return image
[ "def", "_center_crop", "(", "image", ",", "size", ")", ":", "image_height", "=", "tf", ".", "shape", "(", "image", ")", "[", "0", "]", "image_width", "=", "tf", ".", "shape", "(", "image", ")", "[", "1", "]", "offset_height", "=", "(", "(", "image_height", "-", "size", ")", "+", "1", ")", "/", "2", "offset_width", "=", "(", "(", "image_width", "-", "size", ")", "+", "1", ")", "/", "2", "image", "=", "_crop", "(", "image", ",", "offset_height", ",", "offset_width", ",", "size", ",", "size", ")", "return", "image" ]
Crops to center of image with specified `size`.
[ "Crops", "to", "center", "of", "image", "with", "specified", "size", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L570-L578
21,929
tensorflow/tensor2tensor
tensor2tensor/data_generators/imagenet.py
_normalize
def _normalize(image): """Normalize the image to zero mean and unit variance.""" offset = tf.constant(MEAN_RGB, shape=[1, 1, 3]) image -= offset scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3]) image /= scale return image
python
def _normalize(image): """Normalize the image to zero mean and unit variance.""" offset = tf.constant(MEAN_RGB, shape=[1, 1, 3]) image -= offset scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3]) image /= scale return image
[ "def", "_normalize", "(", "image", ")", ":", "offset", "=", "tf", ".", "constant", "(", "MEAN_RGB", ",", "shape", "=", "[", "1", ",", "1", ",", "3", "]", ")", "image", "-=", "offset", "scale", "=", "tf", ".", "constant", "(", "STDDEV_RGB", ",", "shape", "=", "[", "1", ",", "1", ",", "3", "]", ")", "image", "/=", "scale", "return", "image" ]
Normalize the image to zero mean and unit variance.
[ "Normalize", "the", "image", "to", "zero", "mean", "and", "unit", "variance", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L581-L588
21,930
tensorflow/tensor2tensor
tensor2tensor/trax/learning_rate.py
MultifactorSchedule
def MultifactorSchedule(history=None, factors="constant * linear_warmup * rsqrt_decay", constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000): """Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """ del history cache_args = (factors, constant, warmup_steps) if cache_args in _memoized_multifactor_schedules: return _memoized_multifactor_schedules[cache_args] factors = [n.strip() for n in factors.split("*")] def learning_rate(step): # pylint: disable=invalid-name """Step to learning rate function.""" ret = 1.0 for name in factors: if name == "constant": ret *= constant elif name == "linear_warmup": ret *= np.minimum(1.0, step / warmup_steps) elif name == "rsqrt_decay": ret /= np.sqrt(np.maximum(step, warmup_steps)) elif name == "decay_every": ret *= (decay_factor ** (step//steps_per_decay)) else: raise ValueError("Unknown factor %s." % name) return ret _memoized_multifactor_schedules[cache_args] = learning_rate return learning_rate
python
def MultifactorSchedule(history=None, factors="constant * linear_warmup * rsqrt_decay", constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000): """Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """ del history cache_args = (factors, constant, warmup_steps) if cache_args in _memoized_multifactor_schedules: return _memoized_multifactor_schedules[cache_args] factors = [n.strip() for n in factors.split("*")] def learning_rate(step): # pylint: disable=invalid-name """Step to learning rate function.""" ret = 1.0 for name in factors: if name == "constant": ret *= constant elif name == "linear_warmup": ret *= np.minimum(1.0, step / warmup_steps) elif name == "rsqrt_decay": ret /= np.sqrt(np.maximum(step, warmup_steps)) elif name == "decay_every": ret *= (decay_factor ** (step//steps_per_decay)) else: raise ValueError("Unknown factor %s." % name) return ret _memoized_multifactor_schedules[cache_args] = learning_rate return learning_rate
[ "def", "MultifactorSchedule", "(", "history", "=", "None", ",", "factors", "=", "\"constant * linear_warmup * rsqrt_decay\"", ",", "constant", "=", "0.1", ",", "warmup_steps", "=", "100", ",", "decay_factor", "=", "0.5", ",", "steps_per_decay", "=", "20000", ")", ":", "del", "history", "cache_args", "=", "(", "factors", ",", "constant", ",", "warmup_steps", ")", "if", "cache_args", "in", "_memoized_multifactor_schedules", ":", "return", "_memoized_multifactor_schedules", "[", "cache_args", "]", "factors", "=", "[", "n", ".", "strip", "(", ")", "for", "n", "in", "factors", ".", "split", "(", "\"*\"", ")", "]", "def", "learning_rate", "(", "step", ")", ":", "# pylint: disable=invalid-name", "\"\"\"Step to learning rate function.\"\"\"", "ret", "=", "1.0", "for", "name", "in", "factors", ":", "if", "name", "==", "\"constant\"", ":", "ret", "*=", "constant", "elif", "name", "==", "\"linear_warmup\"", ":", "ret", "*=", "np", ".", "minimum", "(", "1.0", ",", "step", "/", "warmup_steps", ")", "elif", "name", "==", "\"rsqrt_decay\"", ":", "ret", "/=", "np", ".", "sqrt", "(", "np", ".", "maximum", "(", "step", ",", "warmup_steps", ")", ")", "elif", "name", "==", "\"decay_every\"", ":", "ret", "*=", "(", "decay_factor", "**", "(", "step", "//", "steps_per_decay", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown factor %s.\"", "%", "name", ")", "return", "ret", "_memoized_multifactor_schedules", "[", "cache_args", "]", "=", "learning_rate", "return", "learning_rate" ]
Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr.
[ "Factor", "-", "based", "learning", "rate", "schedule", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/learning_rate.py#L42-L92
21,931
tensorflow/tensor2tensor
tensor2tensor/trax/learning_rate.py
EvalAdjustingSchedule
def EvalAdjustingSchedule(history, constant=0.1, steps_to_decrease=20, improvement_margin=0.001, decrease_rate=1.5, history_mode="eval", metric="metrics/accuracy"): """Learning rate that decreases when eval metric stalls. If the chosen metric does not improve by improvement_margin for as many as steps_to_decrease steps, then the constant gets decreased by decrease rate. Finally, the MultifactorSchedule gets called with the adjusted constant. Args: history: trax.history.History, the history of training and evaluation. constant: float, the starting constant for the learning rate schedule. steps_to_decrease: int, after how many steps without improvement should we decrease the constant. improvement_margin: how much we need to improve to consider the metric improved. decrease_rate: by what fraction to decrease (i.e. lr /= decrease_rate). history_mode: str, which mode of the history to use. metric: which evaluation metric to use for adjustments. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """ metrics = history.get(history_mode, metric) adjusted = constant if len(metrics) < 2: return MultifactorSchedule(history, constant=adjusted) steps_without_improvement = 0 cur = metrics.pop()[1] # The most-recent value of the metric. while len(metrics) > 1: # The one-before value of metrics as .pop() removes one element each time. prev = metrics.pop()[1] if cur < prev * (1 + improvement_margin): steps_without_improvement += 1 else: cur = prev steps_without_improvement = 0 if steps_without_improvement >= steps_to_decrease: adjusted /= decrease_rate cur = prev steps_without_improvement = 0 return MultifactorSchedule(history, constant=adjusted)
python
def EvalAdjustingSchedule(history, constant=0.1, steps_to_decrease=20, improvement_margin=0.001, decrease_rate=1.5, history_mode="eval", metric="metrics/accuracy"): """Learning rate that decreases when eval metric stalls. If the chosen metric does not improve by improvement_margin for as many as steps_to_decrease steps, then the constant gets decreased by decrease rate. Finally, the MultifactorSchedule gets called with the adjusted constant. Args: history: trax.history.History, the history of training and evaluation. constant: float, the starting constant for the learning rate schedule. steps_to_decrease: int, after how many steps without improvement should we decrease the constant. improvement_margin: how much we need to improve to consider the metric improved. decrease_rate: by what fraction to decrease (i.e. lr /= decrease_rate). history_mode: str, which mode of the history to use. metric: which evaluation metric to use for adjustments. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """ metrics = history.get(history_mode, metric) adjusted = constant if len(metrics) < 2: return MultifactorSchedule(history, constant=adjusted) steps_without_improvement = 0 cur = metrics.pop()[1] # The most-recent value of the metric. while len(metrics) > 1: # The one-before value of metrics as .pop() removes one element each time. prev = metrics.pop()[1] if cur < prev * (1 + improvement_margin): steps_without_improvement += 1 else: cur = prev steps_without_improvement = 0 if steps_without_improvement >= steps_to_decrease: adjusted /= decrease_rate cur = prev steps_without_improvement = 0 return MultifactorSchedule(history, constant=adjusted)
[ "def", "EvalAdjustingSchedule", "(", "history", ",", "constant", "=", "0.1", ",", "steps_to_decrease", "=", "20", ",", "improvement_margin", "=", "0.001", ",", "decrease_rate", "=", "1.5", ",", "history_mode", "=", "\"eval\"", ",", "metric", "=", "\"metrics/accuracy\"", ")", ":", "metrics", "=", "history", ".", "get", "(", "history_mode", ",", "metric", ")", "adjusted", "=", "constant", "if", "len", "(", "metrics", ")", "<", "2", ":", "return", "MultifactorSchedule", "(", "history", ",", "constant", "=", "adjusted", ")", "steps_without_improvement", "=", "0", "cur", "=", "metrics", ".", "pop", "(", ")", "[", "1", "]", "# The most-recent value of the metric.", "while", "len", "(", "metrics", ")", ">", "1", ":", "# The one-before value of metrics as .pop() removes one element each time.", "prev", "=", "metrics", ".", "pop", "(", ")", "[", "1", "]", "if", "cur", "<", "prev", "*", "(", "1", "+", "improvement_margin", ")", ":", "steps_without_improvement", "+=", "1", "else", ":", "cur", "=", "prev", "steps_without_improvement", "=", "0", "if", "steps_without_improvement", ">=", "steps_to_decrease", ":", "adjusted", "/=", "decrease_rate", "cur", "=", "prev", "steps_without_improvement", "=", "0", "return", "MultifactorSchedule", "(", "history", ",", "constant", "=", "adjusted", ")" ]
Learning rate that decreases when eval metric stalls. If the chosen metric does not improve by improvement_margin for as many as steps_to_decrease steps, then the constant gets decreased by decrease rate. Finally, the MultifactorSchedule gets called with the adjusted constant. Args: history: trax.history.History, the history of training and evaluation. constant: float, the starting constant for the learning rate schedule. steps_to_decrease: int, after how many steps without improvement should we decrease the constant. improvement_margin: how much we need to improve to consider the metric improved. decrease_rate: by what fraction to decrease (i.e. lr /= decrease_rate). history_mode: str, which mode of the history to use. metric: which evaluation metric to use for adjustments. Returns: a function learning_rate(step): float -> float, the step-dependent lr.
[ "Learning", "rate", "that", "decreases", "when", "eval", "metric", "stalls", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/learning_rate.py#L96-L143
21,932
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
project_hidden
def project_hidden(x, projection_tensors, hidden_size, num_blocks): """Project encoder hidden state under num_blocks using projection tensors. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. projection_tensors: Projection tensors used to project the hidden state. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: x_projected: Projected states of shape [batch_size, latent_dim, num_blocks, hidden_size / num_blocks]. """ batch_size, latent_dim, _ = common_layers.shape_list(x) x = tf.reshape(x, shape=[1, -1, hidden_size]) x_tiled = tf.reshape( tf.tile(x, multiples=[num_blocks, 1, 1]), shape=[num_blocks, -1, hidden_size]) x_projected = tf.matmul(x_tiled, projection_tensors) x_projected = tf.transpose(x_projected, perm=[1, 0, 2]) x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1]) return x_4d
python
def project_hidden(x, projection_tensors, hidden_size, num_blocks): """Project encoder hidden state under num_blocks using projection tensors. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. projection_tensors: Projection tensors used to project the hidden state. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: x_projected: Projected states of shape [batch_size, latent_dim, num_blocks, hidden_size / num_blocks]. """ batch_size, latent_dim, _ = common_layers.shape_list(x) x = tf.reshape(x, shape=[1, -1, hidden_size]) x_tiled = tf.reshape( tf.tile(x, multiples=[num_blocks, 1, 1]), shape=[num_blocks, -1, hidden_size]) x_projected = tf.matmul(x_tiled, projection_tensors) x_projected = tf.transpose(x_projected, perm=[1, 0, 2]) x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1]) return x_4d
[ "def", "project_hidden", "(", "x", ",", "projection_tensors", ",", "hidden_size", ",", "num_blocks", ")", ":", "batch_size", ",", "latent_dim", ",", "_", "=", "common_layers", ".", "shape_list", "(", "x", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "shape", "=", "[", "1", ",", "-", "1", ",", "hidden_size", "]", ")", "x_tiled", "=", "tf", ".", "reshape", "(", "tf", ".", "tile", "(", "x", ",", "multiples", "=", "[", "num_blocks", ",", "1", ",", "1", "]", ")", ",", "shape", "=", "[", "num_blocks", ",", "-", "1", ",", "hidden_size", "]", ")", "x_projected", "=", "tf", ".", "matmul", "(", "x_tiled", ",", "projection_tensors", ")", "x_projected", "=", "tf", ".", "transpose", "(", "x_projected", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", "x_4d", "=", "tf", ".", "reshape", "(", "x_projected", ",", "[", "batch_size", ",", "latent_dim", ",", "num_blocks", ",", "-", "1", "]", ")", "return", "x_4d" ]
Project encoder hidden state under num_blocks using projection tensors. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. projection_tensors: Projection tensors used to project the hidden state. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: x_projected: Projected states of shape [batch_size, latent_dim, num_blocks, hidden_size / num_blocks].
[ "Project", "encoder", "hidden", "state", "under", "num_blocks", "using", "projection", "tensors", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L33-L54
21,933
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
slice_hidden
def slice_hidden(x, hidden_size, num_blocks): """Slice encoder hidden state under num_blocks. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim]. """ batch_size, latent_dim, _ = common_layers.shape_list(x) block_dim = hidden_size // num_blocks x_sliced = tf.reshape(x, shape=[batch_size, latent_dim, num_blocks, block_dim]) return x_sliced
python
def slice_hidden(x, hidden_size, num_blocks): """Slice encoder hidden state under num_blocks. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim]. """ batch_size, latent_dim, _ = common_layers.shape_list(x) block_dim = hidden_size // num_blocks x_sliced = tf.reshape(x, shape=[batch_size, latent_dim, num_blocks, block_dim]) return x_sliced
[ "def", "slice_hidden", "(", "x", ",", "hidden_size", ",", "num_blocks", ")", ":", "batch_size", ",", "latent_dim", ",", "_", "=", "common_layers", ".", "shape_list", "(", "x", ")", "block_dim", "=", "hidden_size", "//", "num_blocks", "x_sliced", "=", "tf", ".", "reshape", "(", "x", ",", "shape", "=", "[", "batch_size", ",", "latent_dim", ",", "num_blocks", ",", "block_dim", "]", ")", "return", "x_sliced" ]
Slice encoder hidden state under num_blocks. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
[ "Slice", "encoder", "hidden", "state", "under", "num_blocks", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L57-L72
21,934
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
embedding_lookup
def embedding_lookup(x, means, num_blocks, block_v_size, bottleneck_kind="dvq", random_top_k=1, soft_em=False, num_samples=1, do_hard_gumbel_softmax=False, temperature_warmup_steps=150000, num_flows=0, approximate_gs_entropy=False, sum_over_latents=False): """Compute nearest neighbors and loss for training the embeddings via DVQ. Args: x: Continuous encodings of shape [batch_size, latent_dim, num_blocks, block_dim]. means: Embedding table of shape [num_blocks, block_v_size, block_dim]. num_blocks: Number of blocks in DVQ. block_v_size: Number of table entries per block. bottleneck_kind: Discrete bottleneck type. random_top_k: Noisy top-k if this is bigger than 1. soft_em: If True then use soft EM rather than hard EM. num_samples: Number of samples to use for soft EM. do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax samples for gumbel-softmax-dvq bottleneck. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. Used only if bottleneck_kind is gumbel-softmax-dvq. num_flows: Number of inverse autoregressive flows for gumbel-softmax-dvq bottleneck. approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density as a categorical distribution when calculating the sample entropy. Used only if bottleneck_kind is gumbel-softmax-dvq. sum_over_latents: Whether to sum over non-batch dimensions when calculating negative entropy loss. Used only if soft EM or when bottleneck_kind is gumbel-softmax-dvq. Returns: x_means_hot: The nearest neighbor in one hot form, with shape [batch_size * latent_dim, num_blocks, block_v_size]. x_means: The nearest neighbor itself, with shape [batch_size * latent_dim, num_blocks, block_dim]. q_loss: Scalar Tensor representing codebook loss. e_loss: Scalar Tensor representing commitment loss. neg_q_entropy: Scalar Tensor representing negative entropy of variational approximation (0 if it is deterministic). 
""" if bottleneck_kind == "gumbel-softmax-dvq": x_means_hot, neg_q_entropy = gumbel_softmax_nearest_neighbor_dvq( x, means, block_v_size, hard=do_hard_gumbel_softmax, num_samples=num_samples, temperature_warmup_steps=temperature_warmup_steps, num_flows=num_flows, approximate_gs_entropy=approximate_gs_entropy, sum_over_latents=sum_over_latents) else: x_means_hot, neg_q_entropy = nearest_neighbor( x, means, block_v_size, random_top_k, soft_em=soft_em, num_samples=num_samples, sum_over_latents=sum_over_latents) x_means_hot_flat = tf.reshape(x_means_hot, [-1, num_blocks, block_v_size]) x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = tf.transpose(x_means, [1, 0, 2]) batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x) x = tf.reshape(x, [batch_size * latent_dim, num_blocks, block_dim]) # Currently, we use the mean scaling for the commitment loss, as opposed to # summing across all non-batch dimensions. q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means)) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss, neg_q_entropy
python
def embedding_lookup(x, means, num_blocks, block_v_size, bottleneck_kind="dvq", random_top_k=1, soft_em=False, num_samples=1, do_hard_gumbel_softmax=False, temperature_warmup_steps=150000, num_flows=0, approximate_gs_entropy=False, sum_over_latents=False): """Compute nearest neighbors and loss for training the embeddings via DVQ. Args: x: Continuous encodings of shape [batch_size, latent_dim, num_blocks, block_dim]. means: Embedding table of shape [num_blocks, block_v_size, block_dim]. num_blocks: Number of blocks in DVQ. block_v_size: Number of table entries per block. bottleneck_kind: Discrete bottleneck type. random_top_k: Noisy top-k if this is bigger than 1. soft_em: If True then use soft EM rather than hard EM. num_samples: Number of samples to use for soft EM. do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax samples for gumbel-softmax-dvq bottleneck. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. Used only if bottleneck_kind is gumbel-softmax-dvq. num_flows: Number of inverse autoregressive flows for gumbel-softmax-dvq bottleneck. approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density as a categorical distribution when calculating the sample entropy. Used only if bottleneck_kind is gumbel-softmax-dvq. sum_over_latents: Whether to sum over non-batch dimensions when calculating negative entropy loss. Used only if soft EM or when bottleneck_kind is gumbel-softmax-dvq. Returns: x_means_hot: The nearest neighbor in one hot form, with shape [batch_size * latent_dim, num_blocks, block_v_size]. x_means: The nearest neighbor itself, with shape [batch_size * latent_dim, num_blocks, block_dim]. q_loss: Scalar Tensor representing codebook loss. e_loss: Scalar Tensor representing commitment loss. neg_q_entropy: Scalar Tensor representing negative entropy of variational approximation (0 if it is deterministic). 
""" if bottleneck_kind == "gumbel-softmax-dvq": x_means_hot, neg_q_entropy = gumbel_softmax_nearest_neighbor_dvq( x, means, block_v_size, hard=do_hard_gumbel_softmax, num_samples=num_samples, temperature_warmup_steps=temperature_warmup_steps, num_flows=num_flows, approximate_gs_entropy=approximate_gs_entropy, sum_over_latents=sum_over_latents) else: x_means_hot, neg_q_entropy = nearest_neighbor( x, means, block_v_size, random_top_k, soft_em=soft_em, num_samples=num_samples, sum_over_latents=sum_over_latents) x_means_hot_flat = tf.reshape(x_means_hot, [-1, num_blocks, block_v_size]) x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = tf.transpose(x_means, [1, 0, 2]) batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x) x = tf.reshape(x, [batch_size * latent_dim, num_blocks, block_dim]) # Currently, we use the mean scaling for the commitment loss, as opposed to # summing across all non-batch dimensions. q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means)) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss, neg_q_entropy
[ "def", "embedding_lookup", "(", "x", ",", "means", ",", "num_blocks", ",", "block_v_size", ",", "bottleneck_kind", "=", "\"dvq\"", ",", "random_top_k", "=", "1", ",", "soft_em", "=", "False", ",", "num_samples", "=", "1", ",", "do_hard_gumbel_softmax", "=", "False", ",", "temperature_warmup_steps", "=", "150000", ",", "num_flows", "=", "0", ",", "approximate_gs_entropy", "=", "False", ",", "sum_over_latents", "=", "False", ")", ":", "if", "bottleneck_kind", "==", "\"gumbel-softmax-dvq\"", ":", "x_means_hot", ",", "neg_q_entropy", "=", "gumbel_softmax_nearest_neighbor_dvq", "(", "x", ",", "means", ",", "block_v_size", ",", "hard", "=", "do_hard_gumbel_softmax", ",", "num_samples", "=", "num_samples", ",", "temperature_warmup_steps", "=", "temperature_warmup_steps", ",", "num_flows", "=", "num_flows", ",", "approximate_gs_entropy", "=", "approximate_gs_entropy", ",", "sum_over_latents", "=", "sum_over_latents", ")", "else", ":", "x_means_hot", ",", "neg_q_entropy", "=", "nearest_neighbor", "(", "x", ",", "means", ",", "block_v_size", ",", "random_top_k", ",", "soft_em", "=", "soft_em", ",", "num_samples", "=", "num_samples", ",", "sum_over_latents", "=", "sum_over_latents", ")", "x_means_hot_flat", "=", "tf", ".", "reshape", "(", "x_means_hot", ",", "[", "-", "1", ",", "num_blocks", ",", "block_v_size", "]", ")", "x_means", "=", "tf", ".", "matmul", "(", "tf", ".", "transpose", "(", "x_means_hot_flat", ",", "perm", "=", "[", "1", ",", "0", ",", "2", "]", ")", ",", "means", ")", "x_means", "=", "tf", ".", "transpose", "(", "x_means", ",", "[", "1", ",", "0", ",", "2", "]", ")", "batch_size", ",", "latent_dim", ",", "num_blocks", ",", "block_dim", "=", "common_layers", ".", "shape_list", "(", "x", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch_size", "*", "latent_dim", ",", "num_blocks", ",", "block_dim", "]", ")", "# Currently, we use the mean scaling for the commitment loss, as opposed to", "# summing across all non-batch dimensions.", "q_loss", 
"=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "tf", ".", "stop_gradient", "(", "x", ")", ",", "x_means", ")", ")", "e_loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "x", ",", "tf", ".", "stop_gradient", "(", "x_means", ")", ")", ")", "return", "x_means_hot", ",", "x_means", ",", "q_loss", ",", "e_loss", ",", "neg_q_entropy" ]
Compute nearest neighbors and loss for training the embeddings via DVQ. Args: x: Continuous encodings of shape [batch_size, latent_dim, num_blocks, block_dim]. means: Embedding table of shape [num_blocks, block_v_size, block_dim]. num_blocks: Number of blocks in DVQ. block_v_size: Number of table entries per block. bottleneck_kind: Discrete bottleneck type. random_top_k: Noisy top-k if this is bigger than 1. soft_em: If True then use soft EM rather than hard EM. num_samples: Number of samples to use for soft EM. do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax samples for gumbel-softmax-dvq bottleneck. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. Used only if bottleneck_kind is gumbel-softmax-dvq. num_flows: Number of inverse autoregressive flows for gumbel-softmax-dvq bottleneck. approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density as a categorical distribution when calculating the sample entropy. Used only if bottleneck_kind is gumbel-softmax-dvq. sum_over_latents: Whether to sum over non-batch dimensions when calculating negative entropy loss. Used only if soft EM or when bottleneck_kind is gumbel-softmax-dvq. Returns: x_means_hot: The nearest neighbor in one hot form, with shape [batch_size * latent_dim, num_blocks, block_v_size]. x_means: The nearest neighbor itself, with shape [batch_size * latent_dim, num_blocks, block_dim]. q_loss: Scalar Tensor representing codebook loss. e_loss: Scalar Tensor representing commitment loss. neg_q_entropy: Scalar Tensor representing negative entropy of variational approximation (0 if it is deterministic).
[ "Compute", "nearest", "neighbors", "and", "loss", "for", "training", "the", "embeddings", "via", "DVQ", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L144-L222
21,935
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
vae
def vae(x, z_size, name=None): """Simple variational autoencoder without discretization. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. name: Name for the bottleneck scope. Returns: Embedding function, latent, loss, mu and log_simga. """ with tf.variable_scope(name, default_name="vae"): mu = tf.layers.dense(x, z_size, name="mu") log_sigma = tf.layers.dense(x, z_size, name="log_sigma") shape = common_layers.shape_list(x) epsilon = tf.random_normal([shape[0], shape[1], 1, z_size]) z = mu + tf.exp(log_sigma / 2) * epsilon kl = 0.5 * tf.reduce_mean( tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1) free_bits = z_size // 4 kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0)) return z, kl_loss, mu, log_sigma
python
def vae(x, z_size, name=None): """Simple variational autoencoder without discretization. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. name: Name for the bottleneck scope. Returns: Embedding function, latent, loss, mu and log_simga. """ with tf.variable_scope(name, default_name="vae"): mu = tf.layers.dense(x, z_size, name="mu") log_sigma = tf.layers.dense(x, z_size, name="log_sigma") shape = common_layers.shape_list(x) epsilon = tf.random_normal([shape[0], shape[1], 1, z_size]) z = mu + tf.exp(log_sigma / 2) * epsilon kl = 0.5 * tf.reduce_mean( tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1) free_bits = z_size // 4 kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0)) return z, kl_loss, mu, log_sigma
[ "def", "vae", "(", "x", ",", "z_size", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"vae\"", ")", ":", "mu", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "z_size", ",", "name", "=", "\"mu\"", ")", "log_sigma", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "z_size", ",", "name", "=", "\"log_sigma\"", ")", "shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "epsilon", "=", "tf", ".", "random_normal", "(", "[", "shape", "[", "0", "]", ",", "shape", "[", "1", "]", ",", "1", ",", "z_size", "]", ")", "z", "=", "mu", "+", "tf", ".", "exp", "(", "log_sigma", "/", "2", ")", "*", "epsilon", "kl", "=", "0.5", "*", "tf", ".", "reduce_mean", "(", "tf", ".", "expm1", "(", "log_sigma", ")", "+", "tf", ".", "square", "(", "mu", ")", "-", "log_sigma", ",", "axis", "=", "-", "1", ")", "free_bits", "=", "z_size", "//", "4", "kl_loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "maximum", "(", "kl", "-", "free_bits", ",", "0.0", ")", ")", "return", "z", ",", "kl_loss", ",", "mu", ",", "log_sigma" ]
Simple variational autoencoder without discretization. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. name: Name for the bottleneck scope. Returns: Embedding function, latent, loss, mu and log_simga.
[ "Simple", "variational", "autoencoder", "without", "discretization", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L360-L381
21,936
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
gumbel_sample
def gumbel_sample(shape): """Sample from the Gumbel distribution, protect from overflows. Args: shape: Shape of Gumbel samples. Returns: Noise drawn from Gumbel distribution. """ uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998) return -tf.log(-tf.log(uniform_samples))
python
def gumbel_sample(shape): """Sample from the Gumbel distribution, protect from overflows. Args: shape: Shape of Gumbel samples. Returns: Noise drawn from Gumbel distribution. """ uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998) return -tf.log(-tf.log(uniform_samples))
[ "def", "gumbel_sample", "(", "shape", ")", ":", "uniform_samples", "=", "tf", ".", "random_uniform", "(", "shape", ",", "minval", "=", "0.00001", ",", "maxval", "=", "0.99998", ")", "return", "-", "tf", ".", "log", "(", "-", "tf", ".", "log", "(", "uniform_samples", ")", ")" ]
Sample from the Gumbel distribution, protect from overflows. Args: shape: Shape of Gumbel samples. Returns: Noise drawn from Gumbel distribution.
[ "Sample", "from", "the", "Gumbel", "distribution", "protect", "from", "overflows", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L402-L412
21,937
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
gumbel_softmax
def gumbel_softmax(x, z_size, mode, softmax_k=0, temperature_warmup_steps=150000, summary=True, name=None): """Gumbel softmax discretization bottleneck. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. mode: tf.estimator.ModeKeys. softmax_k: If > 0 then do top-k softmax. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. summary: Whether to write summaries. name: Name for the bottleneck scope. Returns: Embedding function, discrete code, and loss. """ with tf.variable_scope(name, default_name="gumbel_softmax"): m = tf.layers.dense(x, 2**z_size, name="mask") if softmax_k > 0: m, kl = top_k_softmax(m, softmax_k) return m, m, 1.0 - tf.reduce_mean(kl) logsm = tf.nn.log_softmax(m) # Gumbel-softmax sample. gumbel_samples = gumbel_sample(common_layers.shape_list(m)) steps = temperature_warmup_steps gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 temperature = 1.2 - common_layers.inverse_lin_decay(steps) # 10% of the time keep reasonably high temperature to keep learning. temperature = tf.cond( tf.less(tf.random_uniform([]), 0.9), lambda: temperature, lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) s = tf.nn.softmax((logsm + gumbel_samples) / temperature) m = tf.nn.softmax(m) kl = -tf.reduce_max(logsm, axis=-1) if summary: tf.summary.histogram("max-log", tf.reshape(kl, [-1])) # Calculate the argmax and construct hot vectors. maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1]) maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size)) # Add losses that prevent too few being used. distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True) d_variance = tf.reduce_mean( tf.squared_difference(distrib, d_mean), axis=[0]) d_dev = -tf.reduce_mean(d_variance) ret = s if mode != tf.estimator.ModeKeys.TRAIN: ret = tf.reshape(maxvhot, common_layers.shape_list(s)) # Just hot @eval. 
return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
python
def gumbel_softmax(x, z_size, mode, softmax_k=0, temperature_warmup_steps=150000, summary=True, name=None): """Gumbel softmax discretization bottleneck. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. mode: tf.estimator.ModeKeys. softmax_k: If > 0 then do top-k softmax. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. summary: Whether to write summaries. name: Name for the bottleneck scope. Returns: Embedding function, discrete code, and loss. """ with tf.variable_scope(name, default_name="gumbel_softmax"): m = tf.layers.dense(x, 2**z_size, name="mask") if softmax_k > 0: m, kl = top_k_softmax(m, softmax_k) return m, m, 1.0 - tf.reduce_mean(kl) logsm = tf.nn.log_softmax(m) # Gumbel-softmax sample. gumbel_samples = gumbel_sample(common_layers.shape_list(m)) steps = temperature_warmup_steps gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 temperature = 1.2 - common_layers.inverse_lin_decay(steps) # 10% of the time keep reasonably high temperature to keep learning. temperature = tf.cond( tf.less(tf.random_uniform([]), 0.9), lambda: temperature, lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) s = tf.nn.softmax((logsm + gumbel_samples) / temperature) m = tf.nn.softmax(m) kl = -tf.reduce_max(logsm, axis=-1) if summary: tf.summary.histogram("max-log", tf.reshape(kl, [-1])) # Calculate the argmax and construct hot vectors. maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1]) maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size)) # Add losses that prevent too few being used. distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True) d_variance = tf.reduce_mean( tf.squared_difference(distrib, d_mean), axis=[0]) d_dev = -tf.reduce_mean(d_variance) ret = s if mode != tf.estimator.ModeKeys.TRAIN: ret = tf.reshape(maxvhot, common_layers.shape_list(s)) # Just hot @eval. 
return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
[ "def", "gumbel_softmax", "(", "x", ",", "z_size", ",", "mode", ",", "softmax_k", "=", "0", ",", "temperature_warmup_steps", "=", "150000", ",", "summary", "=", "True", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"gumbel_softmax\"", ")", ":", "m", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "2", "**", "z_size", ",", "name", "=", "\"mask\"", ")", "if", "softmax_k", ">", "0", ":", "m", ",", "kl", "=", "top_k_softmax", "(", "m", ",", "softmax_k", ")", "return", "m", ",", "m", ",", "1.0", "-", "tf", ".", "reduce_mean", "(", "kl", ")", "logsm", "=", "tf", ".", "nn", ".", "log_softmax", "(", "m", ")", "# Gumbel-softmax sample.", "gumbel_samples", "=", "gumbel_sample", "(", "common_layers", ".", "shape_list", "(", "m", ")", ")", "steps", "=", "temperature_warmup_steps", "gumbel_samples", "*=", "common_layers", ".", "inverse_exp_decay", "(", "steps", "//", "5", ")", "*", "0.5", "temperature", "=", "1.2", "-", "common_layers", ".", "inverse_lin_decay", "(", "steps", ")", "# 10% of the time keep reasonably high temperature to keep learning.", "temperature", "=", "tf", ".", "cond", "(", "tf", ".", "less", "(", "tf", ".", "random_uniform", "(", "[", "]", ")", ",", "0.9", ")", ",", "lambda", ":", "temperature", ",", "lambda", ":", "tf", ".", "random_uniform", "(", "[", "]", ",", "minval", "=", "0.5", ",", "maxval", "=", "1.0", ")", ")", "s", "=", "tf", ".", "nn", ".", "softmax", "(", "(", "logsm", "+", "gumbel_samples", ")", "/", "temperature", ")", "m", "=", "tf", ".", "nn", ".", "softmax", "(", "m", ")", "kl", "=", "-", "tf", ".", "reduce_max", "(", "logsm", ",", "axis", "=", "-", "1", ")", "if", "summary", ":", "tf", ".", "summary", ".", "histogram", "(", "\"max-log\"", ",", "tf", ".", "reshape", "(", "kl", ",", "[", "-", "1", "]", ")", ")", "# Calculate the argmax and construct hot vectors.", "maxvec", "=", "tf", ".", "reshape", "(", "tf", ".", "argmax", "(", "m", ",", "axis", "=", "-", "1", 
")", ",", "[", "-", "1", "]", ")", "maxvhot", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "one_hot", "(", "maxvec", ",", "2", "**", "z_size", ")", ")", "# Add losses that prevent too few being used.", "distrib", "=", "tf", ".", "reshape", "(", "logsm", ",", "[", "-", "1", ",", "2", "**", "z_size", "]", ")", "*", "maxvhot", "d_mean", "=", "tf", ".", "reduce_mean", "(", "distrib", ",", "axis", "=", "[", "0", "]", ",", "keep_dims", "=", "True", ")", "d_variance", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "distrib", ",", "d_mean", ")", ",", "axis", "=", "[", "0", "]", ")", "d_dev", "=", "-", "tf", ".", "reduce_mean", "(", "d_variance", ")", "ret", "=", "s", "if", "mode", "!=", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "ret", "=", "tf", ".", "reshape", "(", "maxvhot", ",", "common_layers", ".", "shape_list", "(", "s", ")", ")", "# Just hot @eval.", "return", "m", ",", "ret", ",", "d_dev", "*", "5.0", "+", "tf", ".", "reduce_mean", "(", "kl", ")", "*", "0.002" ]
Gumbel softmax discretization bottleneck. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. mode: tf.estimator.ModeKeys. softmax_k: If > 0 then do top-k softmax. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. summary: Whether to write summaries. name: Name for the bottleneck scope. Returns: Embedding function, discrete code, and loss.
[ "Gumbel", "softmax", "discretization", "bottleneck", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L415-L475
21,938
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
vq_body
def vq_body(x, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): """Discretize each x into one of codebook_size codes.""" x_shape = common_layers.shape_list(x) hidden_size = x_shape[-1] means, ema_means, ema_count = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) x_means_hot, e_loss, distances = vq_nearest_neighbor( x, means, soft_em=soft_em, num_samples=num_samples, temperature=temperature) def loss_with_update(): """Update the ema variables and return loss triggering the update.""" updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]), axis=0), decay, zero_debias=False) dw = tf.matmul(x_means_hot, x, transpose_a=True) updated_ema_means = tf.identity( moving_averages.assign_moving_average( ema_means, dw, decay, zero_debias=False)) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n) updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) with tf.control_dependencies([e_loss]): update_means = means.assign(updated_ema_means) with tf.control_dependencies([update_means]): return beta * e_loss # Loss, also do update if requested. if do_update: loss = loss_with_update() else: loss = tf.cond(do_update, loss_with_update, lambda: beta * e_loss) d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size]) return d, loss, distances
python
def vq_body(x, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): """Discretize each x into one of codebook_size codes.""" x_shape = common_layers.shape_list(x) hidden_size = x_shape[-1] means, ema_means, ema_count = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) x_means_hot, e_loss, distances = vq_nearest_neighbor( x, means, soft_em=soft_em, num_samples=num_samples, temperature=temperature) def loss_with_update(): """Update the ema variables and return loss triggering the update.""" updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]), axis=0), decay, zero_debias=False) dw = tf.matmul(x_means_hot, x, transpose_a=True) updated_ema_means = tf.identity( moving_averages.assign_moving_average( ema_means, dw, decay, zero_debias=False)) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n) updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) with tf.control_dependencies([e_loss]): update_means = means.assign(updated_ema_means) with tf.control_dependencies([update_means]): return beta * e_loss # Loss, also do update if requested. if do_update: loss = loss_with_update() else: loss = tf.cond(do_update, loss_with_update, lambda: beta * e_loss) d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size]) return d, loss, distances
[ "def", "vq_body", "(", "x", ",", "codebook_size", ",", "beta", "=", "0.25", ",", "decay", "=", "0.999", ",", "epsilon", "=", "1e-5", ",", "soft_em", "=", "False", ",", "num_samples", "=", "10", ",", "temperature", "=", "None", ",", "do_update", "=", "True", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "hidden_size", "=", "x_shape", "[", "-", "1", "]", "means", ",", "ema_means", ",", "ema_count", "=", "get_vq_codebook", "(", "codebook_size", ",", "hidden_size", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "hidden_size", "]", ")", "x_means_hot", ",", "e_loss", ",", "distances", "=", "vq_nearest_neighbor", "(", "x", ",", "means", ",", "soft_em", "=", "soft_em", ",", "num_samples", "=", "num_samples", ",", "temperature", "=", "temperature", ")", "def", "loss_with_update", "(", ")", ":", "\"\"\"Update the ema variables and return loss triggering the update.\"\"\"", "updated_ema_count", "=", "moving_averages", ".", "assign_moving_average", "(", "ema_count", ",", "tf", ".", "reduce_sum", "(", "tf", ".", "reshape", "(", "x_means_hot", ",", "shape", "=", "[", "-", "1", ",", "codebook_size", "]", ")", ",", "axis", "=", "0", ")", ",", "decay", ",", "zero_debias", "=", "False", ")", "dw", "=", "tf", ".", "matmul", "(", "x_means_hot", ",", "x", ",", "transpose_a", "=", "True", ")", "updated_ema_means", "=", "tf", ".", "identity", "(", "moving_averages", ".", "assign_moving_average", "(", "ema_means", ",", "dw", ",", "decay", ",", "zero_debias", "=", "False", ")", ")", "n", "=", "tf", ".", "reduce_sum", "(", "updated_ema_count", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "updated_ema_count", "=", "(", "(", "updated_ema_count", "+", "epsilon", ")", "/", "(", "n", "+", "codebook_size", "*", "epsilon", ")", "*", "n", ")", "updated_ema_means", "/=", "tf", ".", "expand_dims", "(", "updated_ema_count", ",", "axis", "=", "-", "1", ")", "with", "tf", ".", "control_dependencies", "(", "[", "e_loss", "]", 
")", ":", "update_means", "=", "means", ".", "assign", "(", "updated_ema_means", ")", "with", "tf", ".", "control_dependencies", "(", "[", "update_means", "]", ")", ":", "return", "beta", "*", "e_loss", "# Loss, also do update if requested.", "if", "do_update", ":", "loss", "=", "loss_with_update", "(", ")", "else", ":", "loss", "=", "tf", ".", "cond", "(", "do_update", ",", "loss_with_update", ",", "lambda", ":", "beta", "*", "e_loss", ")", "d", "=", "tf", ".", "reshape", "(", "x_means_hot", ",", "x_shape", "[", ":", "-", "1", "]", "+", "[", "codebook_size", "]", ")", "return", "d", ",", "loss", ",", "distances" ]
Discretize each x into one of codebook_size codes.
[ "Discretize", "each", "x", "into", "one", "of", "codebook_size", "codes", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L957-L1004
21,939
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
vq_loss
def vq_loss(x, targets, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): """Compute the loss of large vocab tensors using a VQAE codebook. Args: x: Tensor of inputs to be quantized to nearest code targets: Tensor of target indices to target codes codebook_size: Size of quantization codebook beta: scalar float for moving averages decay: scalar float for moving averages epsilon: scalar float for moving averages soft_em: boolean, whether to apply a soft sampling procedure num_samples: if soft_em, number of samples to take temperature: temperature if we want to sample nearest neighbors or None do_update: whether to update the means; True by default, can be a Tensor Returns: discrete_x: one-hot Tensor indicating which codebook element is closest to x x_means: Tensor, on the forward pass: closest codebook element to x, on the backwards pass: soft convex-combination of codebook elements by proximity to x target_means: the codebook elements corresponding to the targets code_loss: loss driving x closer to its nearest codebook element targets_loss: cross-entropy loss driving x closer to code corresponding to target """ x_shape = common_layers.shape_list(x) target_shape = common_layers.shape_list(targets) hidden_size = x_shape[-1] means, _, _ = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) targets = tf.reshape(targets, [-1]) one_hot_targets = tf.one_hot(targets, codebook_size) target_means = tf.matmul(one_hot_targets, means) discrete_x, code_loss, distances = vq_body( x, codebook_size, beta=beta, decay=decay, epsilon=epsilon, soft_em=soft_em, num_samples=num_samples, temperature=temperature, do_update=do_update) logits = -distances targets_loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=targets) targets_loss = tf.reduce_mean(targets_loss) x_means = tf.matmul(discrete_x, means) x_means = x + tf.stop_gradient(x_means - x) discrete_x = 
tf.reshape(discrete_x, x_shape[:-1] + [codebook_size]) target_means = tf.reshape(target_means, target_shape + [hidden_size]) return discrete_x, x_means, target_means, code_loss, targets_loss
python
def vq_loss(x, targets, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): """Compute the loss of large vocab tensors using a VQAE codebook. Args: x: Tensor of inputs to be quantized to nearest code targets: Tensor of target indices to target codes codebook_size: Size of quantization codebook beta: scalar float for moving averages decay: scalar float for moving averages epsilon: scalar float for moving averages soft_em: boolean, whether to apply a soft sampling procedure num_samples: if soft_em, number of samples to take temperature: temperature if we want to sample nearest neighbors or None do_update: whether to update the means; True by default, can be a Tensor Returns: discrete_x: one-hot Tensor indicating which codebook element is closest to x x_means: Tensor, on the forward pass: closest codebook element to x, on the backwards pass: soft convex-combination of codebook elements by proximity to x target_means: the codebook elements corresponding to the targets code_loss: loss driving x closer to its nearest codebook element targets_loss: cross-entropy loss driving x closer to code corresponding to target """ x_shape = common_layers.shape_list(x) target_shape = common_layers.shape_list(targets) hidden_size = x_shape[-1] means, _, _ = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) targets = tf.reshape(targets, [-1]) one_hot_targets = tf.one_hot(targets, codebook_size) target_means = tf.matmul(one_hot_targets, means) discrete_x, code_loss, distances = vq_body( x, codebook_size, beta=beta, decay=decay, epsilon=epsilon, soft_em=soft_em, num_samples=num_samples, temperature=temperature, do_update=do_update) logits = -distances targets_loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=targets) targets_loss = tf.reduce_mean(targets_loss) x_means = tf.matmul(discrete_x, means) x_means = x + tf.stop_gradient(x_means - x) discrete_x = 
tf.reshape(discrete_x, x_shape[:-1] + [codebook_size]) target_means = tf.reshape(target_means, target_shape + [hidden_size]) return discrete_x, x_means, target_means, code_loss, targets_loss
[ "def", "vq_loss", "(", "x", ",", "targets", ",", "codebook_size", ",", "beta", "=", "0.25", ",", "decay", "=", "0.999", ",", "epsilon", "=", "1e-5", ",", "soft_em", "=", "False", ",", "num_samples", "=", "10", ",", "temperature", "=", "None", ",", "do_update", "=", "True", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "target_shape", "=", "common_layers", ".", "shape_list", "(", "targets", ")", "hidden_size", "=", "x_shape", "[", "-", "1", "]", "means", ",", "_", ",", "_", "=", "get_vq_codebook", "(", "codebook_size", ",", "hidden_size", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "hidden_size", "]", ")", "targets", "=", "tf", ".", "reshape", "(", "targets", ",", "[", "-", "1", "]", ")", "one_hot_targets", "=", "tf", ".", "one_hot", "(", "targets", ",", "codebook_size", ")", "target_means", "=", "tf", ".", "matmul", "(", "one_hot_targets", ",", "means", ")", "discrete_x", ",", "code_loss", ",", "distances", "=", "vq_body", "(", "x", ",", "codebook_size", ",", "beta", "=", "beta", ",", "decay", "=", "decay", ",", "epsilon", "=", "epsilon", ",", "soft_em", "=", "soft_em", ",", "num_samples", "=", "num_samples", ",", "temperature", "=", "temperature", ",", "do_update", "=", "do_update", ")", "logits", "=", "-", "distances", "targets_loss", "=", "tf", ".", "losses", ".", "sparse_softmax_cross_entropy", "(", "logits", "=", "logits", ",", "labels", "=", "targets", ")", "targets_loss", "=", "tf", ".", "reduce_mean", "(", "targets_loss", ")", "x_means", "=", "tf", ".", "matmul", "(", "discrete_x", ",", "means", ")", "x_means", "=", "x", "+", "tf", ".", "stop_gradient", "(", "x_means", "-", "x", ")", "discrete_x", "=", "tf", ".", "reshape", "(", "discrete_x", ",", "x_shape", "[", ":", "-", "1", "]", "+", "[", "codebook_size", "]", ")", "target_means", "=", "tf", ".", "reshape", "(", "target_means", ",", "target_shape", "+", "[", "hidden_size", "]", ")", "return", "discrete_x", ",", "x_means", ",", "target_means", 
",", "code_loss", ",", "targets_loss" ]
Compute the loss of large vocab tensors using a VQAE codebook. Args: x: Tensor of inputs to be quantized to nearest code targets: Tensor of target indices to target codes codebook_size: Size of quantization codebook beta: scalar float for moving averages decay: scalar float for moving averages epsilon: scalar float for moving averages soft_em: boolean, whether to apply a soft sampling procedure num_samples: if soft_em, number of samples to take temperature: temperature if we want to sample nearest neighbors or None do_update: whether to update the means; True by default, can be a Tensor Returns: discrete_x: one-hot Tensor indicating which codebook element is closest to x x_means: Tensor, on the forward pass: closest codebook element to x, on the backwards pass: soft convex-combination of codebook elements by proximity to x target_means: the codebook elements corresponding to the targets code_loss: loss driving x closer to its nearest codebook element targets_loss: cross-entropy loss driving x closer to code corresponding to target
[ "Compute", "the", "loss", "of", "large", "vocab", "tensors", "using", "a", "VQAE", "codebook", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1007-L1071
21,940
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
tanh_discrete_bottleneck
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise, discretize_warmup_steps, mode): """Simple discretization through tanh, flip bottleneck_noise many bits.""" x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck") d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0 if mode == tf.estimator.ModeKeys.TRAIN: x += tf.truncated_normal( common_layers.shape_list(x), mean=0.0, stddev=0.2) x = tf.tanh(x) d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x) if mode == tf.estimator.ModeKeys.TRAIN: noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 d *= noise d = common_layers.mix(d, x, discretize_warmup_steps, mode == tf.estimator.ModeKeys.TRAIN) return d, d0
python
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise, discretize_warmup_steps, mode): """Simple discretization through tanh, flip bottleneck_noise many bits.""" x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck") d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0 if mode == tf.estimator.ModeKeys.TRAIN: x += tf.truncated_normal( common_layers.shape_list(x), mean=0.0, stddev=0.2) x = tf.tanh(x) d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x) if mode == tf.estimator.ModeKeys.TRAIN: noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 d *= noise d = common_layers.mix(d, x, discretize_warmup_steps, mode == tf.estimator.ModeKeys.TRAIN) return d, d0
[ "def", "tanh_discrete_bottleneck", "(", "x", ",", "bottleneck_bits", ",", "bottleneck_noise", ",", "discretize_warmup_steps", ",", "mode", ")", ":", "x", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "bottleneck_bits", ",", "name", "=", "\"tanh_discrete_bottleneck\"", ")", "d0", "=", "tf", ".", "stop_gradient", "(", "2.0", "*", "tf", ".", "to_float", "(", "tf", ".", "less", "(", "0.0", ",", "x", ")", ")", ")", "-", "1.0", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "x", "+=", "tf", ".", "truncated_normal", "(", "common_layers", ".", "shape_list", "(", "x", ")", ",", "mean", "=", "0.0", ",", "stddev", "=", "0.2", ")", "x", "=", "tf", ".", "tanh", "(", "x", ")", "d", "=", "x", "+", "tf", ".", "stop_gradient", "(", "2.0", "*", "tf", ".", "to_float", "(", "tf", ".", "less", "(", "0.0", ",", "x", ")", ")", "-", "1.0", "-", "x", ")", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "noise", "=", "tf", ".", "random_uniform", "(", "common_layers", ".", "shape_list", "(", "x", ")", ")", "noise", "=", "2.0", "*", "tf", ".", "to_float", "(", "tf", ".", "less", "(", "bottleneck_noise", ",", "noise", ")", ")", "-", "1.0", "d", "*=", "noise", "d", "=", "common_layers", ".", "mix", "(", "d", ",", "x", ",", "discretize_warmup_steps", ",", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "return", "d", ",", "d0" ]
Simple discretization through tanh, flip bottleneck_noise many bits.
[ "Simple", "discretization", "through", "tanh", "flip", "bottleneck_noise", "many", "bits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1374-L1390
21,941
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
tanh_discrete_unbottleneck
def tanh_discrete_unbottleneck(x, hidden_size): """Simple un-discretization from tanh.""" x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck") return x
python
def tanh_discrete_unbottleneck(x, hidden_size): """Simple un-discretization from tanh.""" x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck") return x
[ "def", "tanh_discrete_unbottleneck", "(", "x", ",", "hidden_size", ")", ":", "x", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "hidden_size", ",", "name", "=", "\"tanh_discrete_unbottleneck\"", ")", "return", "x" ]
Simple un-discretization from tanh.
[ "Simple", "un", "-", "discretization", "from", "tanh", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1393-L1396
21,942
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
isemhash_bottleneck
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise, discretize_warmup_steps, mode, isemhash_noise_dev=0.5, isemhash_mix_prob=0.5): """Improved semantic hashing bottleneck.""" with tf.variable_scope("isemhash_bottleneck"): x = tf.layers.dense(x, bottleneck_bits, name="dense") y = common_layers.saturating_sigmoid(x) if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN: noise = tf.truncated_normal( common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev) y = common_layers.saturating_sigmoid(x + noise) d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) d = 2.0 * d - 1.0 # Move from [0, 1] to [-1, 1]. if mode == tf.estimator.ModeKeys.TRAIN: # Flip some bits. noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 d *= noise d = common_layers.mix( d, 2.0 * y - 1.0, discretize_warmup_steps, mode == tf.estimator.ModeKeys.TRAIN, max_prob=isemhash_mix_prob) return d, 0.0
python
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise, discretize_warmup_steps, mode, isemhash_noise_dev=0.5, isemhash_mix_prob=0.5): """Improved semantic hashing bottleneck.""" with tf.variable_scope("isemhash_bottleneck"): x = tf.layers.dense(x, bottleneck_bits, name="dense") y = common_layers.saturating_sigmoid(x) if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN: noise = tf.truncated_normal( common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev) y = common_layers.saturating_sigmoid(x + noise) d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) d = 2.0 * d - 1.0 # Move from [0, 1] to [-1, 1]. if mode == tf.estimator.ModeKeys.TRAIN: # Flip some bits. noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 d *= noise d = common_layers.mix( d, 2.0 * y - 1.0, discretize_warmup_steps, mode == tf.estimator.ModeKeys.TRAIN, max_prob=isemhash_mix_prob) return d, 0.0
[ "def", "isemhash_bottleneck", "(", "x", ",", "bottleneck_bits", ",", "bottleneck_noise", ",", "discretize_warmup_steps", ",", "mode", ",", "isemhash_noise_dev", "=", "0.5", ",", "isemhash_mix_prob", "=", "0.5", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"isemhash_bottleneck\"", ")", ":", "x", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "bottleneck_bits", ",", "name", "=", "\"dense\"", ")", "y", "=", "common_layers", ".", "saturating_sigmoid", "(", "x", ")", "if", "isemhash_noise_dev", ">", "0", "and", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "noise", "=", "tf", ".", "truncated_normal", "(", "common_layers", ".", "shape_list", "(", "x", ")", ",", "mean", "=", "0.0", ",", "stddev", "=", "isemhash_noise_dev", ")", "y", "=", "common_layers", ".", "saturating_sigmoid", "(", "x", "+", "noise", ")", "d", "=", "tf", ".", "to_float", "(", "tf", ".", "less", "(", "0.5", ",", "y", ")", ")", "+", "y", "-", "tf", ".", "stop_gradient", "(", "y", ")", "d", "=", "2.0", "*", "d", "-", "1.0", "# Move from [0, 1] to [-1, 1].", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "# Flip some bits.", "noise", "=", "tf", ".", "random_uniform", "(", "common_layers", ".", "shape_list", "(", "x", ")", ")", "noise", "=", "2.0", "*", "tf", ".", "to_float", "(", "tf", ".", "less", "(", "bottleneck_noise", ",", "noise", ")", ")", "-", "1.0", "d", "*=", "noise", "d", "=", "common_layers", ".", "mix", "(", "d", ",", "2.0", "*", "y", "-", "1.0", ",", "discretize_warmup_steps", ",", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ",", "max_prob", "=", "isemhash_mix_prob", ")", "return", "d", ",", "0.0" ]
Improved semantic hashing bottleneck.
[ "Improved", "semantic", "hashing", "bottleneck", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1399-L1426
21,943
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
isemhash_unbottleneck
def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0): """Improved semantic hashing un-bottleneck.""" filter_size = int(hidden_size * isemhash_filter_size_multiplier) x = 0.5 * (x - 1.0) # Move from [-1, 1] to [0, 1]. with tf.variable_scope("isemhash_unbottleneck"): h1a = tf.layers.dense(x, filter_size, name="hidden1a") h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b") h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2") return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final")
python
def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0): """Improved semantic hashing un-bottleneck.""" filter_size = int(hidden_size * isemhash_filter_size_multiplier) x = 0.5 * (x - 1.0) # Move from [-1, 1] to [0, 1]. with tf.variable_scope("isemhash_unbottleneck"): h1a = tf.layers.dense(x, filter_size, name="hidden1a") h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b") h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2") return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final")
[ "def", "isemhash_unbottleneck", "(", "x", ",", "hidden_size", ",", "isemhash_filter_size_multiplier", "=", "1.0", ")", ":", "filter_size", "=", "int", "(", "hidden_size", "*", "isemhash_filter_size_multiplier", ")", "x", "=", "0.5", "*", "(", "x", "-", "1.0", ")", "# Move from [-1, 1] to [0, 1].", "with", "tf", ".", "variable_scope", "(", "\"isemhash_unbottleneck\"", ")", ":", "h1a", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "filter_size", ",", "name", "=", "\"hidden1a\"", ")", "h1b", "=", "tf", ".", "layers", ".", "dense", "(", "1.0", "-", "x", ",", "filter_size", ",", "name", "=", "\"hidden1b\"", ")", "h2", "=", "tf", ".", "layers", ".", "dense", "(", "tf", ".", "nn", ".", "relu", "(", "h1a", "+", "h1b", ")", ",", "filter_size", ",", "name", "=", "\"hidden2\"", ")", "return", "tf", ".", "layers", ".", "dense", "(", "tf", ".", "nn", ".", "relu", "(", "h2", ")", ",", "hidden_size", ",", "name", "=", "\"final\"", ")" ]
Improved semantic hashing un-bottleneck.
[ "Improved", "semantic", "hashing", "un", "-", "bottleneck", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1429-L1437
21,944
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
parametrized_bottleneck
def parametrized_bottleneck(x, hparams): """Meta-function calling all the above bottlenecks with hparams.""" if hparams.bottleneck_kind == "tanh_discrete": d, _ = tanh_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode) return d, 0.0 if hparams.bottleneck_kind == "isemhash": return isemhash_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode, hparams.isemhash_noise_dev, hparams.isemhash_mix_prob) if hparams.bottleneck_kind == "vq": return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon) if hparams.bottleneck_kind == "em": return vq_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, soft_em=True, num_samples=hparams.vq_num_samples) if hparams.bottleneck_kind == "gumbel_softmax": return gumbel_softmax_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, hparams.temperature_warmup_steps, hard=False, summary=True) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
python
def parametrized_bottleneck(x, hparams): """Meta-function calling all the above bottlenecks with hparams.""" if hparams.bottleneck_kind == "tanh_discrete": d, _ = tanh_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode) return d, 0.0 if hparams.bottleneck_kind == "isemhash": return isemhash_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode, hparams.isemhash_noise_dev, hparams.isemhash_mix_prob) if hparams.bottleneck_kind == "vq": return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon) if hparams.bottleneck_kind == "em": return vq_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, soft_em=True, num_samples=hparams.vq_num_samples) if hparams.bottleneck_kind == "gumbel_softmax": return gumbel_softmax_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, hparams.temperature_warmup_steps, hard=False, summary=True) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
[ "def", "parametrized_bottleneck", "(", "x", ",", "hparams", ")", ":", "if", "hparams", ".", "bottleneck_kind", "==", "\"tanh_discrete\"", ":", "d", ",", "_", "=", "tanh_discrete_bottleneck", "(", "x", ",", "hparams", ".", "bottleneck_bits", ",", "hparams", ".", "bottleneck_noise", "*", "0.5", ",", "hparams", ".", "discretize_warmup_steps", ",", "hparams", ".", "mode", ")", "return", "d", ",", "0.0", "if", "hparams", ".", "bottleneck_kind", "==", "\"isemhash\"", ":", "return", "isemhash_bottleneck", "(", "x", ",", "hparams", ".", "bottleneck_bits", ",", "hparams", ".", "bottleneck_noise", "*", "0.5", ",", "hparams", ".", "discretize_warmup_steps", ",", "hparams", ".", "mode", ",", "hparams", ".", "isemhash_noise_dev", ",", "hparams", ".", "isemhash_mix_prob", ")", "if", "hparams", ".", "bottleneck_kind", "==", "\"vq\"", ":", "return", "vq_discrete_bottleneck", "(", "x", ",", "hparams", ".", "bottleneck_bits", ",", "hparams", ".", "vq_beta", ",", "hparams", ".", "vq_decay", ",", "hparams", ".", "vq_epsilon", ")", "if", "hparams", ".", "bottleneck_kind", "==", "\"em\"", ":", "return", "vq_discrete_bottleneck", "(", "x", ",", "hparams", ".", "bottleneck_bits", ",", "hparams", ".", "vq_beta", ",", "hparams", ".", "vq_decay", ",", "hparams", ".", "vq_epsilon", ",", "soft_em", "=", "True", ",", "num_samples", "=", "hparams", ".", "vq_num_samples", ")", "if", "hparams", ".", "bottleneck_kind", "==", "\"gumbel_softmax\"", ":", "return", "gumbel_softmax_discrete_bottleneck", "(", "x", ",", "hparams", ".", "bottleneck_bits", ",", "hparams", ".", "vq_beta", ",", "hparams", ".", "vq_decay", ",", "hparams", ".", "vq_epsilon", ",", "hparams", ".", "temperature_warmup_steps", ",", "hard", "=", "False", ",", "summary", "=", "True", ")", "raise", "ValueError", "(", "\"Unsupported hparams.bottleneck_kind %s\"", "%", "hparams", ".", "bottleneck_kind", ")" ]
Meta-function calling all the above bottlenecks with hparams.
[ "Meta", "-", "function", "calling", "all", "the", "above", "bottlenecks", "with", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1440-L1476
21,945
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
parametrized_unbottleneck
def parametrized_unbottleneck(x, hidden_size, hparams): """Meta-function calling all the above un-bottlenecks with hparams.""" if hparams.bottleneck_kind == "tanh_discrete": return tanh_discrete_unbottleneck(x, hidden_size) if hparams.bottleneck_kind == "isemhash": return isemhash_unbottleneck(x, hidden_size, hparams.isemhash_filter_size_multiplier) if hparams.bottleneck_kind in ["vq", "em", "gumbel_softmax"]: return vq_discrete_unbottleneck(x, hidden_size) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
python
def parametrized_unbottleneck(x, hidden_size, hparams): """Meta-function calling all the above un-bottlenecks with hparams.""" if hparams.bottleneck_kind == "tanh_discrete": return tanh_discrete_unbottleneck(x, hidden_size) if hparams.bottleneck_kind == "isemhash": return isemhash_unbottleneck(x, hidden_size, hparams.isemhash_filter_size_multiplier) if hparams.bottleneck_kind in ["vq", "em", "gumbel_softmax"]: return vq_discrete_unbottleneck(x, hidden_size) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
[ "def", "parametrized_unbottleneck", "(", "x", ",", "hidden_size", ",", "hparams", ")", ":", "if", "hparams", ".", "bottleneck_kind", "==", "\"tanh_discrete\"", ":", "return", "tanh_discrete_unbottleneck", "(", "x", ",", "hidden_size", ")", "if", "hparams", ".", "bottleneck_kind", "==", "\"isemhash\"", ":", "return", "isemhash_unbottleneck", "(", "x", ",", "hidden_size", ",", "hparams", ".", "isemhash_filter_size_multiplier", ")", "if", "hparams", ".", "bottleneck_kind", "in", "[", "\"vq\"", ",", "\"em\"", ",", "\"gumbel_softmax\"", "]", ":", "return", "vq_discrete_unbottleneck", "(", "x", ",", "hidden_size", ")", "raise", "ValueError", "(", "\"Unsupported hparams.bottleneck_kind %s\"", "%", "hparams", ".", "bottleneck_kind", ")" ]
Meta-function calling all the above un-bottlenecks with hparams.
[ "Meta", "-", "function", "calling", "all", "the", "above", "un", "-", "bottlenecks", "with", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1479-L1489
21,946
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
iaf_hparams
def iaf_hparams(hidden_size=512, filter_size=4096): """Create hyperpameters for inverse autoregressive flows. Args: hidden_size: Width of attention layers and neural network output layer. filter_size: Hidden layer width for neural network. Returns: hparams: Hyperpameters with basic presets for inverse autoregressive flows. """ hparams = common_hparams.basic_params1() # Attention hyperparameters. hparams.hidden_size = hidden_size hparams.add_hparam("attention_key_channels", None) hparams.add_hparam("attention_value_channels", None) hparams.add_hparam("num_heads", 4) hparams.add_hparam("attention_dropout", 0.1) hparams.add_hparam("shared_rel", False) hparams.add_hparam("block_width", 1) hparams.add_hparam("block_length", 1) hparams.add_hparam("q_filter_width", 1) hparams.add_hparam("kv_filter_width", 1) # Preprocessing and postprocesing hyperparameters. hparams.layer_preprocess_sequence = "n" hparams.layer_prepostprocess_dropout = 0.1 hparams.norm_type = "layer" hparams.norm_epsilon = 1e-06 hparams.layer_prepostprocess_dropout_broadcast_dims = "" hparams.layer_postprocess_sequence = "da" # Feedforward neural network hyperparameters. hparams.add_hparam("filter_size", filter_size) hparams.add_hparam("ffn_layer", "conv_hidden_relu") hparams.add_hparam("relu_dropout", 0.1) return hparams
python
def iaf_hparams(hidden_size=512, filter_size=4096): """Create hyperpameters for inverse autoregressive flows. Args: hidden_size: Width of attention layers and neural network output layer. filter_size: Hidden layer width for neural network. Returns: hparams: Hyperpameters with basic presets for inverse autoregressive flows. """ hparams = common_hparams.basic_params1() # Attention hyperparameters. hparams.hidden_size = hidden_size hparams.add_hparam("attention_key_channels", None) hparams.add_hparam("attention_value_channels", None) hparams.add_hparam("num_heads", 4) hparams.add_hparam("attention_dropout", 0.1) hparams.add_hparam("shared_rel", False) hparams.add_hparam("block_width", 1) hparams.add_hparam("block_length", 1) hparams.add_hparam("q_filter_width", 1) hparams.add_hparam("kv_filter_width", 1) # Preprocessing and postprocesing hyperparameters. hparams.layer_preprocess_sequence = "n" hparams.layer_prepostprocess_dropout = 0.1 hparams.norm_type = "layer" hparams.norm_epsilon = 1e-06 hparams.layer_prepostprocess_dropout_broadcast_dims = "" hparams.layer_postprocess_sequence = "da" # Feedforward neural network hyperparameters. hparams.add_hparam("filter_size", filter_size) hparams.add_hparam("ffn_layer", "conv_hidden_relu") hparams.add_hparam("relu_dropout", 0.1) return hparams
[ "def", "iaf_hparams", "(", "hidden_size", "=", "512", ",", "filter_size", "=", "4096", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "# Attention hyperparameters.", "hparams", ".", "hidden_size", "=", "hidden_size", "hparams", ".", "add_hparam", "(", "\"attention_key_channels\"", ",", "None", ")", "hparams", ".", "add_hparam", "(", "\"attention_value_channels\"", ",", "None", ")", "hparams", ".", "add_hparam", "(", "\"num_heads\"", ",", "4", ")", "hparams", ".", "add_hparam", "(", "\"attention_dropout\"", ",", "0.1", ")", "hparams", ".", "add_hparam", "(", "\"shared_rel\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"block_width\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"block_length\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"q_filter_width\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"kv_filter_width\"", ",", "1", ")", "# Preprocessing and postprocesing hyperparameters.", "hparams", ".", "layer_preprocess_sequence", "=", "\"n\"", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "hparams", ".", "norm_type", "=", "\"layer\"", "hparams", ".", "norm_epsilon", "=", "1e-06", "hparams", ".", "layer_prepostprocess_dropout_broadcast_dims", "=", "\"\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"da\"", "# Feedforward neural network hyperparameters.", "hparams", ".", "add_hparam", "(", "\"filter_size\"", ",", "filter_size", ")", "hparams", ".", "add_hparam", "(", "\"ffn_layer\"", ",", "\"conv_hidden_relu\"", ")", "hparams", ".", "add_hparam", "(", "\"relu_dropout\"", ",", "0.1", ")", "return", "hparams" ]
Create hyperpameters for inverse autoregressive flows. Args: hidden_size: Width of attention layers and neural network output layer. filter_size: Hidden layer width for neural network. Returns: hparams: Hyperpameters with basic presets for inverse autoregressive flows.
[ "Create", "hyperpameters", "for", "inverse", "autoregressive", "flows", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1492-L1528
21,947
tensorflow/tensor2tensor
tensor2tensor/data_generators/lm1b.py
_original_vocab
def _original_vocab(tmp_dir): """Returns a set containing the original vocabulary. This is important for comparing with published results. Args: tmp_dir: directory containing dataset. Returns: a set of strings """ vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/" "vocab-2016-09-10.txt") vocab_filename = os.path.basename(vocab_url + ".en") vocab_filepath = os.path.join(tmp_dir, vocab_filename) if not os.path.exists(vocab_filepath): generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url) return set([ text_encoder.native_to_unicode(l.strip()) for l in tf.gfile.Open(vocab_filepath) ])
python
def _original_vocab(tmp_dir): """Returns a set containing the original vocabulary. This is important for comparing with published results. Args: tmp_dir: directory containing dataset. Returns: a set of strings """ vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/" "vocab-2016-09-10.txt") vocab_filename = os.path.basename(vocab_url + ".en") vocab_filepath = os.path.join(tmp_dir, vocab_filename) if not os.path.exists(vocab_filepath): generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url) return set([ text_encoder.native_to_unicode(l.strip()) for l in tf.gfile.Open(vocab_filepath) ])
[ "def", "_original_vocab", "(", "tmp_dir", ")", ":", "vocab_url", "=", "(", "\"http://download.tensorflow.org/models/LM_LSTM_CNN/\"", "\"vocab-2016-09-10.txt\"", ")", "vocab_filename", "=", "os", ".", "path", ".", "basename", "(", "vocab_url", "+", "\".en\"", ")", "vocab_filepath", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "vocab_filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "vocab_filepath", ")", ":", "generator_utils", ".", "maybe_download", "(", "tmp_dir", ",", "vocab_filename", ",", "vocab_url", ")", "return", "set", "(", "[", "text_encoder", ".", "native_to_unicode", "(", "l", ".", "strip", "(", ")", ")", "for", "l", "in", "tf", ".", "gfile", ".", "Open", "(", "vocab_filepath", ")", "]", ")" ]
Returns a set containing the original vocabulary. This is important for comparing with published results. Args: tmp_dir: directory containing dataset. Returns: a set of strings
[ "Returns", "a", "set", "containing", "the", "original", "vocabulary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/lm1b.py#L35-L55
21,948
tensorflow/tensor2tensor
tensor2tensor/data_generators/lm1b.py
_replace_oov
def _replace_oov(original_vocab, line): """Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words. """ return u" ".join( [word if word in original_vocab else u"UNK" for word in line.split()])
python
def _replace_oov(original_vocab, line): """Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words. """ return u" ".join( [word if word in original_vocab else u"UNK" for word in line.split()])
[ "def", "_replace_oov", "(", "original_vocab", ",", "line", ")", ":", "return", "u\" \"", ".", "join", "(", "[", "word", "if", "word", "in", "original_vocab", "else", "u\"UNK\"", "for", "word", "in", "line", ".", "split", "(", ")", "]", ")" ]
Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words.
[ "Replace", "out", "-", "of", "-", "vocab", "words", "with", "UNK", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/lm1b.py#L58-L71
21,949
tensorflow/tensor2tensor
tensor2tensor/models/research/cycle_gan.py
lossfn
def lossfn(real_input, fake_input, compress, hparams, lsgan, name): """Loss function.""" eps = 1e-12 with tf.variable_scope(name): d1 = discriminator(real_input, compress, hparams, "discriminator") d2 = discriminator(fake_input, compress, hparams, "discriminator", reuse=True) if lsgan: dloss = tf.reduce_mean( tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2)) gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9)) loss = (dloss + gloss)/2 else: # cross_entropy dloss = -tf.reduce_mean( tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2)) gloss = -tf.reduce_mean(tf.log(d2 + eps)) loss = (dloss + gloss)/2 return loss
python
def lossfn(real_input, fake_input, compress, hparams, lsgan, name): """Loss function.""" eps = 1e-12 with tf.variable_scope(name): d1 = discriminator(real_input, compress, hparams, "discriminator") d2 = discriminator(fake_input, compress, hparams, "discriminator", reuse=True) if lsgan: dloss = tf.reduce_mean( tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2)) gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9)) loss = (dloss + gloss)/2 else: # cross_entropy dloss = -tf.reduce_mean( tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2)) gloss = -tf.reduce_mean(tf.log(d2 + eps)) loss = (dloss + gloss)/2 return loss
[ "def", "lossfn", "(", "real_input", ",", "fake_input", ",", "compress", ",", "hparams", ",", "lsgan", ",", "name", ")", ":", "eps", "=", "1e-12", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "d1", "=", "discriminator", "(", "real_input", ",", "compress", ",", "hparams", ",", "\"discriminator\"", ")", "d2", "=", "discriminator", "(", "fake_input", ",", "compress", ",", "hparams", ",", "\"discriminator\"", ",", "reuse", "=", "True", ")", "if", "lsgan", ":", "dloss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "d1", ",", "0.9", ")", ")", "+", "tf", ".", "reduce_mean", "(", "tf", ".", "square", "(", "d2", ")", ")", "gloss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "d2", ",", "0.9", ")", ")", "loss", "=", "(", "dloss", "+", "gloss", ")", "/", "2", "else", ":", "# cross_entropy", "dloss", "=", "-", "tf", ".", "reduce_mean", "(", "tf", ".", "log", "(", "d1", "+", "eps", ")", ")", "-", "tf", ".", "reduce_mean", "(", "tf", ".", "log1p", "(", "eps", "-", "d2", ")", ")", "gloss", "=", "-", "tf", ".", "reduce_mean", "(", "tf", ".", "log", "(", "d2", "+", "eps", ")", ")", "loss", "=", "(", "dloss", "+", "gloss", ")", "/", "2", "return", "loss" ]
Loss function.
[ "Loss", "function", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/cycle_gan.py#L46-L63
21,950
tensorflow/tensor2tensor
tensor2tensor/models/research/cycle_gan.py
cycle_gan_internal
def cycle_gan_internal(inputs, targets, _, hparams): """Cycle GAN, main step used for training.""" with tf.variable_scope("cycle_gan"): # Embed inputs and targets. inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) inputs = common_layers.embedding( inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, "embed", reuse=True) x, _ = split_on_batch(inputs) _, y = split_on_batch(targets) # Y --> X y_fake = generator(y, hparams, "Fy", reuse=False) y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") # X --> Y x_fake = generator(x, hparams, "Gx", reuse=False) x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") # Cycle-Consistency y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( tf.abs(x_fake_ - x)) y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( tf.abs(y_fake_ - y)) cycloss = x_to_x_loss + y_to_y_loss sample_generated = generator(inputs, hparams, "Gx", reuse=True) sample_generated = tf.layers.dense( sample_generated, hparams.vocab_size, name="softmax", reuse=None) sample_generated = tf.stop_gradient( tf.expand_dims(sample_generated, axis=2)) losses = {"cycloss": cycloss, "y_to_x_loss": y_to_x_loss, "x_to_y_loss": x_to_y_loss} return sample_generated, losses
python
def cycle_gan_internal(inputs, targets, _, hparams): """Cycle GAN, main step used for training.""" with tf.variable_scope("cycle_gan"): # Embed inputs and targets. inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) inputs = common_layers.embedding( inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, "embed", reuse=True) x, _ = split_on_batch(inputs) _, y = split_on_batch(targets) # Y --> X y_fake = generator(y, hparams, "Fy", reuse=False) y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") # X --> Y x_fake = generator(x, hparams, "Gx", reuse=False) x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") # Cycle-Consistency y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( tf.abs(x_fake_ - x)) y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( tf.abs(y_fake_ - y)) cycloss = x_to_x_loss + y_to_y_loss sample_generated = generator(inputs, hparams, "Gx", reuse=True) sample_generated = tf.layers.dense( sample_generated, hparams.vocab_size, name="softmax", reuse=None) sample_generated = tf.stop_gradient( tf.expand_dims(sample_generated, axis=2)) losses = {"cycloss": cycloss, "y_to_x_loss": y_to_x_loss, "x_to_y_loss": x_to_y_loss} return sample_generated, losses
[ "def", "cycle_gan_internal", "(", "inputs", ",", "targets", ",", "_", ",", "hparams", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"cycle_gan\"", ")", ":", "# Embed inputs and targets.", "inputs_orig", ",", "targets_orig", "=", "tf", ".", "to_int32", "(", "inputs", ")", ",", "tf", ".", "to_int32", "(", "targets", ")", "inputs", "=", "common_layers", ".", "embedding", "(", "inputs_orig", ",", "hparams", ".", "vocab_size", ",", "hparams", ".", "hidden_size", ",", "\"embed\"", ")", "targets", "=", "common_layers", ".", "embedding", "(", "targets_orig", ",", "hparams", ".", "vocab_size", ",", "hparams", ".", "hidden_size", ",", "\"embed\"", ",", "reuse", "=", "True", ")", "x", ",", "_", "=", "split_on_batch", "(", "inputs", ")", "_", ",", "y", "=", "split_on_batch", "(", "targets", ")", "# Y --> X", "y_fake", "=", "generator", "(", "y", ",", "hparams", ",", "\"Fy\"", ",", "reuse", "=", "False", ")", "y_to_x_loss", "=", "lossfn", "(", "y", ",", "y_fake", ",", "True", ",", "hparams", ",", "True", ",", "\"YtoX\"", ")", "# X --> Y", "x_fake", "=", "generator", "(", "x", ",", "hparams", ",", "\"Gx\"", ",", "reuse", "=", "False", ")", "x_to_y_loss", "=", "lossfn", "(", "y", ",", "x_fake", ",", "True", ",", "hparams", ",", "True", ",", "\"XtoY\"", ")", "# Cycle-Consistency", "y_fake_", "=", "generator", "(", "y_fake", ",", "hparams", ",", "\"Gx\"", ",", "reuse", "=", "True", ")", "x_fake_", "=", "generator", "(", "x_fake", ",", "hparams", ",", "\"Fy\"", ",", "reuse", "=", "True", ")", "x_to_x_loss", "=", "hparams", ".", "cycle_loss_multiplier1", "*", "tf", ".", "reduce_mean", "(", "tf", ".", "abs", "(", "x_fake_", "-", "x", ")", ")", "y_to_y_loss", "=", "hparams", ".", "cycle_loss_multiplier2", "*", "tf", ".", "reduce_mean", "(", "tf", ".", "abs", "(", "y_fake_", "-", "y", ")", ")", "cycloss", "=", "x_to_x_loss", "+", "y_to_y_loss", "sample_generated", "=", "generator", "(", "inputs", ",", "hparams", ",", "\"Gx\"", ",", "reuse", "=", "True", ")", 
"sample_generated", "=", "tf", ".", "layers", ".", "dense", "(", "sample_generated", ",", "hparams", ".", "vocab_size", ",", "name", "=", "\"softmax\"", ",", "reuse", "=", "None", ")", "sample_generated", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "expand_dims", "(", "sample_generated", ",", "axis", "=", "2", ")", ")", "losses", "=", "{", "\"cycloss\"", ":", "cycloss", ",", "\"y_to_x_loss\"", ":", "y_to_x_loss", ",", "\"x_to_y_loss\"", ":", "x_to_y_loss", "}", "return", "sample_generated", ",", "losses" ]
Cycle GAN, main step used for training.
[ "Cycle", "GAN", "main", "step", "used", "for", "training", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/cycle_gan.py#L72-L113
21,951
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
decode_hparams
def decode_hparams(overrides=""): """Hparams for decoding.""" hparams = decoding.decode_hparams() # Number of interpolations between [0.0, 1.0]. hparams.add_hparam("num_interp", 11) # Which level(s) to interpolate. hparams.add_hparam("level_interp", [0, 1, 2]) # "all" or "ranked", interpolate all channels or a "ranked". hparams.add_hparam("channel_interp", "all") # interpolate channels ranked according to squared L2 norm. hparams.add_hparam("rank_interp", 1) # Whether on not to save frames as summaries hparams.add_hparam("save_frames", True) hparams.parse(overrides) return hparams
python
def decode_hparams(overrides=""): """Hparams for decoding.""" hparams = decoding.decode_hparams() # Number of interpolations between [0.0, 1.0]. hparams.add_hparam("num_interp", 11) # Which level(s) to interpolate. hparams.add_hparam("level_interp", [0, 1, 2]) # "all" or "ranked", interpolate all channels or a "ranked". hparams.add_hparam("channel_interp", "all") # interpolate channels ranked according to squared L2 norm. hparams.add_hparam("rank_interp", 1) # Whether on not to save frames as summaries hparams.add_hparam("save_frames", True) hparams.parse(overrides) return hparams
[ "def", "decode_hparams", "(", "overrides", "=", "\"\"", ")", ":", "hparams", "=", "decoding", ".", "decode_hparams", "(", ")", "# Number of interpolations between [0.0, 1.0].", "hparams", ".", "add_hparam", "(", "\"num_interp\"", ",", "11", ")", "# Which level(s) to interpolate.", "hparams", ".", "add_hparam", "(", "\"level_interp\"", ",", "[", "0", ",", "1", ",", "2", "]", ")", "# \"all\" or \"ranked\", interpolate all channels or a \"ranked\".", "hparams", ".", "add_hparam", "(", "\"channel_interp\"", ",", "\"all\"", ")", "# interpolate channels ranked according to squared L2 norm.", "hparams", ".", "add_hparam", "(", "\"rank_interp\"", ",", "1", ")", "# Whether on not to save frames as summaries", "hparams", ".", "add_hparam", "(", "\"save_frames\"", ",", "True", ")", "hparams", ".", "parse", "(", "overrides", ")", "return", "hparams" ]
Hparams for decoding.
[ "Hparams", "for", "decoding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L53-L67
21,952
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
preprocess_frame
def preprocess_frame(frame): """Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in between [-0.5, 0.5] """ # Normalize from [0.0, 1.0] -> [-0.5, 0.5] frame = common_layers.convert_rgb_to_real(frame) frame = frame - 0.5 frame, _ = glow_ops.uniform_binning_correction(frame) return frame
python
def preprocess_frame(frame): """Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in between [-0.5, 0.5] """ # Normalize from [0.0, 1.0] -> [-0.5, 0.5] frame = common_layers.convert_rgb_to_real(frame) frame = frame - 0.5 frame, _ = glow_ops.uniform_binning_correction(frame) return frame
[ "def", "preprocess_frame", "(", "frame", ")", ":", "# Normalize from [0.0, 1.0] -> [-0.5, 0.5]", "frame", "=", "common_layers", ".", "convert_rgb_to_real", "(", "frame", ")", "frame", "=", "frame", "-", "0.5", "frame", ",", "_", "=", "glow_ops", ".", "uniform_binning_correction", "(", "frame", ")", "return", "frame" ]
Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in between [-0.5, 0.5]
[ "Preprocess", "frame", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L70-L85
21,953
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
frame_to_latents
def frame_to_latents(frame, hparams): """Encode frames to latents.""" # Preprocess frame = preprocess_frame(frame) # Encode [X_t] to [z^1_t, z^2_t .. z^l_t] glow_vals = glow_ops.encoder_decoder( "codec", frame, hparams, eps=None, reverse=False) z_top, _, level_eps, _, _ = glow_vals return z_top, level_eps
python
def frame_to_latents(frame, hparams): """Encode frames to latents.""" # Preprocess frame = preprocess_frame(frame) # Encode [X_t] to [z^1_t, z^2_t .. z^l_t] glow_vals = glow_ops.encoder_decoder( "codec", frame, hparams, eps=None, reverse=False) z_top, _, level_eps, _, _ = glow_vals return z_top, level_eps
[ "def", "frame_to_latents", "(", "frame", ",", "hparams", ")", ":", "# Preprocess", "frame", "=", "preprocess_frame", "(", "frame", ")", "# Encode [X_t] to [z^1_t, z^2_t .. z^l_t]", "glow_vals", "=", "glow_ops", ".", "encoder_decoder", "(", "\"codec\"", ",", "frame", ",", "hparams", ",", "eps", "=", "None", ",", "reverse", "=", "False", ")", "z_top", ",", "_", ",", "level_eps", ",", "_", ",", "_", "=", "glow_vals", "return", "z_top", ",", "level_eps" ]
Encode frames to latents.
[ "Encode", "frames", "to", "latents", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L88-L97
21,954
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
latents_to_frames
def latents_to_frames(z_top_interp, level_eps_interp, hparams): """Decodes latents to frames.""" # Decode [z^1_t, z^2_t .. z^l_t] to [X_t] images, _, _, _ = glow_ops.encoder_decoder( "codec", z_top_interp, hparams, eps=level_eps_interp, reverse=True) images = glow_ops.postprocess(images) return images
python
def latents_to_frames(z_top_interp, level_eps_interp, hparams): """Decodes latents to frames.""" # Decode [z^1_t, z^2_t .. z^l_t] to [X_t] images, _, _, _ = glow_ops.encoder_decoder( "codec", z_top_interp, hparams, eps=level_eps_interp, reverse=True) images = glow_ops.postprocess(images) return images
[ "def", "latents_to_frames", "(", "z_top_interp", ",", "level_eps_interp", ",", "hparams", ")", ":", "# Decode [z^1_t, z^2_t .. z^l_t] to [X_t]", "images", ",", "_", ",", "_", ",", "_", "=", "glow_ops", ".", "encoder_decoder", "(", "\"codec\"", ",", "z_top_interp", ",", "hparams", ",", "eps", "=", "level_eps_interp", ",", "reverse", "=", "True", ")", "images", "=", "glow_ops", ".", "postprocess", "(", "images", ")", "return", "images" ]
Decodes latents to frames.
[ "Decodes", "latents", "to", "frames", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L100-L106
21,955
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
interpolate
def interpolate(features, hparams, decode_hp): """Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C) """ inputs, targets = features["inputs"], features["targets"] inputs = tf.unstack(inputs, axis=1) targets = tf.unstack(targets, axis=1) coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp) # (X_1, X_t) -> (z_1, z_t) first_frame, last_frame = inputs[0], targets[-1] first_top_z, first_level_eps = frame_to_latents(first_frame, hparams) last_top_z, last_level_eps = frame_to_latents(last_frame, hparams) # Interpolate latents at all levels. first_lats = first_level_eps + [first_top_z] last_lats = last_level_eps + [last_top_z] interp_lats = [] lat_iterator = enumerate(zip(first_lats, last_lats)) for level_ind, (first_lat, last_lat) in lat_iterator: if level_ind in decode_hp.level_interp: if decode_hp.channel_interp == "all": interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs) else: interp_lat = glow_ops.linear_interpolate_rank( first_lat, last_lat, coeffs, decode_hp.rank_interp) else: interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1]) interp_lats.append(interp_lat) level_eps_interp = interp_lats[:hparams.n_levels-1] z_top_interp = interp_lats[-1] images = latents_to_frames(z_top_interp, level_eps_interp, hparams) return images, first_frame, last_frame
python
def interpolate(features, hparams, decode_hp): """Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C) """ inputs, targets = features["inputs"], features["targets"] inputs = tf.unstack(inputs, axis=1) targets = tf.unstack(targets, axis=1) coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp) # (X_1, X_t) -> (z_1, z_t) first_frame, last_frame = inputs[0], targets[-1] first_top_z, first_level_eps = frame_to_latents(first_frame, hparams) last_top_z, last_level_eps = frame_to_latents(last_frame, hparams) # Interpolate latents at all levels. first_lats = first_level_eps + [first_top_z] last_lats = last_level_eps + [last_top_z] interp_lats = [] lat_iterator = enumerate(zip(first_lats, last_lats)) for level_ind, (first_lat, last_lat) in lat_iterator: if level_ind in decode_hp.level_interp: if decode_hp.channel_interp == "all": interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs) else: interp_lat = glow_ops.linear_interpolate_rank( first_lat, last_lat, coeffs, decode_hp.rank_interp) else: interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1]) interp_lats.append(interp_lat) level_eps_interp = interp_lats[:hparams.n_levels-1] z_top_interp = interp_lats[-1] images = latents_to_frames(z_top_interp, level_eps_interp, hparams) return images, first_frame, last_frame
[ "def", "interpolate", "(", "features", ",", "hparams", ",", "decode_hp", ")", ":", "inputs", ",", "targets", "=", "features", "[", "\"inputs\"", "]", ",", "features", "[", "\"targets\"", "]", "inputs", "=", "tf", ".", "unstack", "(", "inputs", ",", "axis", "=", "1", ")", "targets", "=", "tf", ".", "unstack", "(", "targets", ",", "axis", "=", "1", ")", "coeffs", "=", "np", ".", "linspace", "(", "0.0", ",", "1.0", ",", "decode_hp", ".", "num_interp", ")", "# (X_1, X_t) -> (z_1, z_t)", "first_frame", ",", "last_frame", "=", "inputs", "[", "0", "]", ",", "targets", "[", "-", "1", "]", "first_top_z", ",", "first_level_eps", "=", "frame_to_latents", "(", "first_frame", ",", "hparams", ")", "last_top_z", ",", "last_level_eps", "=", "frame_to_latents", "(", "last_frame", ",", "hparams", ")", "# Interpolate latents at all levels.", "first_lats", "=", "first_level_eps", "+", "[", "first_top_z", "]", "last_lats", "=", "last_level_eps", "+", "[", "last_top_z", "]", "interp_lats", "=", "[", "]", "lat_iterator", "=", "enumerate", "(", "zip", "(", "first_lats", ",", "last_lats", ")", ")", "for", "level_ind", ",", "(", "first_lat", ",", "last_lat", ")", "in", "lat_iterator", ":", "if", "level_ind", "in", "decode_hp", ".", "level_interp", ":", "if", "decode_hp", ".", "channel_interp", "==", "\"all\"", ":", "interp_lat", "=", "glow_ops", ".", "linear_interpolate", "(", "first_lat", ",", "last_lat", ",", "coeffs", ")", "else", ":", "interp_lat", "=", "glow_ops", ".", "linear_interpolate_rank", "(", "first_lat", ",", "last_lat", ",", "coeffs", ",", "decode_hp", ".", "rank_interp", ")", "else", ":", "interp_lat", "=", "tf", ".", "tile", "(", "first_lat", ",", "[", "decode_hp", ".", "num_interp", ",", "1", ",", "1", ",", "1", "]", ")", "interp_lats", ".", "append", "(", "interp_lat", ")", "level_eps_interp", "=", "interp_lats", "[", ":", "hparams", ".", "n_levels", "-", "1", "]", "z_top_interp", "=", "interp_lats", "[", "-", "1", "]", "images", "=", "latents_to_frames", "(", 
"z_top_interp", ",", "level_eps_interp", ",", "hparams", ")", "return", "images", ",", "first_frame", ",", "last_frame" ]
Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C)
[ "Interpolate", "between", "the", "first", "input", "frame", "and", "last", "target", "frame", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L109-L150
21,956
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
get_summaries_log_dir
def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp.""" child_dir = decode_hp.summaries_log_dir level_dir = "".join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == "all": rank_dir = "all" else: rank_dir = "rank_%d" % decode_hp.rank_interp child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += "_{}".format(dataset_split) return os.path.join(output_dir, child_dir)
python
def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp.""" child_dir = decode_hp.summaries_log_dir level_dir = "".join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == "all": rank_dir = "all" else: rank_dir = "rank_%d" % decode_hp.rank_interp child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += "_{}".format(dataset_split) return os.path.join(output_dir, child_dir)
[ "def", "get_summaries_log_dir", "(", "decode_hp", ",", "output_dir", ",", "dataset_split", ")", ":", "child_dir", "=", "decode_hp", ".", "summaries_log_dir", "level_dir", "=", "\"\"", ".", "join", "(", "[", "str", "(", "level", ")", "for", "level", "in", "decode_hp", ".", "level_interp", "]", ")", "if", "decode_hp", ".", "channel_interp", "==", "\"all\"", ":", "rank_dir", "=", "\"all\"", "else", ":", "rank_dir", "=", "\"rank_%d\"", "%", "decode_hp", ".", "rank_interp", "child_dir", "=", "\"%s/%s_%s\"", "%", "(", "child_dir", ",", "level_dir", ",", "rank_dir", ")", "if", "dataset_split", "is", "not", "None", ":", "child_dir", "+=", "\"_{}\"", ".", "format", "(", "dataset_split", ")", "return", "os", ".", "path", ".", "join", "(", "output_dir", ",", "child_dir", ")" ]
Get nested summaries_log_dir based on decode_hp.
[ "Get", "nested", "summaries_log_dir", "based", "on", "decode_hp", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L153-L164
21,957
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
interpolations_to_summary
def interpolations_to_summary(sample_ind, interpolations, first_frame, last_frame, hparams, decode_hp): """Converts interpolated frames into tf summaries. The summaries consists of: 1. Image summary corresponding to the first frame. 2. Image summary corresponding to the last frame. 3. The interpolated frames as a gif summary. Args: sample_ind: int interpolations: Numpy array, shape=(num_interp, H, W, 3) first_frame: Numpy array, shape=(HWC) last_frame: Numpy array, shape=(HWC) hparams: HParams, train hparams decode_hp: HParams, decode hparams Returns: summaries: list of tf Summary Values. """ parent_tag = "sample_%d" % sample_ind frame_shape = hparams.problem.frame_shape interp_shape = [hparams.batch_size, decode_hp.num_interp] + frame_shape interpolations = np.reshape(interpolations, interp_shape) interp_tag = "%s/interp/%s" % (parent_tag, decode_hp.channel_interp) if decode_hp.channel_interp == "ranked": interp_tag = "%s/rank_%d" % (interp_tag, decode_hp.rank_interp) summaries, _ = common_video.py_gif_summary( interp_tag, interpolations, return_summary_value=True, max_outputs=decode_hp.max_display_outputs, fps=decode_hp.frames_per_second) if decode_hp.save_frames: first_frame_summ = image_utils.image_to_tf_summary_value( first_frame, "%s/first" % parent_tag) last_frame_summ = image_utils.image_to_tf_summary_value( last_frame, "%s/last" % parent_tag) summaries.append(first_frame_summ) summaries.append(last_frame_summ) return summaries
python
def interpolations_to_summary(sample_ind, interpolations, first_frame,
                              last_frame, hparams, decode_hp):
  """Converts interpolated frames into tf summaries.

  Produces a gif summary of the interpolated frames and, when
  decode_hp.save_frames is set, image summaries of the first and last
  frames.

  Args:
    sample_ind: int, index used to namespace the summary tags.
    interpolations: Numpy array, shape=(num_interp, H, W, 3).
    first_frame: Numpy array, shape=(HWC).
    last_frame: Numpy array, shape=(HWC).
    hparams: HParams, train hparams.
    decode_hp: HParams, decode hparams.

  Returns:
    summaries: list of tf Summary Values.
  """
  parent = "sample_%d" % sample_ind
  target_shape = ([hparams.batch_size, decode_hp.num_interp] +
                  hparams.problem.frame_shape)
  frames = np.reshape(interpolations, target_shape)

  tag = "%s/interp/%s" % (parent, decode_hp.channel_interp)
  if decode_hp.channel_interp == "ranked":
    tag = "%s/rank_%d" % (tag, decode_hp.rank_interp)
  summaries, _ = common_video.py_gif_summary(
      tag, frames, return_summary_value=True,
      max_outputs=decode_hp.max_display_outputs,
      fps=decode_hp.frames_per_second)

  if decode_hp.save_frames:
    for frame, suffix in ((first_frame, "first"), (last_frame, "last")):
      summaries.append(image_utils.image_to_tf_summary_value(
          frame, "%s/%s" % (parent, suffix)))
  return summaries
[ "def", "interpolations_to_summary", "(", "sample_ind", ",", "interpolations", ",", "first_frame", ",", "last_frame", ",", "hparams", ",", "decode_hp", ")", ":", "parent_tag", "=", "\"sample_%d\"", "%", "sample_ind", "frame_shape", "=", "hparams", ".", "problem", ".", "frame_shape", "interp_shape", "=", "[", "hparams", ".", "batch_size", ",", "decode_hp", ".", "num_interp", "]", "+", "frame_shape", "interpolations", "=", "np", ".", "reshape", "(", "interpolations", ",", "interp_shape", ")", "interp_tag", "=", "\"%s/interp/%s\"", "%", "(", "parent_tag", ",", "decode_hp", ".", "channel_interp", ")", "if", "decode_hp", ".", "channel_interp", "==", "\"ranked\"", ":", "interp_tag", "=", "\"%s/rank_%d\"", "%", "(", "interp_tag", ",", "decode_hp", ".", "rank_interp", ")", "summaries", ",", "_", "=", "common_video", ".", "py_gif_summary", "(", "interp_tag", ",", "interpolations", ",", "return_summary_value", "=", "True", ",", "max_outputs", "=", "decode_hp", ".", "max_display_outputs", ",", "fps", "=", "decode_hp", ".", "frames_per_second", ")", "if", "decode_hp", ".", "save_frames", ":", "first_frame_summ", "=", "image_utils", ".", "image_to_tf_summary_value", "(", "first_frame", ",", "\"%s/first\"", "%", "parent_tag", ")", "last_frame_summ", "=", "image_utils", ".", "image_to_tf_summary_value", "(", "last_frame", ",", "\"%s/last\"", "%", "parent_tag", ")", "summaries", ".", "append", "(", "first_frame_summ", ")", "summaries", ".", "append", "(", "last_frame_summ", ")", "return", "summaries" ]
Converts interpolated frames into tf summaries. The summaries consists of: 1. Image summary corresponding to the first frame. 2. Image summary corresponding to the last frame. 3. The interpolated frames as a gif summary. Args: sample_ind: int interpolations: Numpy array, shape=(num_interp, H, W, 3) first_frame: Numpy array, shape=(HWC) last_frame: Numpy array, shape=(HWC) hparams: HParams, train hparams decode_hp: HParams, decode hparams Returns: summaries: list of tf Summary Values.
[ "Converts", "interpolated", "frames", "into", "tf", "summaries", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L167-L205
21,958
tensorflow/tensor2tensor
tensor2tensor/models/video/epva_params.py
next_frame_epva
def next_frame_epva(): """EPVA hparams.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 4 hparams.bottom = { "inputs": modalities.video_raw_bottom, "targets": modalities.video_raw_targets_bottom, } hparams.loss = { "targets": modalities.video_l2_raw_loss, } hparams.top = { "targets": modalities.video_raw_top, } hparams.learning_rate_schedule = "constant" hparams.learning_rate_constant = 1e-05 hparams.batch_size = 2 hparams.clip_grad_norm = 0.01 # TODO(msaffar): disentangle EPVA from SV2P hparams.add_hparam("reward_prediction", False) hparams.add_hparam("clip_pixel_values", True) hparams.add_hparam("context_frames", 5) hparams.add_hparam("enc_learning_rate", 1e-5) hparams.add_hparam("enc_pred_loss_scale", 0.1) hparams.add_hparam("enc_pred_loss_scale_delay", 6e5) hparams.add_hparam("enc_size", 64) hparams.add_hparam("enc_keep_prob", .65) hparams.add_hparam("enc_pred_use_l1_loss", False) hparams.add_hparam("enc_pred_use_l2norm", False) hparams.add_hparam("van_learning_rate", 3e-5) hparams.add_hparam("van_keep_prob", .9) hparams.add_hparam("sequence_length ", 64) hparams.add_hparam("skip_num", 2) hparams.add_hparam("pred_noise_std", 0) hparams.add_hparam("lstm_state_noise_stddev", 0) return hparams
python
def next_frame_epva():
  """EPVA hparams."""
  hparams = basic_deterministic_params.next_frame_basic_deterministic()
  hparams.video_num_input_frames = 4
  hparams.video_num_target_frames = 4
  # Raw video modalities with an L2 reconstruction loss on the targets.
  hparams.bottom = {
      "inputs": modalities.video_raw_bottom,
      "targets": modalities.video_raw_targets_bottom,
  }
  hparams.loss = {
      "targets": modalities.video_l2_raw_loss,
  }
  hparams.top = {
      "targets": modalities.video_raw_top,
  }
  hparams.learning_rate_schedule = "constant"
  hparams.learning_rate_constant = 1e-05
  hparams.batch_size = 2
  hparams.clip_grad_norm = 0.01
  # TODO(msaffar): disentangle EPVA from SV2P
  hparams.add_hparam("reward_prediction", False)
  hparams.add_hparam("clip_pixel_values", True)
  hparams.add_hparam("context_frames", 5)
  # enc_* and van_* below carry per-component settings (separate learning
  # rates, dropout keep-probs and loss options).
  hparams.add_hparam("enc_learning_rate", 1e-5)
  hparams.add_hparam("enc_pred_loss_scale", 0.1)
  hparams.add_hparam("enc_pred_loss_scale_delay", 6e5)
  hparams.add_hparam("enc_size", 64)
  hparams.add_hparam("enc_keep_prob", .65)
  hparams.add_hparam("enc_pred_use_l1_loss", False)
  hparams.add_hparam("enc_pred_use_l2norm", False)
  hparams.add_hparam("van_learning_rate", 3e-5)
  hparams.add_hparam("van_keep_prob", .9)
  # NOTE(review): the hparam name below has a trailing space
  # ("sequence_length ") -- this looks like a typo, but removing the space
  # could collide with an existing "sequence_length" hparam; confirm
  # against the base hparams before fixing.
  hparams.add_hparam("sequence_length ", 64)
  hparams.add_hparam("skip_num", 2)
  hparams.add_hparam("pred_noise_std", 0)
  hparams.add_hparam("lstm_state_noise_stddev", 0)
  return hparams
[ "def", "next_frame_epva", "(", ")", ":", "hparams", "=", "basic_deterministic_params", ".", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "video_num_input_frames", "=", "4", "hparams", ".", "video_num_target_frames", "=", "4", "hparams", ".", "bottom", "=", "{", "\"inputs\"", ":", "modalities", ".", "video_raw_bottom", ",", "\"targets\"", ":", "modalities", ".", "video_raw_targets_bottom", ",", "}", "hparams", ".", "loss", "=", "{", "\"targets\"", ":", "modalities", ".", "video_l2_raw_loss", ",", "}", "hparams", ".", "top", "=", "{", "\"targets\"", ":", "modalities", ".", "video_raw_top", ",", "}", "hparams", ".", "learning_rate_schedule", "=", "\"constant\"", "hparams", ".", "learning_rate_constant", "=", "1e-05", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "clip_grad_norm", "=", "0.01", "# TODO(msaffar): disentangle EPVA from SV2P", "hparams", ".", "add_hparam", "(", "\"reward_prediction\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"clip_pixel_values\"", ",", "True", ")", "hparams", ".", "add_hparam", "(", "\"context_frames\"", ",", "5", ")", "hparams", ".", "add_hparam", "(", "\"enc_learning_rate\"", ",", "1e-5", ")", "hparams", ".", "add_hparam", "(", "\"enc_pred_loss_scale\"", ",", "0.1", ")", "hparams", ".", "add_hparam", "(", "\"enc_pred_loss_scale_delay\"", ",", "6e5", ")", "hparams", ".", "add_hparam", "(", "\"enc_size\"", ",", "64", ")", "hparams", ".", "add_hparam", "(", "\"enc_keep_prob\"", ",", ".65", ")", "hparams", ".", "add_hparam", "(", "\"enc_pred_use_l1_loss\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"enc_pred_use_l2norm\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"van_learning_rate\"", ",", "3e-5", ")", "hparams", ".", "add_hparam", "(", "\"van_keep_prob\"", ",", ".9", ")", "hparams", ".", "add_hparam", "(", "\"sequence_length \"", ",", "64", ")", "hparams", ".", "add_hparam", "(", "\"skip_num\"", ",", "2", ")", "hparams", ".", "add_hparam", "(", "\"pred_noise_std\"", 
",", "0", ")", "hparams", ".", "add_hparam", "(", "\"lstm_state_noise_stddev\"", ",", "0", ")", "return", "hparams" ]
EPVA hparams.
[ "EPVA", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva_params.py#L27-L63
21,959
tensorflow/tensor2tensor
tensor2tensor/utils/multistep_optimizer.py
MultistepAdamOptimizer._create_slots
def _create_slots(self, var_list): """Create slot variables for Adam with accumulated gradients.""" super(MultistepAdamOptimizer, self)._create_slots(var_list) first_var = min(var_list, key=lambda x: x.name) self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1, name="iter", colocate_with=first_var) for v in var_list: self._zeros_slot(v, "grad_acc", self._name)
python
def _create_slots(self, var_list):
  """Create slot variables for Adam with accumulated gradients."""
  super(MultistepAdamOptimizer, self)._create_slots(var_list)
  # Non-slot counter tracking position within the n-step accumulation
  # cycle; colocated with the lexicographically-first variable. An Adam
  # update is applied when the counter is 0 (see _apply_cond), so with
  # n == 1 we start at 0 (apply every step) and with n > 1 we start at 1
  # (the first n - 1 steps only accumulate).
  first_var = min(var_list, key=lambda x: x.name)
  self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,
                                 name="iter",
                                 colocate_with=first_var)
  # Per-variable accumulator summing gradients between real Adam steps.
  for v in var_list:
    self._zeros_slot(v, "grad_acc", self._name)
[ "def", "_create_slots", "(", "self", ",", "var_list", ")", ":", "super", "(", "MultistepAdamOptimizer", ",", "self", ")", ".", "_create_slots", "(", "var_list", ")", "first_var", "=", "min", "(", "var_list", ",", "key", "=", "lambda", "x", ":", "x", ".", "name", ")", "self", ".", "_create_non_slot_variable", "(", "initial_value", "=", "0", "if", "self", ".", "_n", "==", "1", "else", "1", ",", "name", "=", "\"iter\"", ",", "colocate_with", "=", "first_var", ")", "for", "v", "in", "var_list", ":", "self", ".", "_zeros_slot", "(", "v", ",", "\"grad_acc\"", ",", "self", ".", "_name", ")" ]
Create slot variables for Adam with accumulated gradients.
[ "Create", "slot", "variables", "for", "Adam", "with", "accumulated", "gradients", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/multistep_optimizer.py#L43-L51
21,960
tensorflow/tensor2tensor
tensor2tensor/utils/multistep_optimizer.py
MultistepAdamOptimizer._apply_cond
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs): """Apply conditionally if counter is zero.""" grad_acc = self.get_slot(var, "grad_acc") def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs): total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype) adam_op = apply_fn(total_grad, var, *args, **kwargs) with tf.control_dependencies([adam_op]): grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc), use_locking=self._use_locking) return tf.group(adam_op, grad_acc_to_zero_op) def accumulate_gradient(grad_acc, grad): assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking) return tf.group(assign_op) # Strip return value return tf.cond( tf.equal(self._get_iter_variable(), 0), lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs), lambda: accumulate_gradient(grad_acc, grad))
python
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
  """Apply conditionally if counter is zero."""
  grad_acc = self.get_slot(var, "grad_acc")

  def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
    # Average the accumulated gradients (plus the current one) over the n
    # accumulation steps, take a real Adam step, then reset the
    # accumulator to zero for the next cycle.
    total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
    adam_op = apply_fn(total_grad, var, *args, **kwargs)
    with tf.control_dependencies([adam_op]):
      grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                            use_locking=self._use_locking)
    return tf.group(adam_op, grad_acc_to_zero_op)

  def accumulate_gradient(grad_acc, grad):
    # Not yet time to apply: just add this gradient to the accumulator.
    assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
    return tf.group(assign_op)  # Strip return value

  # Counter == 0 marks the apply step of the accumulation cycle; on all
  # other steps the gradient is only accumulated.
  return tf.cond(
      tf.equal(self._get_iter_variable(), 0),
      lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
      lambda: accumulate_gradient(grad_acc, grad))
[ "def", "_apply_cond", "(", "self", ",", "apply_fn", ",", "grad", ",", "var", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "grad_acc", "=", "self", ".", "get_slot", "(", "var", ",", "\"grad_acc\"", ")", "def", "apply_adam", "(", "grad_acc", ",", "apply_fn", ",", "grad", ",", "var", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "total_grad", "=", "(", "grad_acc", "+", "grad", ")", "/", "tf", ".", "cast", "(", "self", ".", "_n_t", ",", "grad", ".", "dtype", ")", "adam_op", "=", "apply_fn", "(", "total_grad", ",", "var", ",", "*", "args", ",", "*", "*", "kwargs", ")", "with", "tf", ".", "control_dependencies", "(", "[", "adam_op", "]", ")", ":", "grad_acc_to_zero_op", "=", "grad_acc", ".", "assign", "(", "tf", ".", "zeros_like", "(", "grad_acc", ")", ",", "use_locking", "=", "self", ".", "_use_locking", ")", "return", "tf", ".", "group", "(", "adam_op", ",", "grad_acc_to_zero_op", ")", "def", "accumulate_gradient", "(", "grad_acc", ",", "grad", ")", ":", "assign_op", "=", "tf", ".", "assign_add", "(", "grad_acc", ",", "grad", ",", "use_locking", "=", "self", ".", "_use_locking", ")", "return", "tf", ".", "group", "(", "assign_op", ")", "# Strip return value", "return", "tf", ".", "cond", "(", "tf", ".", "equal", "(", "self", ".", "_get_iter_variable", "(", ")", ",", "0", ")", ",", "lambda", ":", "apply_adam", "(", "grad_acc", ",", "apply_fn", ",", "grad", ",", "var", ",", "*", "args", ",", "*", "*", "kwargs", ")", ",", "lambda", ":", "accumulate_gradient", "(", "grad_acc", ",", "grad", ")", ")" ]
Apply conditionally if counter is zero.
[ "Apply", "conditionally", "if", "counter", "is", "zero", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/multistep_optimizer.py#L62-L81
21,961
tensorflow/tensor2tensor
tensor2tensor/utils/multistep_optimizer.py
MultistepAdamOptimizer._finish
def _finish(self, update_ops, name_scope): """Updates beta_power variables every n batches and incrs counter.""" iter_ = self._get_iter_variable() beta1_power, beta2_power = self._get_beta_accumulators() with tf.control_dependencies(update_ops): with tf.colocate_with(iter_): def update_beta_op(): update_beta1 = beta1_power.assign( beta1_power * self._beta1_t, use_locking=self._use_locking) update_beta2 = beta2_power.assign( beta2_power * self._beta2_t, use_locking=self._use_locking) return tf.group(update_beta1, update_beta2) maybe_update_beta = tf.cond( tf.equal(iter_, 0), update_beta_op, tf.no_op) with tf.control_dependencies([maybe_update_beta]): update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t), use_locking=self._use_locking) return tf.group( *update_ops + [update_iter, maybe_update_beta], name=name_scope)
python
def _finish(self, update_ops, name_scope):
  """Updates beta_power variables every n batches and incrs counter."""
  iter_ = self._get_iter_variable()
  beta1_power, beta2_power = self._get_beta_accumulators()
  with tf.control_dependencies(update_ops):
    with tf.colocate_with(iter_):

      def update_beta_op():
        # Decay the beta-power accumulators once per effective
        # (accumulated) step rather than once per mini-batch.
        update_beta1 = beta1_power.assign(
            beta1_power * self._beta1_t,
            use_locking=self._use_locking)
        update_beta2 = beta2_power.assign(
            beta2_power * self._beta2_t,
            use_locking=self._use_locking)
        return tf.group(update_beta1, update_beta2)

      # Beta powers advance only on apply steps (iter == 0, matching
      # _apply_cond); otherwise this is a no-op.
      maybe_update_beta = tf.cond(
          tf.equal(iter_, 0), update_beta_op, tf.no_op)
      with tf.control_dependencies([maybe_update_beta]):
        # Advance the accumulation counter modulo n, after the beta
        # update has (maybe) run.
        update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                   use_locking=self._use_locking)
  return tf.group(
      *update_ops + [update_iter, maybe_update_beta], name=name_scope)
[ "def", "_finish", "(", "self", ",", "update_ops", ",", "name_scope", ")", ":", "iter_", "=", "self", ".", "_get_iter_variable", "(", ")", "beta1_power", ",", "beta2_power", "=", "self", ".", "_get_beta_accumulators", "(", ")", "with", "tf", ".", "control_dependencies", "(", "update_ops", ")", ":", "with", "tf", ".", "colocate_with", "(", "iter_", ")", ":", "def", "update_beta_op", "(", ")", ":", "update_beta1", "=", "beta1_power", ".", "assign", "(", "beta1_power", "*", "self", ".", "_beta1_t", ",", "use_locking", "=", "self", ".", "_use_locking", ")", "update_beta2", "=", "beta2_power", ".", "assign", "(", "beta2_power", "*", "self", ".", "_beta2_t", ",", "use_locking", "=", "self", ".", "_use_locking", ")", "return", "tf", ".", "group", "(", "update_beta1", ",", "update_beta2", ")", "maybe_update_beta", "=", "tf", ".", "cond", "(", "tf", ".", "equal", "(", "iter_", ",", "0", ")", ",", "update_beta_op", ",", "tf", ".", "no_op", ")", "with", "tf", ".", "control_dependencies", "(", "[", "maybe_update_beta", "]", ")", ":", "update_iter", "=", "iter_", ".", "assign", "(", "tf", ".", "mod", "(", "iter_", "+", "1", ",", "self", ".", "_n_t", ")", ",", "use_locking", "=", "self", ".", "_use_locking", ")", "return", "tf", ".", "group", "(", "*", "update_ops", "+", "[", "update_iter", ",", "maybe_update_beta", "]", ",", "name", "=", "name_scope", ")" ]
Updates beta_power variables every n batches and incrs counter.
[ "Updates", "beta_power", "variables", "every", "n", "batches", "and", "incrs", "counter", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/multistep_optimizer.py#L103-L124
21,962
tensorflow/tensor2tensor
tensor2tensor/utils/devices.py
data_parallelism_from_flags
def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False): """Over which devices do we split each training batch. In old-fashioned async mode, we split the batch over all GPUs on the current worker. In sync mode, we split the batch over all the parameter server GPUs. This function returns an expert_utils.Parallelism object, which can be used to build the model. It is configured in a way that any variables created by `tf.get_variable` will be assigned to the parameter servers and shared between datashards. Args: daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. all_workers: whether the devices are all async workers or just this one. Returns: a expert_utils.Parallelism. """ dp_arg_names = inspect.getargspec(data_parallelism).args blacklist = ["daisy_chain_variables", "all_workers"] kwargs = {} for arg in dp_arg_names: if arg in blacklist: continue kwargs[arg] = getattr(tf.flags.FLAGS, arg) return data_parallelism( daisy_chain_variables=daisy_chain_variables, all_workers=all_workers, **kwargs)
python
def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False):
  """Over which devices do we split each training batch.

  In old-fashioned async mode, we split the batch over all GPUs on the
  current worker.

  In sync mode, we split the batch over all the parameter server GPUs.

  This function returns an expert_utils.Parallelism object, which can be used
  to build the model.  It is configured in a way that any variables created
  by `tf.get_variable` will be assigned to the parameter servers and shared
  between datashards.

  Args:
    daisy_chain_variables: whether to copy variables in a daisy chain on GPUs.
    all_workers: whether the devices are all async workers or just this one.

  Returns:
    a expert_utils.Parallelism.
  """
  # inspect.getargspec is deprecated and was removed in Python 3.11;
  # prefer getfullargspec when available (Python 3) and fall back on
  # getargspec for Python 2.
  if hasattr(inspect, "getfullargspec"):
    dp_arg_names = inspect.getfullargspec(data_parallelism).args
  else:
    dp_arg_names = inspect.getargspec(data_parallelism).args
  # These two are supplied explicitly below, not read from flags.
  blacklist = ["daisy_chain_variables", "all_workers"]
  kwargs = {}
  for arg in dp_arg_names:
    if arg in blacklist:
      continue
    kwargs[arg] = getattr(tf.flags.FLAGS, arg)
  return data_parallelism(
      daisy_chain_variables=daisy_chain_variables,
      all_workers=all_workers,
      **kwargs)
[ "def", "data_parallelism_from_flags", "(", "daisy_chain_variables", "=", "True", ",", "all_workers", "=", "False", ")", ":", "dp_arg_names", "=", "inspect", ".", "getargspec", "(", "data_parallelism", ")", ".", "args", "blacklist", "=", "[", "\"daisy_chain_variables\"", ",", "\"all_workers\"", "]", "kwargs", "=", "{", "}", "for", "arg", "in", "dp_arg_names", ":", "if", "arg", "in", "blacklist", ":", "continue", "kwargs", "[", "arg", "]", "=", "getattr", "(", "tf", ".", "flags", ".", "FLAGS", ",", "arg", ")", "return", "data_parallelism", "(", "daisy_chain_variables", "=", "daisy_chain_variables", ",", "all_workers", "=", "all_workers", ",", "*", "*", "kwargs", ")" ]
Over which devices do we split each training batch. In old-fashioned async mode, we split the batch over all GPUs on the current worker. In sync mode, we split the batch over all the parameter server GPUs. This function returns an expert_utils.Parallelism object, which can be used to build the model. It is configured in a way that any variables created by `tf.get_variable` will be assigned to the parameter servers and shared between datashards. Args: daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. all_workers: whether the devices are all async workers or just this one. Returns: a expert_utils.Parallelism.
[ "Over", "which", "devices", "do", "we", "split", "each", "training", "batch", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/devices.py#L26-L59
21,963
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_lm.py
concat_generator
def concat_generator(filename, up_threshold, low_threshold=10): """Generate concatenated lines from file upto up_threshold characters.""" txt = "" for line in tf.gfile.Open(filename): line = line.strip() if len(txt) + len(line) + 1 >= up_threshold: ret = txt txt = "" # We don't yield very short long parts to prevent noisy examples. if len(ret) > low_threshold and len(ret) < up_threshold: yield {"targets": ret} if not txt: txt = line else: txt = " ".join([txt, line])
python
def concat_generator(filename, up_threshold, low_threshold=10):
  """Yield space-joined lines from `filename` up to `up_threshold` chars.

  Lines are accumulated (joined with single spaces) until adding the next
  line would reach `up_threshold`; the accumulated text is then emitted --
  provided its length lies strictly between `low_threshold` and
  `up_threshold` -- and accumulation restarts from the current line.
  """
  buf = ""
  for raw_line in tf.gfile.Open(filename):
    stripped = raw_line.strip()
    if len(buf) + len(stripped) + 1 >= up_threshold:
      emitted = buf
      buf = ""
      # Very short chunks are skipped to avoid noisy examples.
      if low_threshold < len(emitted) < up_threshold:
        yield {"targets": emitted}
    buf = stripped if not buf else " ".join([buf, stripped])
[ "def", "concat_generator", "(", "filename", ",", "up_threshold", ",", "low_threshold", "=", "10", ")", ":", "txt", "=", "\"\"", "for", "line", "in", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "len", "(", "txt", ")", "+", "len", "(", "line", ")", "+", "1", ">=", "up_threshold", ":", "ret", "=", "txt", "txt", "=", "\"\"", "# We don't yield very short long parts to prevent noisy examples.", "if", "len", "(", "ret", ")", ">", "low_threshold", "and", "len", "(", "ret", ")", "<", "up_threshold", ":", "yield", "{", "\"targets\"", ":", "ret", "}", "if", "not", "txt", ":", "txt", "=", "line", "else", ":", "txt", "=", "\" \"", ".", "join", "(", "[", "txt", ",", "line", "]", ")" ]
Generate concatenated lines from file upto up_threshold characters.
[ "Generate", "concatenated", "lines", "from", "file", "upto", "up_threshold", "characters", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_lm.py#L33-L48
21,964
tensorflow/tensor2tensor
tensor2tensor/data_generators/wiki_lm.py
mix_generators
def mix_generators(generator_list): """Given python generators, generate from one, then from another, etc.""" i = 0 l = len(generator_list) stopiters_seen = 0 while stopiters_seen <= l: try: yield six.next(generator_list[i % l]) i += 1 stopiters_seen = 0 except StopIteration: i += 1 stopiters_seen += 1
python
def mix_generators(generator_list):
  """Given python generators, generate from one, then from another, etc.

  Round-robins over `generator_list`, yielding one item from each
  generator in turn and skipping exhausted generators. Iteration stops
  once a full pass over the list yields nothing (every generator is
  exhausted).

  Args:
    generator_list: list of iterators/generators.

  Yields:
    Items from the generators, interleaved round-robin.
  """
  i = 0
  num_gens = len(generator_list)
  consecutive_stops = 0
  # After num_gens consecutive StopIterations every generator is empty.
  while consecutive_stops <= num_gens:
    try:
      # Built-in next() works on Python 2.6+ and 3; six.next is unneeded.
      yield next(generator_list[i % num_gens])
      i += 1
      consecutive_stops = 0
    except StopIteration:
      i += 1
      consecutive_stops += 1
[ "def", "mix_generators", "(", "generator_list", ")", ":", "i", "=", "0", "l", "=", "len", "(", "generator_list", ")", "stopiters_seen", "=", "0", "while", "stopiters_seen", "<=", "l", ":", "try", ":", "yield", "six", ".", "next", "(", "generator_list", "[", "i", "%", "l", "]", ")", "i", "+=", "1", "stopiters_seen", "=", "0", "except", "StopIteration", ":", "i", "+=", "1", "stopiters_seen", "+=", "1" ]
Given python generators, generate from one, then from another, etc.
[ "Given", "python", "generators", "generate", "from", "one", "then", "from", "another", "etc", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_lm.py#L51-L63
21,965
tensorflow/tensor2tensor
tensor2tensor/data_generators/translate.py
compute_bleu_summaries
def compute_bleu_summaries(hook_args): """Compute BLEU core summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of tf.Summary values if hook_args.hparams contains the reference file and the translated file. """ decode_hparams = hook_args.decode_hparams if not (decode_hparams.decode_reference and decode_hparams.decode_to_file): return None values = [] bleu = 100 * bleu_hook.bleu_wrapper( decode_hparams.decode_reference, decode_hparams.decode_to_file) values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu)) tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu)) if hook_args.hparams.mlperf_mode: current_step = decode_hparams.mlperf_decode_step mlperf_log.transformer_print( key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold) mlperf_log.transformer_print( key=mlperf_log.EVAL_ACCURACY, value={ "epoch": max(current_step // decode_hparams.iterations_per_loop - 1, 0), "value": bleu }) mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP) if bleu >= decode_hparams.mlperf_threshold: decode_hparams.set_hparam("mlperf_success", True) return values
python
def compute_bleu_summaries(hook_args):
  """Compute BLEU core summaries using the decoder output.

  Args:
    hook_args: DecodeHookArgs namedtuple

  Returns:
    A list of tf.Summary values if hook_args.hparams contains the
    reference file and the translated file, otherwise None.
  """
  dhp = hook_args.decode_hparams
  if not (dhp.decode_reference and dhp.decode_to_file):
    return None

  bleu_score = 100 * bleu_hook.bleu_wrapper(
      dhp.decode_reference, dhp.decode_to_file)
  summary_values = [tf.Summary.Value(tag="BLEU", simple_value=bleu_score)]
  tf.logging.info("%s: BLEU = %6.2f" % (dhp.decode_to_file, bleu_score))

  if hook_args.hparams.mlperf_mode:
    step = dhp.mlperf_decode_step
    mlperf_log.transformer_print(
        key=mlperf_log.EVAL_TARGET, value=dhp.mlperf_threshold)
    mlperf_log.transformer_print(
        key=mlperf_log.EVAL_ACCURACY,
        value={
            "epoch": max(step // dhp.iterations_per_loop - 1, 0),
            "value": bleu_score,
        })
    mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
    if bleu_score >= dhp.mlperf_threshold:
      dhp.set_hparam("mlperf_success", True)

  return summary_values
[ "def", "compute_bleu_summaries", "(", "hook_args", ")", ":", "decode_hparams", "=", "hook_args", ".", "decode_hparams", "if", "not", "(", "decode_hparams", ".", "decode_reference", "and", "decode_hparams", ".", "decode_to_file", ")", ":", "return", "None", "values", "=", "[", "]", "bleu", "=", "100", "*", "bleu_hook", ".", "bleu_wrapper", "(", "decode_hparams", ".", "decode_reference", ",", "decode_hparams", ".", "decode_to_file", ")", "values", ".", "append", "(", "tf", ".", "Summary", ".", "Value", "(", "tag", "=", "\"BLEU\"", ",", "simple_value", "=", "bleu", ")", ")", "tf", ".", "logging", ".", "info", "(", "\"%s: BLEU = %6.2f\"", "%", "(", "decode_hparams", ".", "decode_to_file", ",", "bleu", ")", ")", "if", "hook_args", ".", "hparams", ".", "mlperf_mode", ":", "current_step", "=", "decode_hparams", ".", "mlperf_decode_step", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "EVAL_TARGET", ",", "value", "=", "decode_hparams", ".", "mlperf_threshold", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "EVAL_ACCURACY", ",", "value", "=", "{", "\"epoch\"", ":", "max", "(", "current_step", "//", "decode_hparams", ".", "iterations_per_loop", "-", "1", ",", "0", ")", ",", "\"value\"", ":", "bleu", "}", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "EVAL_STOP", ")", "if", "bleu", ">=", "decode_hparams", ".", "mlperf_threshold", ":", "decode_hparams", ".", "set_hparam", "(", "\"mlperf_success\"", ",", "True", ")", "return", "values" ]
Compute BLEU core summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of tf.Summary values if hook_args.hparams contains the reference file and the translated file.
[ "Compute", "BLEU", "core", "summaries", "using", "the", "decoder", "output", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/translate.py#L83-L118
21,966
tensorflow/tensor2tensor
tensor2tensor/data_generators/translate.py
_preprocess_sgm
def _preprocess_sgm(line, is_sgm): """Preprocessing to strip tags in SGM files.""" if not is_sgm: return line # In SGM files, remove <srcset ...>, <p>, <doc ...> lines. if line.startswith("<srcset") or line.startswith("</srcset"): return "" if line.startswith("<doc") or line.startswith("</doc"): return "" if line.startswith("<p>") or line.startswith("</p>"): return "" # Strip <seg> tags. line = line.strip() if line.startswith("<seg") and line.endswith("</seg>"): i = line.index(">") return line[i + 1:-6]
python
def _preprocess_sgm(line, is_sgm): """Preprocessing to strip tags in SGM files.""" if not is_sgm: return line # In SGM files, remove <srcset ...>, <p>, <doc ...> lines. if line.startswith("<srcset") or line.startswith("</srcset"): return "" if line.startswith("<doc") or line.startswith("</doc"): return "" if line.startswith("<p>") or line.startswith("</p>"): return "" # Strip <seg> tags. line = line.strip() if line.startswith("<seg") and line.endswith("</seg>"): i = line.index(">") return line[i + 1:-6]
[ "def", "_preprocess_sgm", "(", "line", ",", "is_sgm", ")", ":", "if", "not", "is_sgm", ":", "return", "line", "# In SGM files, remove <srcset ...>, <p>, <doc ...> lines.", "if", "line", ".", "startswith", "(", "\"<srcset\"", ")", "or", "line", ".", "startswith", "(", "\"</srcset\"", ")", ":", "return", "\"\"", "if", "line", ".", "startswith", "(", "\"<doc\"", ")", "or", "line", ".", "startswith", "(", "\"</doc\"", ")", ":", "return", "\"\"", "if", "line", ".", "startswith", "(", "\"<p>\"", ")", "or", "line", ".", "startswith", "(", "\"</p>\"", ")", ":", "return", "\"\"", "# Strip <seg> tags.", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "\"<seg\"", ")", "and", "line", ".", "endswith", "(", "\"</seg>\"", ")", ":", "i", "=", "line", ".", "index", "(", "\">\"", ")", "return", "line", "[", "i", "+", "1", ":", "-", "6", "]" ]
Preprocessing to strip tags in SGM files.
[ "Preprocessing", "to", "strip", "tags", "in", "SGM", "files", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/translate.py#L121-L136
21,967
tensorflow/tensor2tensor
tensor2tensor/data_generators/translate.py
TranslateDistillProblem.get_or_create_vocab
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): """Get vocab for distill problems.""" # We assume that vocab file is present in data_dir directory where the # data generated will be stored. vocab_filepath = os.path.join(data_dir, self.vocab_filename) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
python
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): """Get vocab for distill problems.""" # We assume that vocab file is present in data_dir directory where the # data generated will be stored. vocab_filepath = os.path.join(data_dir, self.vocab_filename) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
[ "def", "get_or_create_vocab", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "force_get", "=", "False", ")", ":", "# We assume that vocab file is present in data_dir directory where the", "# data generated will be stored.", "vocab_filepath", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "self", ".", "vocab_filename", ")", "encoder", "=", "text_encoder", ".", "SubwordTextEncoder", "(", "vocab_filepath", ")", "return", "encoder" ]
Get vocab for distill problems.
[ "Get", "vocab", "for", "distill", "problems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/translate.py#L278-L284
21,968
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_trainer.py
set_hparams_from_args
def set_hparams_from_args(args): """Set hparams overrides from unparsed args list.""" if not args: return hp_prefix = "--hp_" tf.logging.info("Found unparsed command-line arguments. Checking if any " "start with %s and interpreting those as hparams " "settings.", hp_prefix) pairs = [] i = 0 while i < len(args): arg = args[i] if arg.startswith(hp_prefix): pairs.append((arg[len(hp_prefix):], args[i+1])) i += 2 else: tf.logging.warn("Found unknown flag: %s", arg) i += 1 as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs]) if FLAGS.hparams: as_hparams = "," + as_hparams FLAGS.hparams += as_hparams
python
def set_hparams_from_args(args): """Set hparams overrides from unparsed args list.""" if not args: return hp_prefix = "--hp_" tf.logging.info("Found unparsed command-line arguments. Checking if any " "start with %s and interpreting those as hparams " "settings.", hp_prefix) pairs = [] i = 0 while i < len(args): arg = args[i] if arg.startswith(hp_prefix): pairs.append((arg[len(hp_prefix):], args[i+1])) i += 2 else: tf.logging.warn("Found unknown flag: %s", arg) i += 1 as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs]) if FLAGS.hparams: as_hparams = "," + as_hparams FLAGS.hparams += as_hparams
[ "def", "set_hparams_from_args", "(", "args", ")", ":", "if", "not", "args", ":", "return", "hp_prefix", "=", "\"--hp_\"", "tf", ".", "logging", ".", "info", "(", "\"Found unparsed command-line arguments. Checking if any \"", "\"start with %s and interpreting those as hparams \"", "\"settings.\"", ",", "hp_prefix", ")", "pairs", "=", "[", "]", "i", "=", "0", "while", "i", "<", "len", "(", "args", ")", ":", "arg", "=", "args", "[", "i", "]", "if", "arg", ".", "startswith", "(", "hp_prefix", ")", ":", "pairs", ".", "append", "(", "(", "arg", "[", "len", "(", "hp_prefix", ")", ":", "]", ",", "args", "[", "i", "+", "1", "]", ")", ")", "i", "+=", "2", "else", ":", "tf", ".", "logging", ".", "warn", "(", "\"Found unknown flag: %s\"", ",", "arg", ")", "i", "+=", "1", "as_hparams", "=", "\",\"", ".", "join", "(", "[", "\"%s=%s\"", "%", "(", "key", ",", "val", ")", "for", "key", ",", "val", "in", "pairs", "]", ")", "if", "FLAGS", ".", "hparams", ":", "as_hparams", "=", "\",\"", "+", "as_hparams", "FLAGS", ".", "hparams", "+=", "as_hparams" ]
Set hparams overrides from unparsed args list.
[ "Set", "hparams", "overrides", "from", "unparsed", "args", "list", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_trainer.py#L138-L162
21,969
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_trainer.py
create_hparams
def create_hparams(): """Create hparams.""" if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set: tf.logging.warn("Not all hyperparameter sets work on TPU. " "Prefer hparams_sets with a '_tpu' suffix, " "e.g. transformer_tpu, if available for your model.") hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams, hparams_path=hparams_path)
python
def create_hparams(): """Create hparams.""" if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set: tf.logging.warn("Not all hyperparameter sets work on TPU. " "Prefer hparams_sets with a '_tpu' suffix, " "e.g. transformer_tpu, if available for your model.") hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams, hparams_path=hparams_path)
[ "def", "create_hparams", "(", ")", ":", "if", "FLAGS", ".", "use_tpu", "and", "\"tpu\"", "not", "in", "FLAGS", ".", "hparams_set", ":", "tf", ".", "logging", ".", "warn", "(", "\"Not all hyperparameter sets work on TPU. \"", "\"Prefer hparams_sets with a '_tpu' suffix, \"", "\"e.g. transformer_tpu, if available for your model.\"", ")", "hparams_path", "=", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "output_dir", ",", "\"hparams.json\"", ")", "return", "trainer_lib", ".", "create_hparams", "(", "FLAGS", ".", "hparams_set", ",", "FLAGS", ".", "hparams", ",", "hparams_path", "=", "hparams_path", ")" ]
Create hparams.
[ "Create", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_trainer.py#L165-L173
21,970
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_trainer.py
save_metadata
def save_metadata(hparams): """Saves FLAGS and hparams to output_dir.""" output_dir = os.path.expanduser(FLAGS.output_dir) if not tf.gfile.Exists(output_dir): tf.gfile.MakeDirs(output_dir) # Save FLAGS in txt file if hasattr(FLAGS, "flags_into_string"): flags_str = FLAGS.flags_into_string() t2t_flags_str = "\n".join([ "--%s=%s" % (f.name, f.value) for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] ]) else: flags_dict = FLAGS.__dict__["__flags"] flags_str = "\n".join( ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) t2t_flags_str = None flags_txt = os.path.join(output_dir, "flags.txt") with tf.gfile.Open(flags_txt, "w") as f: f.write(flags_str) if t2t_flags_str: t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") with tf.gfile.Open(t2t_flags_txt, "w") as f: f.write(t2t_flags_str) # Save hparams as hparams.json new_hparams = hparams_lib.copy_hparams(hparams) # Modality class is not JSON serializable so remove. new_hparams.del_hparam("modality") hparams_fname = os.path.join(output_dir, "hparams.json") with tf.gfile.Open(hparams_fname, "w") as f: f.write(new_hparams.to_json(indent=0, sort_keys=True))
python
def save_metadata(hparams): """Saves FLAGS and hparams to output_dir.""" output_dir = os.path.expanduser(FLAGS.output_dir) if not tf.gfile.Exists(output_dir): tf.gfile.MakeDirs(output_dir) # Save FLAGS in txt file if hasattr(FLAGS, "flags_into_string"): flags_str = FLAGS.flags_into_string() t2t_flags_str = "\n".join([ "--%s=%s" % (f.name, f.value) for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] ]) else: flags_dict = FLAGS.__dict__["__flags"] flags_str = "\n".join( ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) t2t_flags_str = None flags_txt = os.path.join(output_dir, "flags.txt") with tf.gfile.Open(flags_txt, "w") as f: f.write(flags_str) if t2t_flags_str: t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") with tf.gfile.Open(t2t_flags_txt, "w") as f: f.write(t2t_flags_str) # Save hparams as hparams.json new_hparams = hparams_lib.copy_hparams(hparams) # Modality class is not JSON serializable so remove. new_hparams.del_hparam("modality") hparams_fname = os.path.join(output_dir, "hparams.json") with tf.gfile.Open(hparams_fname, "w") as f: f.write(new_hparams.to_json(indent=0, sort_keys=True))
[ "def", "save_metadata", "(", "hparams", ")", ":", "output_dir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "output_dir", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "output_dir", ")", ":", "tf", ".", "gfile", ".", "MakeDirs", "(", "output_dir", ")", "# Save FLAGS in txt file", "if", "hasattr", "(", "FLAGS", ",", "\"flags_into_string\"", ")", ":", "flags_str", "=", "FLAGS", ".", "flags_into_string", "(", ")", "t2t_flags_str", "=", "\"\\n\"", ".", "join", "(", "[", "\"--%s=%s\"", "%", "(", "f", ".", "name", ",", "f", ".", "value", ")", "for", "f", "in", "FLAGS", ".", "flags_by_module_dict", "(", ")", "[", "\"tensor2tensor.utils.flags\"", "]", "]", ")", "else", ":", "flags_dict", "=", "FLAGS", ".", "__dict__", "[", "\"__flags\"", "]", "flags_str", "=", "\"\\n\"", ".", "join", "(", "[", "\"--%s=%s\"", "%", "(", "name", ",", "str", "(", "f", ")", ")", "for", "(", "name", ",", "f", ")", "in", "flags_dict", ".", "items", "(", ")", "]", ")", "t2t_flags_str", "=", "None", "flags_txt", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"flags.txt\"", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "flags_txt", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "flags_str", ")", "if", "t2t_flags_str", ":", "t2t_flags_txt", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"flags_t2t.txt\"", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "t2t_flags_txt", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "t2t_flags_str", ")", "# Save hparams as hparams.json", "new_hparams", "=", "hparams_lib", ".", "copy_hparams", "(", "hparams", ")", "# Modality class is not JSON serializable so remove.", "new_hparams", ".", "del_hparam", "(", "\"modality\"", ")", "hparams_fname", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"hparams.json\"", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "hparams_fname", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "new_hparams", ".", 
"to_json", "(", "indent", "=", "0", ",", "sort_keys", "=", "True", ")", ")" ]
Saves FLAGS and hparams to output_dir.
[ "Saves", "FLAGS", "and", "hparams", "to", "output_dir", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_trainer.py#L313-L348
21,971
tensorflow/tensor2tensor
tensor2tensor/models/xception.py
residual_block
def residual_block(x, hparams): """A stack of convolution blocks with residual connection.""" k = (hparams.kernel_height, hparams.kernel_width) dilations_and_kernels = [((1, 1), k) for _ in range(3)] y = common_layers.subseparable_conv_block( x, hparams.hidden_size, dilations_and_kernels, padding="SAME", separability=0, name="residual_block") x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm") return tf.nn.dropout(x, 1.0 - hparams.dropout)
python
def residual_block(x, hparams): """A stack of convolution blocks with residual connection.""" k = (hparams.kernel_height, hparams.kernel_width) dilations_and_kernels = [((1, 1), k) for _ in range(3)] y = common_layers.subseparable_conv_block( x, hparams.hidden_size, dilations_and_kernels, padding="SAME", separability=0, name="residual_block") x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm") return tf.nn.dropout(x, 1.0 - hparams.dropout)
[ "def", "residual_block", "(", "x", ",", "hparams", ")", ":", "k", "=", "(", "hparams", ".", "kernel_height", ",", "hparams", ".", "kernel_width", ")", "dilations_and_kernels", "=", "[", "(", "(", "1", ",", "1", ")", ",", "k", ")", "for", "_", "in", "range", "(", "3", ")", "]", "y", "=", "common_layers", ".", "subseparable_conv_block", "(", "x", ",", "hparams", ".", "hidden_size", ",", "dilations_and_kernels", ",", "padding", "=", "\"SAME\"", ",", "separability", "=", "0", ",", "name", "=", "\"residual_block\"", ")", "x", "=", "common_layers", ".", "layer_norm", "(", "x", "+", "y", ",", "hparams", ".", "hidden_size", ",", "name", "=", "\"lnorm\"", ")", "return", "tf", ".", "nn", ".", "dropout", "(", "x", ",", "1.0", "-", "hparams", ".", "dropout", ")" ]
A stack of convolution blocks with residual connection.
[ "A", "stack", "of", "convolution", "blocks", "with", "residual", "connection", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/xception.py#L33-L45
21,972
tensorflow/tensor2tensor
tensor2tensor/models/xception.py
xception_internal
def xception_internal(inputs, hparams): """Xception body.""" with tf.variable_scope("xception"): cur = inputs if cur.get_shape().as_list()[1] > 200: # Large image, Xception entry flow cur = xception_entry(cur, hparams.hidden_size) else: # Small image, conv cur = common_layers.conv_block( cur, hparams.hidden_size, [((1, 1), (3, 3))], first_relu=False, padding="SAME", force2d=True, name="small_image_conv") for i in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % i): cur = residual_block(cur, hparams) return xception_exit(cur)
python
def xception_internal(inputs, hparams): """Xception body.""" with tf.variable_scope("xception"): cur = inputs if cur.get_shape().as_list()[1] > 200: # Large image, Xception entry flow cur = xception_entry(cur, hparams.hidden_size) else: # Small image, conv cur = common_layers.conv_block( cur, hparams.hidden_size, [((1, 1), (3, 3))], first_relu=False, padding="SAME", force2d=True, name="small_image_conv") for i in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % i): cur = residual_block(cur, hparams) return xception_exit(cur)
[ "def", "xception_internal", "(", "inputs", ",", "hparams", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"xception\"", ")", ":", "cur", "=", "inputs", "if", "cur", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "1", "]", ">", "200", ":", "# Large image, Xception entry flow", "cur", "=", "xception_entry", "(", "cur", ",", "hparams", ".", "hidden_size", ")", "else", ":", "# Small image, conv", "cur", "=", "common_layers", ".", "conv_block", "(", "cur", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "3", ",", "3", ")", ")", "]", ",", "first_relu", "=", "False", ",", "padding", "=", "\"SAME\"", ",", "force2d", "=", "True", ",", "name", "=", "\"small_image_conv\"", ")", "for", "i", "in", "range", "(", "hparams", ".", "num_hidden_layers", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"layer_%d\"", "%", "i", ")", ":", "cur", "=", "residual_block", "(", "cur", ",", "hparams", ")", "return", "xception_exit", "(", "cur", ")" ]
Xception body.
[ "Xception", "body", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/xception.py#L48-L70
21,973
tensorflow/tensor2tensor
tensor2tensor/models/xception.py
xception_entry
def xception_entry(inputs, hidden_dim): """Xception entry flow.""" with tf.variable_scope("xception_entry"): def xnet_resblock(x, filters, res_relu, name): """Resblock.""" with tf.variable_scope(name): y = common_layers.separable_conv_block( x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding="SAME", force2d=True, name="sep_conv_block") y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) return y + common_layers.conv_block( x, filters, [((1, 1), (1, 1))], padding="SAME", strides=(2, 2), first_relu=res_relu, force2d=True, name="res_conv0") tf.summary.image("inputs", inputs, max_outputs=2) x = common_layers.conv_block( inputs, 32, [((1, 1), (3, 3))], first_relu=False, padding="SAME", strides=(2, 2), force2d=True, name="conv0") x = common_layers.conv_block( x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1") x = xnet_resblock(x, min(128, hidden_dim), True, "block0") x = xnet_resblock(x, min(256, hidden_dim), False, "block1") return xnet_resblock(x, hidden_dim, False, "block2")
python
def xception_entry(inputs, hidden_dim): """Xception entry flow.""" with tf.variable_scope("xception_entry"): def xnet_resblock(x, filters, res_relu, name): """Resblock.""" with tf.variable_scope(name): y = common_layers.separable_conv_block( x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding="SAME", force2d=True, name="sep_conv_block") y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) return y + common_layers.conv_block( x, filters, [((1, 1), (1, 1))], padding="SAME", strides=(2, 2), first_relu=res_relu, force2d=True, name="res_conv0") tf.summary.image("inputs", inputs, max_outputs=2) x = common_layers.conv_block( inputs, 32, [((1, 1), (3, 3))], first_relu=False, padding="SAME", strides=(2, 2), force2d=True, name="conv0") x = common_layers.conv_block( x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1") x = xnet_resblock(x, min(128, hidden_dim), True, "block0") x = xnet_resblock(x, min(256, hidden_dim), False, "block1") return xnet_resblock(x, hidden_dim, False, "block2")
[ "def", "xception_entry", "(", "inputs", ",", "hidden_dim", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"xception_entry\"", ")", ":", "def", "xnet_resblock", "(", "x", ",", "filters", ",", "res_relu", ",", "name", ")", ":", "\"\"\"Resblock.\"\"\"", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "y", "=", "common_layers", ".", "separable_conv_block", "(", "x", ",", "filters", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "3", ",", "3", ")", ")", ",", "(", "(", "1", ",", "1", ")", ",", "(", "3", ",", "3", ")", ")", "]", ",", "first_relu", "=", "True", ",", "padding", "=", "\"SAME\"", ",", "force2d", "=", "True", ",", "name", "=", "\"sep_conv_block\"", ")", "y", "=", "common_layers", ".", "pool", "(", "y", ",", "(", "3", ",", "3", ")", ",", "\"MAX\"", ",", "\"SAME\"", ",", "strides", "=", "(", "2", ",", "2", ")", ")", "return", "y", "+", "common_layers", ".", "conv_block", "(", "x", ",", "filters", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "1", ",", "1", ")", ")", "]", ",", "padding", "=", "\"SAME\"", ",", "strides", "=", "(", "2", ",", "2", ")", ",", "first_relu", "=", "res_relu", ",", "force2d", "=", "True", ",", "name", "=", "\"res_conv0\"", ")", "tf", ".", "summary", ".", "image", "(", "\"inputs\"", ",", "inputs", ",", "max_outputs", "=", "2", ")", "x", "=", "common_layers", ".", "conv_block", "(", "inputs", ",", "32", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "3", ",", "3", ")", ")", "]", ",", "first_relu", "=", "False", ",", "padding", "=", "\"SAME\"", ",", "strides", "=", "(", "2", ",", "2", ")", ",", "force2d", "=", "True", ",", "name", "=", "\"conv0\"", ")", "x", "=", "common_layers", ".", "conv_block", "(", "x", ",", "64", ",", "[", "(", "(", "1", ",", "1", ")", ",", "(", "3", ",", "3", ")", ")", "]", ",", "padding", "=", "\"SAME\"", ",", "force2d", "=", "True", ",", "name", "=", "\"conv1\"", ")", "x", "=", "xnet_resblock", "(", "x", ",", "min", "(", "128", ",", "hidden_dim", ")", ",", "True", ",", "\"block0\"", 
")", "x", "=", "xnet_resblock", "(", "x", ",", "min", "(", "256", ",", "hidden_dim", ")", ",", "False", ",", "\"block1\"", ")", "return", "xnet_resblock", "(", "x", ",", "hidden_dim", ",", "False", ",", "\"block2\"", ")" ]
Xception entry flow.
[ "Xception", "entry", "flow", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/xception.py#L73-L110
21,974
tensorflow/tensor2tensor
tensor2tensor/models/xception.py
xception_exit
def xception_exit(inputs): """Xception exit flow.""" with tf.variable_scope("xception_exit"): x = inputs x_shape = x.get_shape().as_list() if x_shape[1] is None or x_shape[2] is None: length_float = tf.to_float(tf.shape(x)[1]) length_float *= tf.to_float(tf.shape(x)[2]) spatial_dim_float = tf.sqrt(length_float) spatial_dim = tf.to_int32(spatial_dim_float) x_depth = x_shape[3] x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) elif x_shape[1] != x_shape[2]: spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2]))) if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]: raise ValueError("Assumed inputs were square-able but they were " "not. Shape: %s" % x_shape) x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME") return tf.nn.relu(x)
python
def xception_exit(inputs): """Xception exit flow.""" with tf.variable_scope("xception_exit"): x = inputs x_shape = x.get_shape().as_list() if x_shape[1] is None or x_shape[2] is None: length_float = tf.to_float(tf.shape(x)[1]) length_float *= tf.to_float(tf.shape(x)[2]) spatial_dim_float = tf.sqrt(length_float) spatial_dim = tf.to_int32(spatial_dim_float) x_depth = x_shape[3] x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) elif x_shape[1] != x_shape[2]: spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2]))) if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]: raise ValueError("Assumed inputs were square-able but they were " "not. Shape: %s" % x_shape) x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME") return tf.nn.relu(x)
[ "def", "xception_exit", "(", "inputs", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"xception_exit\"", ")", ":", "x", "=", "inputs", "x_shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "if", "x_shape", "[", "1", "]", "is", "None", "or", "x_shape", "[", "2", "]", "is", "None", ":", "length_float", "=", "tf", ".", "to_float", "(", "tf", ".", "shape", "(", "x", ")", "[", "1", "]", ")", "length_float", "*=", "tf", ".", "to_float", "(", "tf", ".", "shape", "(", "x", ")", "[", "2", "]", ")", "spatial_dim_float", "=", "tf", ".", "sqrt", "(", "length_float", ")", "spatial_dim", "=", "tf", ".", "to_int32", "(", "spatial_dim_float", ")", "x_depth", "=", "x_shape", "[", "3", "]", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "spatial_dim", ",", "spatial_dim", ",", "x_depth", "]", ")", "elif", "x_shape", "[", "1", "]", "!=", "x_shape", "[", "2", "]", ":", "spatial_dim", "=", "int", "(", "math", ".", "sqrt", "(", "float", "(", "x_shape", "[", "1", "]", "*", "x_shape", "[", "2", "]", ")", ")", ")", "if", "spatial_dim", "*", "spatial_dim", "!=", "x_shape", "[", "1", "]", "*", "x_shape", "[", "2", "]", ":", "raise", "ValueError", "(", "\"Assumed inputs were square-able but they were \"", "\"not. Shape: %s\"", "%", "x_shape", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "spatial_dim", ",", "spatial_dim", ",", "x_depth", "]", ")", "x", "=", "common_layers", ".", "conv_block_downsample", "(", "x", ",", "(", "3", ",", "3", ")", ",", "(", "2", ",", "2", ")", ",", "\"SAME\"", ")", "return", "tf", ".", "nn", ".", "relu", "(", "x", ")" ]
Xception exit flow.
[ "Xception", "exit", "flow", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/xception.py#L113-L133
21,975
tensorflow/tensor2tensor
tensor2tensor/data_generators/wikisum/html.py
get_text_from_html
def get_text_from_html(html): """Returns a plaintext representation of HTML content.""" try: soup = bs4.BeautifulSoup(html, "html.parser") except: # pylint: disable=bare-except # Some docs don't parse return "" # Remove script and style tags for s in soup(["script", "style"]): s.decompose() return "\n".join([s for s in _soup_strings(soup)])
python
def get_text_from_html(html): """Returns a plaintext representation of HTML content.""" try: soup = bs4.BeautifulSoup(html, "html.parser") except: # pylint: disable=bare-except # Some docs don't parse return "" # Remove script and style tags for s in soup(["script", "style"]): s.decompose() return "\n".join([s for s in _soup_strings(soup)])
[ "def", "get_text_from_html", "(", "html", ")", ":", "try", ":", "soup", "=", "bs4", ".", "BeautifulSoup", "(", "html", ",", "\"html.parser\"", ")", "except", ":", "# pylint: disable=bare-except", "# Some docs don't parse", "return", "\"\"", "# Remove script and style tags", "for", "s", "in", "soup", "(", "[", "\"script\"", ",", "\"style\"", "]", ")", ":", "s", ".", "decompose", "(", ")", "return", "\"\\n\"", ".", "join", "(", "[", "s", "for", "s", "in", "_soup_strings", "(", "soup", ")", "]", ")" ]
Returns a plaintext representation of HTML content.
[ "Returns", "a", "plaintext", "representation", "of", "HTML", "content", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/html.py#L21-L32
21,976
tensorflow/tensor2tensor
tensor2tensor/data_generators/wikisum/html.py
_soup_strings
def _soup_strings(soup): """Return text strings in soup.""" paragraph_tags = set([ "caption", "details", "h1", "h2", "h3", "h4", "h5", "h6", "li", "p", "td", "div", "span" ]) skip_children = None for descendant in soup.descendants: # If we've treated a tag as a contiguous paragraph, don't re-emit the # children (see below). if skip_children is not None: try: in_skip = descendant in skip_children # pylint: disable=unsupported-membership-test except RecursionError: # pylint: disable=undefined-variable # Possible for this check to hit a nasty infinite recursion because of # BeautifulSoup __eq__ checks. in_skip = True if in_skip: continue else: skip_children = None # Treat some tags as contiguous paragraphs, regardless of other tags nested # inside (like <a> or <b>). if isinstance(descendant, bs4.Tag): if descendant.name in paragraph_tags: if descendant.find_all(paragraph_tags): # If there are nested paragraph tags, don't treat it as a single # contiguous tag. continue skip_children = list(descendant.descendants) text = " ".join(descendant.get_text(" ", strip=True).split()) if text: yield text continue if (isinstance(descendant, bs4.Comment) or not isinstance(descendant, bs4.NavigableString)): continue text = " ".join(descendant.strip().split()) if text: yield text
python
def _soup_strings(soup): """Return text strings in soup.""" paragraph_tags = set([ "caption", "details", "h1", "h2", "h3", "h4", "h5", "h6", "li", "p", "td", "div", "span" ]) skip_children = None for descendant in soup.descendants: # If we've treated a tag as a contiguous paragraph, don't re-emit the # children (see below). if skip_children is not None: try: in_skip = descendant in skip_children # pylint: disable=unsupported-membership-test except RecursionError: # pylint: disable=undefined-variable # Possible for this check to hit a nasty infinite recursion because of # BeautifulSoup __eq__ checks. in_skip = True if in_skip: continue else: skip_children = None # Treat some tags as contiguous paragraphs, regardless of other tags nested # inside (like <a> or <b>). if isinstance(descendant, bs4.Tag): if descendant.name in paragraph_tags: if descendant.find_all(paragraph_tags): # If there are nested paragraph tags, don't treat it as a single # contiguous tag. continue skip_children = list(descendant.descendants) text = " ".join(descendant.get_text(" ", strip=True).split()) if text: yield text continue if (isinstance(descendant, bs4.Comment) or not isinstance(descendant, bs4.NavigableString)): continue text = " ".join(descendant.strip().split()) if text: yield text
[ "def", "_soup_strings", "(", "soup", ")", ":", "paragraph_tags", "=", "set", "(", "[", "\"caption\"", ",", "\"details\"", ",", "\"h1\"", ",", "\"h2\"", ",", "\"h3\"", ",", "\"h4\"", ",", "\"h5\"", ",", "\"h6\"", ",", "\"li\"", ",", "\"p\"", ",", "\"td\"", ",", "\"div\"", ",", "\"span\"", "]", ")", "skip_children", "=", "None", "for", "descendant", "in", "soup", ".", "descendants", ":", "# If we've treated a tag as a contiguous paragraph, don't re-emit the", "# children (see below).", "if", "skip_children", "is", "not", "None", ":", "try", ":", "in_skip", "=", "descendant", "in", "skip_children", "# pylint: disable=unsupported-membership-test", "except", "RecursionError", ":", "# pylint: disable=undefined-variable", "# Possible for this check to hit a nasty infinite recursion because of", "# BeautifulSoup __eq__ checks.", "in_skip", "=", "True", "if", "in_skip", ":", "continue", "else", ":", "skip_children", "=", "None", "# Treat some tags as contiguous paragraphs, regardless of other tags nested", "# inside (like <a> or <b>).", "if", "isinstance", "(", "descendant", ",", "bs4", ".", "Tag", ")", ":", "if", "descendant", ".", "name", "in", "paragraph_tags", ":", "if", "descendant", ".", "find_all", "(", "paragraph_tags", ")", ":", "# If there are nested paragraph tags, don't treat it as a single", "# contiguous tag.", "continue", "skip_children", "=", "list", "(", "descendant", ".", "descendants", ")", "text", "=", "\" \"", ".", "join", "(", "descendant", ".", "get_text", "(", "\" \"", ",", "strip", "=", "True", ")", ".", "split", "(", ")", ")", "if", "text", ":", "yield", "text", "continue", "if", "(", "isinstance", "(", "descendant", ",", "bs4", ".", "Comment", ")", "or", "not", "isinstance", "(", "descendant", ",", "bs4", ".", "NavigableString", ")", ")", ":", "continue", "text", "=", "\" \"", ".", "join", "(", "descendant", ".", "strip", "(", ")", ".", "split", "(", ")", ")", "if", "text", ":", "yield", "text" ]
Return text strings in soup.
[ "Return", "text", "strings", "in", "soup", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/html.py#L35-L78
21,977
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p(): """Gets to 2.92 in just under 4 days on 8 p100s.""" hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l() hparams.num_decoder_layers = 14 hparams.batch_size = 8 hparams.layer_prepostprocess_dropout = 0.2 return hparams
python
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p(): """Gets to 2.92 in just under 4 days on 8 p100s.""" hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l() hparams.num_decoder_layers = 14 hparams.batch_size = 8 hparams.layer_prepostprocess_dropout = 0.2 return hparams
[ "def", "imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p", "(", ")", ":", "hparams", "=", "imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l", "(", ")", "hparams", ".", "num_decoder_layers", "=", "14", "hparams", ".", "batch_size", "=", "8", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.2", "return", "hparams" ]
Gets to 2.92 in just under 4 days on 8 p100s.
[ "Gets", "to", "2", ".", "92", "in", "just", "under", "4", "days", "on", "8", "p100s", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L537-L543
21,978
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1
def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1(): """For 256x256.""" hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() # TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in # image transformer training implementation? # hparams.img_len = 256 hparams.max_length = 66000 # allow for 256x256 hparams.batch_size = 1 hparams.num_decoder_layers = 5 hparams.hidden_size = 128 hparams.filter_size = 128 hparams.attention_key_channels = 64 hparams.attention_value_channels = 64 hparams.layer_prepostprocess_dropout = 0.0 return hparams
python
def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1(): """For 256x256.""" hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() # TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in # image transformer training implementation? # hparams.img_len = 256 hparams.max_length = 66000 # allow for 256x256 hparams.batch_size = 1 hparams.num_decoder_layers = 5 hparams.hidden_size = 128 hparams.filter_size = 128 hparams.attention_key_channels = 64 hparams.attention_value_channels = 64 hparams.layer_prepostprocess_dropout = 0.0 return hparams
[ "def", "imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1", "(", ")", ":", "hparams", "=", "imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g", "(", ")", "# TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in", "# image transformer training implementation?", "# hparams.img_len = 256", "hparams", ".", "max_length", "=", "66000", "# allow for 256x256", "hparams", ".", "batch_size", "=", "1", "hparams", ".", "num_decoder_layers", "=", "5", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "filter_size", "=", "128", "hparams", ".", "attention_key_channels", "=", "64", "hparams", ".", "attention_value_channels", "=", "64", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.0", "return", "hparams" ]
For 256x256.
[ "For", "256x256", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L565-L579
21,979
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated(): """Dilated hparams.""" hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan() hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0] hparams.dec_attention_type = cia.AttentionType.DILATED hparams.block_length = 128 hparams.block_width = 128 hparams.add_hparam("num_memory_blocks", 1) return hparams
python
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated(): """Dilated hparams.""" hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan() hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0] hparams.dec_attention_type = cia.AttentionType.DILATED hparams.block_length = 128 hparams.block_width = 128 hparams.add_hparam("num_memory_blocks", 1) return hparams
[ "def", "imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated", "(", ")", ":", "hparams", "=", "imagetransformer_base_8l_8h_big_cond_dr03_dan", "(", ")", "hparams", ".", "gap_sizes", "=", "[", "0", ",", "16", ",", "64", ",", "0", ",", "16", ",", "64", ",", "128", ",", "0", "]", "hparams", ".", "dec_attention_type", "=", "cia", ".", "AttentionType", ".", "DILATED", "hparams", ".", "block_length", "=", "128", "hparams", ".", "block_width", "=", "128", "hparams", ".", "add_hparam", "(", "\"num_memory_blocks\"", ",", "1", ")", "return", "hparams" ]
Dilated hparams.
[ "Dilated", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L635-L643
21,980
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer1d_base_8l_64by64
def imagetransformer1d_base_8l_64by64(): """hparams fo 12 layer big 1d model for imagenet 64x64.""" hparams = image_transformer_base() hparams.num_heads = 8 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.num_decoder_layers = 8 hparams.batch_size = 1 hparams.block_length = 512 hparams.block_width = 768 hparams.layer_prepostprocess_dropout = 0.1 hparams.max_length = 14000 hparams.unconditional = int(False) return hparams
python
def imagetransformer1d_base_8l_64by64(): """hparams fo 12 layer big 1d model for imagenet 64x64.""" hparams = image_transformer_base() hparams.num_heads = 8 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.num_decoder_layers = 8 hparams.batch_size = 1 hparams.block_length = 512 hparams.block_width = 768 hparams.layer_prepostprocess_dropout = 0.1 hparams.max_length = 14000 hparams.unconditional = int(False) return hparams
[ "def", "imagetransformer1d_base_8l_64by64", "(", ")", ":", "hparams", "=", "image_transformer_base", "(", ")", "hparams", ".", "num_heads", "=", "8", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "num_decoder_layers", "=", "8", "hparams", ".", "batch_size", "=", "1", "hparams", ".", "block_length", "=", "512", "hparams", ".", "block_width", "=", "768", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "hparams", ".", "max_length", "=", "14000", "hparams", ".", "unconditional", "=", "int", "(", "False", ")", "return", "hparams" ]
hparams fo 12 layer big 1d model for imagenet 64x64.
[ "hparams", "fo", "12", "layer", "big", "1d", "model", "for", "imagenet", "64x64", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L688-L701
21,981
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_moe_tiny
def imagetransformer_moe_tiny(): """Set of hyperparameters for a very small imagetransformer with MoE.""" hparams = imagetransformer_tiny() hparams.hidden_size = 64 hparams.batch_size = 1 hparams.num_hidden_layers = 3 hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE. hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated). hparams.moe_num_experts = 16 # Number of experts in each MoE layer. hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4). hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok). return hparams
python
def imagetransformer_moe_tiny(): """Set of hyperparameters for a very small imagetransformer with MoE.""" hparams = imagetransformer_tiny() hparams.hidden_size = 64 hparams.batch_size = 1 hparams.num_hidden_layers = 3 hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE. hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated). hparams.moe_num_experts = 16 # Number of experts in each MoE layer. hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4). hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok). return hparams
[ "def", "imagetransformer_moe_tiny", "(", ")", ":", "hparams", "=", "imagetransformer_tiny", "(", ")", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "batch_size", "=", "1", "hparams", ".", "num_hidden_layers", "=", "3", "hparams", ".", "dec_attention_type", "=", "cia", ".", "AttentionType", ".", "MOE_LOCAL_1D", "hparams", ".", "add_hparam", "(", "\"moe_layers_decoder\"", ",", "\"1\"", ")", "# Which layer is MoE.", "hparams", ".", "moe_hidden_sizes", "=", "\"1024\"", "# Hidden layer sizes (comma-separated).", "hparams", ".", "moe_num_experts", "=", "16", "# Number of experts in each MoE layer.", "hparams", ".", "moe_k", "=", "2", "# How many experts to use per batch element (try 2 or 4).", "hparams", ".", "moe_loss_coef", "=", "1e-2", "# MoE loss coefficient (1e-2 is usually ok).", "return", "hparams" ]
Set of hyperparameters for a very small imagetransformer with MoE.
[ "Set", "of", "hyperparameters", "for", "a", "very", "small", "imagetransformer", "with", "MoE", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L918-L930
21,982
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_sep_channels_8l_tpu
def imagetransformer_sep_channels_8l_tpu(): """Hparams for training imagetransformer on tpu.""" hparams = imagetransformer_sep_channels_8l() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.shared_embedding_and_softmax_weights = False return hparams
python
def imagetransformer_sep_channels_8l_tpu(): """Hparams for training imagetransformer on tpu.""" hparams = imagetransformer_sep_channels_8l() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.shared_embedding_and_softmax_weights = False return hparams
[ "def", "imagetransformer_sep_channels_8l_tpu", "(", ")", ":", "hparams", "=", "imagetransformer_sep_channels_8l", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "num_heads", "=", "4", "# heads are expensive on tpu", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "return", "hparams" ]
Hparams for training imagetransformer on tpu.
[ "Hparams", "for", "training", "imagetransformer", "on", "tpu", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L941-L948
21,983
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_b10l_4h_big_uncond_dr03_tpu
def imagetransformer_b10l_4h_big_uncond_dr03_tpu(): """Small model for tpu cifar 10.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 10 hparams.block_length = 128 hparams.hidden_size = 512 hparams.filter_size = 1024 hparams.learning_rate = 0.2 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" return hparams
python
def imagetransformer_b10l_4h_big_uncond_dr03_tpu(): """Small model for tpu cifar 10.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 10 hparams.block_length = 128 hparams.hidden_size = 512 hparams.filter_size = 1024 hparams.learning_rate = 0.2 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" return hparams
[ "def", "imagetransformer_b10l_4h_big_uncond_dr03_tpu", "(", ")", ":", "hparams", "=", "imagetransformer_bas8l_8h_big_uncond_dr03_imgnet", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "num_heads", "=", "4", "# heads are expensive on tpu", "hparams", ".", "num_decoder_layers", "=", "10", "hparams", ".", "block_length", "=", "128", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "1024", "hparams", ".", "learning_rate", "=", "0.2", "hparams", ".", "layer_preprocess_sequence", "=", "\"none\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"dan\"", "return", "hparams" ]
Small model for tpu cifar 10.
[ "Small", "model", "for", "tpu", "cifar", "10", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L952-L965
21,984
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_b10l_dr03_moe_tpu
def imagetransformer_b10l_dr03_moe_tpu(): """Moe tpu params.""" hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 10 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.ffn_layer = "local_moe_tpu" return hparams
python
def imagetransformer_b10l_dr03_moe_tpu(): """Moe tpu params.""" hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 10 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.ffn_layer = "local_moe_tpu" return hparams
[ "def", "imagetransformer_b10l_dr03_moe_tpu", "(", ")", ":", "hparams", "=", "imagetransformer_b10l_4h_big_uncond_dr03_tpu", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "num_heads", "=", "4", "# heads are expensive on tpu", "hparams", ".", "num_decoder_layers", "=", "10", "hparams", ".", "layer_preprocess_sequence", "=", "\"none\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"dan\"", "hparams", ".", "ffn_layer", "=", "\"local_moe_tpu\"", "return", "hparams" ]
Moe tpu params.
[ "Moe", "tpu", "params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L969-L979
21,985
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_cifar_tpu_range
def imagetransformer_cifar_tpu_range(rhp): """Range of hyperparameters for vizier.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE) rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16]) rhp.set_discrete("hidden_size", [256, 512, 1024]) rhp.set_discrete("block_length", [128, 256, 512]) rhp.set_categorical("dec_attention_type", [ cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
python
def imagetransformer_cifar_tpu_range(rhp): """Range of hyperparameters for vizier.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE) rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16]) rhp.set_discrete("hidden_size", [256, 512, 1024]) rhp.set_discrete("block_length", [128, 256, 512]) rhp.set_categorical("dec_attention_type", [ cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
[ "def", "imagetransformer_cifar_tpu_range", "(", "rhp", ")", ":", "# After starting from base, set intervals for some parameters.", "rhp", ".", "set_float", "(", "\"learning_rate\"", ",", "0.01", ",", "1.0", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_discrete", "(", "\"num_decoder_layers\"", ",", "[", "8", ",", "10", ",", "12", ",", "14", ",", "16", "]", ")", "rhp", ".", "set_discrete", "(", "\"hidden_size\"", ",", "[", "256", ",", "512", ",", "1024", "]", ")", "rhp", ".", "set_discrete", "(", "\"block_length\"", ",", "[", "128", ",", "256", ",", "512", "]", ")", "rhp", ".", "set_categorical", "(", "\"dec_attention_type\"", ",", "[", "cia", ".", "AttentionType", ".", "RELATIVE_LOCAL_1D", ",", "cia", ".", "AttentionType", ".", "LOCAL_1D", "]", ")" ]
Range of hyperparameters for vizier.
[ "Range", "of", "hyperparameters", "for", "vizier", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L1054-L1062
21,986
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_b12l_4h_b128_h512_uncond_dr01_im
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im(): """TPU related imagenet model.""" hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 6000 hparams.layer_prepostprocess_dropout = 0.1 return hparams
python
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im(): """TPU related imagenet model.""" hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 6000 hparams.layer_prepostprocess_dropout = 0.1 return hparams
[ "def", "imagetransformer_b12l_4h_b128_h512_uncond_dr01_im", "(", ")", ":", "hparams", "=", "imagetransformer_b12l_4h_b256_uncond_dr03_tpu", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_schedule", "=", "\"rsqrt_decay\"", "hparams", ".", "learning_rate_warmup_steps", "=", "6000", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "return", "hparams" ]
TPU related imagenet model.
[ "TPU", "related", "imagenet", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L1085-L1094
21,987
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_b12l_4h_b128_uncond_dr03_tpu
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu(): """TPU config for cifar 10.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 2 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 12 hparams.block_length = 128 hparams.hidden_size = 256 hparams.filter_size = 2048 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.1 hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 return hparams
python
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu(): """TPU config for cifar 10.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 2 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 12 hparams.block_length = 128 hparams.hidden_size = 256 hparams.filter_size = 2048 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.1 hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 return hparams
[ "def", "imagetransformer_b12l_4h_b128_uncond_dr03_tpu", "(", ")", ":", "hparams", "=", "imagetransformer_bas8l_8h_big_uncond_dr03_imgnet", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "num_heads", "=", "4", "# heads are expensive on tpu", "hparams", ".", "num_decoder_layers", "=", "12", "hparams", ".", "block_length", "=", "128", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "layer_preprocess_sequence", "=", "\"none\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"dan\"", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.1", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_schedule", "=", "\"rsqrt_decay\"", "hparams", ".", "learning_rate_warmup_steps", "=", "10000", "return", "hparams" ]
TPU config for cifar 10.
[ "TPU", "config", "for", "cifar", "10", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L1110-L1126
21,988
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
imagetransformer_b12l_8h_b256_uncond_dr03_tpu
def imagetransformer_b12l_8h_b256_uncond_dr03_tpu(): """TPU related 12 layer 8 heads model.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 2 hparams.num_heads = 8 # heads are expensive on tpu hparams.num_decoder_layers = 12 hparams.block_length = 256 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 return hparams
python
def imagetransformer_b12l_8h_b256_uncond_dr03_tpu(): """TPU related 12 layer 8 heads model.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 2 hparams.num_heads = 8 # heads are expensive on tpu hparams.num_decoder_layers = 12 hparams.block_length = 256 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 return hparams
[ "def", "imagetransformer_b12l_8h_b256_uncond_dr03_tpu", "(", ")", ":", "hparams", "=", "imagetransformer_bas8l_8h_big_uncond_dr03_imgnet", "(", ")", "update_hparams_for_tpu", "(", "hparams", ")", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "num_heads", "=", "8", "# heads are expensive on tpu", "hparams", ".", "num_decoder_layers", "=", "12", "hparams", ".", "block_length", "=", "256", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "layer_preprocess_sequence", "=", "\"none\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"dan\"", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.3", "return", "hparams" ]
TPU related 12 layer 8 heads model.
[ "TPU", "related", "12", "layer", "8", "heads", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L1130-L1143
21,989
tensorflow/tensor2tensor
tensor2tensor/rl/restarter.py
Restarter.training_loop
def training_loop(self): """Context manager wrapping the training loop, updates step counters.""" if not self.restarting: self._write_counters(self._local_step_at_start, self._global_step) tf.logging.info( "Training %s up to %d, %d to go", self.model_mode, self.target_local_step, self.steps_to_go ) yield self._write_counters(self.target_local_step, -1)
python
def training_loop(self): """Context manager wrapping the training loop, updates step counters.""" if not self.restarting: self._write_counters(self._local_step_at_start, self._global_step) tf.logging.info( "Training %s up to %d, %d to go", self.model_mode, self.target_local_step, self.steps_to_go ) yield self._write_counters(self.target_local_step, -1)
[ "def", "training_loop", "(", "self", ")", ":", "if", "not", "self", ".", "restarting", ":", "self", ".", "_write_counters", "(", "self", ".", "_local_step_at_start", ",", "self", ".", "_global_step", ")", "tf", ".", "logging", ".", "info", "(", "\"Training %s up to %d, %d to go\"", ",", "self", ".", "model_mode", ",", "self", ".", "target_local_step", ",", "self", ".", "steps_to_go", ")", "yield", "self", ".", "_write_counters", "(", "self", ".", "target_local_step", ",", "-", "1", ")" ]
Context manager wrapping the training loop, updates step counters.
[ "Context", "manager", "wrapping", "the", "training", "loop", "updates", "step", "counters", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/restarter.py#L90-L102
21,990
tensorflow/tensor2tensor
tensor2tensor/data_generators/ptb.py
_read_words
def _read_words(filename): """Reads words from a file.""" with tf.gfile.GFile(filename, "r") as f: if sys.version_info[0] >= 3: return f.read().replace("\n", " %s " % EOS).split() else: return f.read().decode("utf-8").replace("\n", " %s " % EOS).split()
python
def _read_words(filename): """Reads words from a file.""" with tf.gfile.GFile(filename, "r") as f: if sys.version_info[0] >= 3: return f.read().replace("\n", " %s " % EOS).split() else: return f.read().decode("utf-8").replace("\n", " %s " % EOS).split()
[ "def", "_read_words", "(", "filename", ")", ":", "with", "tf", ".", "gfile", ".", "GFile", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "return", "f", ".", "read", "(", ")", ".", "replace", "(", "\"\\n\"", ",", "\" %s \"", "%", "EOS", ")", ".", "split", "(", ")", "else", ":", "return", "f", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ".", "replace", "(", "\"\\n\"", ",", "\" %s \"", "%", "EOS", ")", ".", "split", "(", ")" ]
Reads words from a file.
[ "Reads", "words", "from", "a", "file", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/ptb.py#L39-L45
21,991
tensorflow/tensor2tensor
tensor2tensor/data_generators/ptb.py
_build_vocab
def _build_vocab(filename, vocab_path, vocab_size): """Reads a file to build a vocabulary of `vocab_size` most common words. The vocabulary is sorted by occurrence count and has one word per line. Originally from: https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/reader.py Args: filename: file to read list of words from. vocab_path: path where to save the vocabulary. vocab_size: size of the vocabulary to generate. """ data = _read_words(filename) counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) words = words[:vocab_size] with open(vocab_path, "w") as f: f.write("\n".join(words))
python
def _build_vocab(filename, vocab_path, vocab_size): """Reads a file to build a vocabulary of `vocab_size` most common words. The vocabulary is sorted by occurrence count and has one word per line. Originally from: https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/reader.py Args: filename: file to read list of words from. vocab_path: path where to save the vocabulary. vocab_size: size of the vocabulary to generate. """ data = _read_words(filename) counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) words = words[:vocab_size] with open(vocab_path, "w") as f: f.write("\n".join(words))
[ "def", "_build_vocab", "(", "filename", ",", "vocab_path", ",", "vocab_size", ")", ":", "data", "=", "_read_words", "(", "filename", ")", "counter", "=", "collections", ".", "Counter", "(", "data", ")", "count_pairs", "=", "sorted", "(", "counter", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "(", "-", "x", "[", "1", "]", ",", "x", "[", "0", "]", ")", ")", "words", ",", "_", "=", "list", "(", "zip", "(", "*", "count_pairs", ")", ")", "words", "=", "words", "[", ":", "vocab_size", "]", "with", "open", "(", "vocab_path", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "words", ")", ")" ]
Reads a file to build a vocabulary of `vocab_size` most common words. The vocabulary is sorted by occurrence count and has one word per line. Originally from: https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/reader.py Args: filename: file to read list of words from. vocab_path: path where to save the vocabulary. vocab_size: size of the vocabulary to generate.
[ "Reads", "a", "file", "to", "build", "a", "vocabulary", "of", "vocab_size", "most", "common", "words", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/ptb.py#L48-L66
21,992
tensorflow/tensor2tensor
tensor2tensor/data_generators/ptb.py
_get_token_encoder
def _get_token_encoder(vocab_dir, vocab_name, filename): """Reads from file and returns a `TokenTextEncoder` for the vocabulary.""" vocab_path = os.path.join(vocab_dir, vocab_name) if not tf.gfile.Exists(vocab_path): _build_vocab(filename, vocab_path, 10000) return text_encoder.TokenTextEncoder(vocab_path)
python
def _get_token_encoder(vocab_dir, vocab_name, filename): """Reads from file and returns a `TokenTextEncoder` for the vocabulary.""" vocab_path = os.path.join(vocab_dir, vocab_name) if not tf.gfile.Exists(vocab_path): _build_vocab(filename, vocab_path, 10000) return text_encoder.TokenTextEncoder(vocab_path)
[ "def", "_get_token_encoder", "(", "vocab_dir", ",", "vocab_name", ",", "filename", ")", ":", "vocab_path", "=", "os", ".", "path", ".", "join", "(", "vocab_dir", ",", "vocab_name", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "vocab_path", ")", ":", "_build_vocab", "(", "filename", ",", "vocab_path", ",", "10000", ")", "return", "text_encoder", ".", "TokenTextEncoder", "(", "vocab_path", ")" ]
Reads from file and returns a `TokenTextEncoder` for the vocabulary.
[ "Reads", "from", "file", "and", "returns", "a", "TokenTextEncoder", "for", "the", "vocabulary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/ptb.py#L69-L74
21,993
tensorflow/tensor2tensor
tensor2tensor/visualization/attention.py
resize
def resize(att_mat, max_length=None): """Normalize attention matrices and reshape as necessary.""" for i, att in enumerate(att_mat): # Add extra batch dim for viz code to work. if att.ndim == 3: att = np.expand_dims(att, axis=0) if max_length is not None: # Sum across different attention values for each token. att = att[:, :, :max_length, :max_length] row_sums = np.sum(att, axis=2) # Normalize att /= row_sums[:, :, np.newaxis] att_mat[i] = att return att_mat
python
def resize(att_mat, max_length=None): """Normalize attention matrices and reshape as necessary.""" for i, att in enumerate(att_mat): # Add extra batch dim for viz code to work. if att.ndim == 3: att = np.expand_dims(att, axis=0) if max_length is not None: # Sum across different attention values for each token. att = att[:, :, :max_length, :max_length] row_sums = np.sum(att, axis=2) # Normalize att /= row_sums[:, :, np.newaxis] att_mat[i] = att return att_mat
[ "def", "resize", "(", "att_mat", ",", "max_length", "=", "None", ")", ":", "for", "i", ",", "att", "in", "enumerate", "(", "att_mat", ")", ":", "# Add extra batch dim for viz code to work.", "if", "att", ".", "ndim", "==", "3", ":", "att", "=", "np", ".", "expand_dims", "(", "att", ",", "axis", "=", "0", ")", "if", "max_length", "is", "not", "None", ":", "# Sum across different attention values for each token.", "att", "=", "att", "[", ":", ",", ":", ",", ":", "max_length", ",", ":", "max_length", "]", "row_sums", "=", "np", ".", "sum", "(", "att", ",", "axis", "=", "2", ")", "# Normalize", "att", "/=", "row_sums", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", "att_mat", "[", "i", "]", "=", "att", "return", "att_mat" ]
Normalize attention matrices and reshape as necessary.
[ "Normalize", "attention", "matrices", "and", "reshape", "as", "necessary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/attention.py#L62-L75
21,994
tensorflow/tensor2tensor
tensor2tensor/visualization/attention.py
_get_attention
def _get_attention(inp_text, out_text, enc_atts, dec_atts, encdec_atts): """Compute representation of the attention ready for the d3 visualization. Args: inp_text: list of strings, words to be displayed on the left of the vis out_text: list of strings, words to be displayed on the right of the vis enc_atts: numpy array, encoder self-attentions [num_layers, batch_size, num_heads, enc_length, enc_length] dec_atts: numpy array, decoder self-attentions [num_layers, batch_size, num_heads, dec_length, dec_length] encdec_atts: numpy array, encoder-decoder attentions [num_layers, batch_size, num_heads, dec_length, enc_length] Returns: Dictionary of attention representations with the structure: { 'all': Representations for showing all attentions at the same time. 'inp_inp': Representations for showing encoder self-attentions 'inp_out': Representations for showing encoder-decoder attentions 'out_out': Representations for showing decoder self-attentions } and each sub-dictionary has structure: { 'att': list of inter attentions matrices, one for each attention head 'top_text': list of strings, words to be displayed on the left of the vis 'bot_text': list of strings, words to be displayed on the right of the vis } """ def get_full_attention(layer): """Get the full input+output - input+output attentions.""" enc_att = enc_atts[layer][0] dec_att = dec_atts[layer][0] encdec_att = encdec_atts[layer][0] enc_att = np.transpose(enc_att, [0, 2, 1]) dec_att = np.transpose(dec_att, [0, 2, 1]) encdec_att = np.transpose(encdec_att, [0, 2, 1]) # [heads, query_length, memory_length] enc_length = enc_att.shape[1] dec_length = dec_att.shape[1] num_heads = enc_att.shape[0] first = np.concatenate([enc_att, encdec_att], axis=2) second = np.concatenate( [np.zeros((num_heads, dec_length, enc_length)), dec_att], axis=2) full_att = np.concatenate([first, second], axis=1) return [ha.T.tolist() for ha in full_att] def get_inp_inp_attention(layer): att = np.transpose(enc_atts[layer][0], (0, 2, 1)) return 
[ha.T.tolist() for ha in att] def get_out_inp_attention(layer): att = np.transpose(encdec_atts[layer][0], (0, 2, 1)) return [ha.T.tolist() for ha in att] def get_out_out_attention(layer): att = np.transpose(dec_atts[layer][0], (0, 2, 1)) return [ha.T.tolist() for ha in att] def get_attentions(get_attention_fn): num_layers = len(enc_atts) return [get_attention_fn(i) for i in range(num_layers)] attentions = { 'all': { 'att': get_attentions(get_full_attention), 'top_text': inp_text + out_text, 'bot_text': inp_text + out_text, }, 'inp_inp': { 'att': get_attentions(get_inp_inp_attention), 'top_text': inp_text, 'bot_text': inp_text, }, 'inp_out': { 'att': get_attentions(get_out_inp_attention), 'top_text': inp_text, 'bot_text': out_text, }, 'out_out': { 'att': get_attentions(get_out_out_attention), 'top_text': out_text, 'bot_text': out_text, }, } return attentions
python
def _get_attention(inp_text, out_text, enc_atts, dec_atts, encdec_atts): """Compute representation of the attention ready for the d3 visualization. Args: inp_text: list of strings, words to be displayed on the left of the vis out_text: list of strings, words to be displayed on the right of the vis enc_atts: numpy array, encoder self-attentions [num_layers, batch_size, num_heads, enc_length, enc_length] dec_atts: numpy array, decoder self-attentions [num_layers, batch_size, num_heads, dec_length, dec_length] encdec_atts: numpy array, encoder-decoder attentions [num_layers, batch_size, num_heads, dec_length, enc_length] Returns: Dictionary of attention representations with the structure: { 'all': Representations for showing all attentions at the same time. 'inp_inp': Representations for showing encoder self-attentions 'inp_out': Representations for showing encoder-decoder attentions 'out_out': Representations for showing decoder self-attentions } and each sub-dictionary has structure: { 'att': list of inter attentions matrices, one for each attention head 'top_text': list of strings, words to be displayed on the left of the vis 'bot_text': list of strings, words to be displayed on the right of the vis } """ def get_full_attention(layer): """Get the full input+output - input+output attentions.""" enc_att = enc_atts[layer][0] dec_att = dec_atts[layer][0] encdec_att = encdec_atts[layer][0] enc_att = np.transpose(enc_att, [0, 2, 1]) dec_att = np.transpose(dec_att, [0, 2, 1]) encdec_att = np.transpose(encdec_att, [0, 2, 1]) # [heads, query_length, memory_length] enc_length = enc_att.shape[1] dec_length = dec_att.shape[1] num_heads = enc_att.shape[0] first = np.concatenate([enc_att, encdec_att], axis=2) second = np.concatenate( [np.zeros((num_heads, dec_length, enc_length)), dec_att], axis=2) full_att = np.concatenate([first, second], axis=1) return [ha.T.tolist() for ha in full_att] def get_inp_inp_attention(layer): att = np.transpose(enc_atts[layer][0], (0, 2, 1)) return 
[ha.T.tolist() for ha in att] def get_out_inp_attention(layer): att = np.transpose(encdec_atts[layer][0], (0, 2, 1)) return [ha.T.tolist() for ha in att] def get_out_out_attention(layer): att = np.transpose(dec_atts[layer][0], (0, 2, 1)) return [ha.T.tolist() for ha in att] def get_attentions(get_attention_fn): num_layers = len(enc_atts) return [get_attention_fn(i) for i in range(num_layers)] attentions = { 'all': { 'att': get_attentions(get_full_attention), 'top_text': inp_text + out_text, 'bot_text': inp_text + out_text, }, 'inp_inp': { 'att': get_attentions(get_inp_inp_attention), 'top_text': inp_text, 'bot_text': inp_text, }, 'inp_out': { 'att': get_attentions(get_out_inp_attention), 'top_text': inp_text, 'bot_text': out_text, }, 'out_out': { 'att': get_attentions(get_out_out_attention), 'top_text': out_text, 'bot_text': out_text, }, } return attentions
[ "def", "_get_attention", "(", "inp_text", ",", "out_text", ",", "enc_atts", ",", "dec_atts", ",", "encdec_atts", ")", ":", "def", "get_full_attention", "(", "layer", ")", ":", "\"\"\"Get the full input+output - input+output attentions.\"\"\"", "enc_att", "=", "enc_atts", "[", "layer", "]", "[", "0", "]", "dec_att", "=", "dec_atts", "[", "layer", "]", "[", "0", "]", "encdec_att", "=", "encdec_atts", "[", "layer", "]", "[", "0", "]", "enc_att", "=", "np", ".", "transpose", "(", "enc_att", ",", "[", "0", ",", "2", ",", "1", "]", ")", "dec_att", "=", "np", ".", "transpose", "(", "dec_att", ",", "[", "0", ",", "2", ",", "1", "]", ")", "encdec_att", "=", "np", ".", "transpose", "(", "encdec_att", ",", "[", "0", ",", "2", ",", "1", "]", ")", "# [heads, query_length, memory_length]", "enc_length", "=", "enc_att", ".", "shape", "[", "1", "]", "dec_length", "=", "dec_att", ".", "shape", "[", "1", "]", "num_heads", "=", "enc_att", ".", "shape", "[", "0", "]", "first", "=", "np", ".", "concatenate", "(", "[", "enc_att", ",", "encdec_att", "]", ",", "axis", "=", "2", ")", "second", "=", "np", ".", "concatenate", "(", "[", "np", ".", "zeros", "(", "(", "num_heads", ",", "dec_length", ",", "enc_length", ")", ")", ",", "dec_att", "]", ",", "axis", "=", "2", ")", "full_att", "=", "np", ".", "concatenate", "(", "[", "first", ",", "second", "]", ",", "axis", "=", "1", ")", "return", "[", "ha", ".", "T", ".", "tolist", "(", ")", "for", "ha", "in", "full_att", "]", "def", "get_inp_inp_attention", "(", "layer", ")", ":", "att", "=", "np", ".", "transpose", "(", "enc_atts", "[", "layer", "]", "[", "0", "]", ",", "(", "0", ",", "2", ",", "1", ")", ")", "return", "[", "ha", ".", "T", ".", "tolist", "(", ")", "for", "ha", "in", "att", "]", "def", "get_out_inp_attention", "(", "layer", ")", ":", "att", "=", "np", ".", "transpose", "(", "encdec_atts", "[", "layer", "]", "[", "0", "]", ",", "(", "0", ",", "2", ",", "1", ")", ")", "return", "[", "ha", ".", "T", ".", "tolist", "(", ")", "for", 
"ha", "in", "att", "]", "def", "get_out_out_attention", "(", "layer", ")", ":", "att", "=", "np", ".", "transpose", "(", "dec_atts", "[", "layer", "]", "[", "0", "]", ",", "(", "0", ",", "2", ",", "1", ")", ")", "return", "[", "ha", ".", "T", ".", "tolist", "(", ")", "for", "ha", "in", "att", "]", "def", "get_attentions", "(", "get_attention_fn", ")", ":", "num_layers", "=", "len", "(", "enc_atts", ")", "return", "[", "get_attention_fn", "(", "i", ")", "for", "i", "in", "range", "(", "num_layers", ")", "]", "attentions", "=", "{", "'all'", ":", "{", "'att'", ":", "get_attentions", "(", "get_full_attention", ")", ",", "'top_text'", ":", "inp_text", "+", "out_text", ",", "'bot_text'", ":", "inp_text", "+", "out_text", ",", "}", ",", "'inp_inp'", ":", "{", "'att'", ":", "get_attentions", "(", "get_inp_inp_attention", ")", ",", "'top_text'", ":", "inp_text", ",", "'bot_text'", ":", "inp_text", ",", "}", ",", "'inp_out'", ":", "{", "'att'", ":", "get_attentions", "(", "get_out_inp_attention", ")", ",", "'top_text'", ":", "inp_text", ",", "'bot_text'", ":", "out_text", ",", "}", ",", "'out_out'", ":", "{", "'att'", ":", "get_attentions", "(", "get_out_out_attention", ")", ",", "'top_text'", ":", "out_text", ",", "'bot_text'", ":", "out_text", ",", "}", ",", "}", "return", "attentions" ]
Compute representation of the attention ready for the d3 visualization. Args: inp_text: list of strings, words to be displayed on the left of the vis out_text: list of strings, words to be displayed on the right of the vis enc_atts: numpy array, encoder self-attentions [num_layers, batch_size, num_heads, enc_length, enc_length] dec_atts: numpy array, decoder self-attentions [num_layers, batch_size, num_heads, dec_length, dec_length] encdec_atts: numpy array, encoder-decoder attentions [num_layers, batch_size, num_heads, dec_length, enc_length] Returns: Dictionary of attention representations with the structure: { 'all': Representations for showing all attentions at the same time. 'inp_inp': Representations for showing encoder self-attentions 'inp_out': Representations for showing encoder-decoder attentions 'out_out': Representations for showing decoder self-attentions } and each sub-dictionary has structure: { 'att': list of inter attentions matrices, one for each attention head 'top_text': list of strings, words to be displayed on the left of the vis 'bot_text': list of strings, words to be displayed on the right of the vis }
[ "Compute", "representation", "of", "the", "attention", "ready", "for", "the", "d3", "visualization", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/attention.py#L78-L163
21,995
tensorflow/tensor2tensor
tensor2tensor/data_generators/tokenizer.py
decode
def decode(tokens): """Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string """ token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens] ret = [] for i, token in enumerate(tokens): if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]: ret.append(u" ") ret.append(token) return "".join(ret)
python
def decode(tokens): """Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string """ token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens] ret = [] for i, token in enumerate(tokens): if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]: ret.append(u" ") ret.append(token) return "".join(ret)
[ "def", "decode", "(", "tokens", ")", ":", "token_is_alnum", "=", "[", "t", "[", "0", "]", "in", "_ALPHANUMERIC_CHAR_SET", "for", "t", "in", "tokens", "]", "ret", "=", "[", "]", "for", "i", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "if", "i", ">", "0", "and", "token_is_alnum", "[", "i", "-", "1", "]", "and", "token_is_alnum", "[", "i", "]", ":", "ret", ".", "append", "(", "u\" \"", ")", "ret", ".", "append", "(", "token", ")", "return", "\"\"", ".", "join", "(", "ret", ")" ]
Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string
[ "Decode", "a", "list", "of", "tokens", "to", "a", "unicode", "string", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/tokenizer.py#L91-L105
21,996
tensorflow/tensor2tensor
tensor2tensor/data_generators/tokenizer.py
_read_filepattern
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True): """Reads files matching a wildcard pattern, yielding the contents. Args: filepattern: A wildcard pattern matching one or more files. max_lines: If set, stop reading after reading this many lines. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Yields: The contents of the files as lines, if split_on_newlines is True, or the entire contents of each file if False. """ filenames = sorted(tf.gfile.Glob(filepattern)) lines_read = 0 for filename in filenames: with tf.gfile.Open(filename) as f: if split_on_newlines: for line in f: yield line.strip() lines_read += 1 if max_lines and lines_read >= max_lines: return else: if max_lines: doc = [] for line in f: doc.append(line) lines_read += 1 if max_lines and lines_read >= max_lines: yield "".join(doc) return yield "".join(doc) else: yield f.read()
python
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True): """Reads files matching a wildcard pattern, yielding the contents. Args: filepattern: A wildcard pattern matching one or more files. max_lines: If set, stop reading after reading this many lines. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Yields: The contents of the files as lines, if split_on_newlines is True, or the entire contents of each file if False. """ filenames = sorted(tf.gfile.Glob(filepattern)) lines_read = 0 for filename in filenames: with tf.gfile.Open(filename) as f: if split_on_newlines: for line in f: yield line.strip() lines_read += 1 if max_lines and lines_read >= max_lines: return else: if max_lines: doc = [] for line in f: doc.append(line) lines_read += 1 if max_lines and lines_read >= max_lines: yield "".join(doc) return yield "".join(doc) else: yield f.read()
[ "def", "_read_filepattern", "(", "filepattern", ",", "max_lines", "=", "None", ",", "split_on_newlines", "=", "True", ")", ":", "filenames", "=", "sorted", "(", "tf", ".", "gfile", ".", "Glob", "(", "filepattern", ")", ")", "lines_read", "=", "0", "for", "filename", "in", "filenames", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", "as", "f", ":", "if", "split_on_newlines", ":", "for", "line", "in", "f", ":", "yield", "line", ".", "strip", "(", ")", "lines_read", "+=", "1", "if", "max_lines", "and", "lines_read", ">=", "max_lines", ":", "return", "else", ":", "if", "max_lines", ":", "doc", "=", "[", "]", "for", "line", "in", "f", ":", "doc", ".", "append", "(", "line", ")", "lines_read", "+=", "1", "if", "max_lines", "and", "lines_read", ">=", "max_lines", ":", "yield", "\"\"", ".", "join", "(", "doc", ")", "return", "yield", "\"\"", ".", "join", "(", "doc", ")", "else", ":", "yield", "f", ".", "read", "(", ")" ]
Reads files matching a wildcard pattern, yielding the contents. Args: filepattern: A wildcard pattern matching one or more files. max_lines: If set, stop reading after reading this many lines. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Yields: The contents of the files as lines, if split_on_newlines is True, or the entire contents of each file if False.
[ "Reads", "files", "matching", "a", "wildcard", "pattern", "yielding", "the", "contents", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/tokenizer.py#L108-L145
21,997
tensorflow/tensor2tensor
tensor2tensor/data_generators/tokenizer.py
corpus_token_counts
def corpus_token_counts( text_filepattern, corpus_max_lines, split_on_newlines=True): """Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count. """ counts = collections.Counter() for doc in _read_filepattern( text_filepattern, max_lines=corpus_max_lines, split_on_newlines=split_on_newlines): counts.update(encode(_native_to_unicode(doc))) mlperf_log.transformer_print( key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts)) return counts
python
def corpus_token_counts( text_filepattern, corpus_max_lines, split_on_newlines=True): """Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count. """ counts = collections.Counter() for doc in _read_filepattern( text_filepattern, max_lines=corpus_max_lines, split_on_newlines=split_on_newlines): counts.update(encode(_native_to_unicode(doc))) mlperf_log.transformer_print( key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts)) return counts
[ "def", "corpus_token_counts", "(", "text_filepattern", ",", "corpus_max_lines", ",", "split_on_newlines", "=", "True", ")", ":", "counts", "=", "collections", ".", "Counter", "(", ")", "for", "doc", "in", "_read_filepattern", "(", "text_filepattern", ",", "max_lines", "=", "corpus_max_lines", ",", "split_on_newlines", "=", "split_on_newlines", ")", ":", "counts", ".", "update", "(", "encode", "(", "_native_to_unicode", "(", "doc", ")", ")", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "PREPROC_VOCAB_SIZE", ",", "value", "=", "len", "(", "counts", ")", ")", "return", "counts" ]
Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count.
[ "Read", "the", "corpus", "and", "compute", "a", "dictionary", "of", "token", "counts", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/tokenizer.py#L148-L171
21,998
tensorflow/tensor2tensor
tensor2tensor/data_generators/tokenizer.py
vocab_token_counts
def vocab_token_counts(text_filepattern, max_lines): """Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Returns: a dictionary mapping token to count. """ ret = {} for i, line in enumerate( _read_filepattern(text_filepattern, max_lines=max_lines)): if "," not in line: tf.logging.warning("Malformed vocab line #%d '%s'", i, line) continue token, count = line.rsplit(",", 1) ret[_native_to_unicode(token)] = int(count) return ret
python
def vocab_token_counts(text_filepattern, max_lines): """Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Returns: a dictionary mapping token to count. """ ret = {} for i, line in enumerate( _read_filepattern(text_filepattern, max_lines=max_lines)): if "," not in line: tf.logging.warning("Malformed vocab line #%d '%s'", i, line) continue token, count = line.rsplit(",", 1) ret[_native_to_unicode(token)] = int(count) return ret
[ "def", "vocab_token_counts", "(", "text_filepattern", ",", "max_lines", ")", ":", "ret", "=", "{", "}", "for", "i", ",", "line", "in", "enumerate", "(", "_read_filepattern", "(", "text_filepattern", ",", "max_lines", "=", "max_lines", ")", ")", ":", "if", "\",\"", "not", "in", "line", ":", "tf", ".", "logging", ".", "warning", "(", "\"Malformed vocab line #%d '%s'\"", ",", "i", ",", "line", ")", "continue", "token", ",", "count", "=", "line", ".", "rsplit", "(", "\",\"", ",", "1", ")", "ret", "[", "_native_to_unicode", "(", "token", ")", "]", "=", "int", "(", "count", ")", "return", "ret" ]
Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Returns: a dictionary mapping token to count.
[ "Read", "a", "vocab", "file", "and", "return", "a", "dictionary", "of", "token", "counts", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/tokenizer.py#L174-L197
21,999
tensorflow/tensor2tensor
tensor2tensor/serving/serving_utils.py
_make_example
def _make_example(input_ids, problem, input_feature_name="inputs"): """Make a tf.train.Example for the problem. features[input_feature_name] = input_ids Also fills in any other required features with dummy values. Args: input_ids: list<int>. problem: Problem. input_feature_name: name of feature for input_ids. Returns: tf.train.Example """ features = { input_feature_name: tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)) } # Fill in dummy values for any other required features that presumably # will not actually be used for prediction. data_fields, _ = problem.example_reading_spec() for fname, ftype in data_fields.items(): if fname == input_feature_name: continue if not isinstance(ftype, tf.FixedLenFeature): # Only FixedLenFeatures are required continue if ftype.default_value is not None: # If there's a default value, no need to fill it in continue num_elements = functools.reduce(lambda acc, el: acc * el, ftype.shape, 1) if ftype.dtype in [tf.int32, tf.int64]: value = tf.train.Feature( int64_list=tf.train.Int64List(value=[0] * num_elements)) if ftype.dtype in [tf.float32, tf.float64]: value = tf.train.Feature( float_list=tf.train.FloatList(value=[0.] * num_elements)) if ftype.dtype == tf.bytes: value = tf.train.Feature( bytes_list=tf.train.BytesList(value=[""] * num_elements)) tf.logging.info("Adding dummy value for feature %s as it is required by " "the Problem.", fname) features[fname] = value return tf.train.Example(features=tf.train.Features(feature=features))
python
def _make_example(input_ids, problem, input_feature_name="inputs"): """Make a tf.train.Example for the problem. features[input_feature_name] = input_ids Also fills in any other required features with dummy values. Args: input_ids: list<int>. problem: Problem. input_feature_name: name of feature for input_ids. Returns: tf.train.Example """ features = { input_feature_name: tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)) } # Fill in dummy values for any other required features that presumably # will not actually be used for prediction. data_fields, _ = problem.example_reading_spec() for fname, ftype in data_fields.items(): if fname == input_feature_name: continue if not isinstance(ftype, tf.FixedLenFeature): # Only FixedLenFeatures are required continue if ftype.default_value is not None: # If there's a default value, no need to fill it in continue num_elements = functools.reduce(lambda acc, el: acc * el, ftype.shape, 1) if ftype.dtype in [tf.int32, tf.int64]: value = tf.train.Feature( int64_list=tf.train.Int64List(value=[0] * num_elements)) if ftype.dtype in [tf.float32, tf.float64]: value = tf.train.Feature( float_list=tf.train.FloatList(value=[0.] * num_elements)) if ftype.dtype == tf.bytes: value = tf.train.Feature( bytes_list=tf.train.BytesList(value=[""] * num_elements)) tf.logging.info("Adding dummy value for feature %s as it is required by " "the Problem.", fname) features[fname] = value return tf.train.Example(features=tf.train.Features(feature=features))
[ "def", "_make_example", "(", "input_ids", ",", "problem", ",", "input_feature_name", "=", "\"inputs\"", ")", ":", "features", "=", "{", "input_feature_name", ":", "tf", ".", "train", ".", "Feature", "(", "int64_list", "=", "tf", ".", "train", ".", "Int64List", "(", "value", "=", "input_ids", ")", ")", "}", "# Fill in dummy values for any other required features that presumably", "# will not actually be used for prediction.", "data_fields", ",", "_", "=", "problem", ".", "example_reading_spec", "(", ")", "for", "fname", ",", "ftype", "in", "data_fields", ".", "items", "(", ")", ":", "if", "fname", "==", "input_feature_name", ":", "continue", "if", "not", "isinstance", "(", "ftype", ",", "tf", ".", "FixedLenFeature", ")", ":", "# Only FixedLenFeatures are required", "continue", "if", "ftype", ".", "default_value", "is", "not", "None", ":", "# If there's a default value, no need to fill it in", "continue", "num_elements", "=", "functools", ".", "reduce", "(", "lambda", "acc", ",", "el", ":", "acc", "*", "el", ",", "ftype", ".", "shape", ",", "1", ")", "if", "ftype", ".", "dtype", "in", "[", "tf", ".", "int32", ",", "tf", ".", "int64", "]", ":", "value", "=", "tf", ".", "train", ".", "Feature", "(", "int64_list", "=", "tf", ".", "train", ".", "Int64List", "(", "value", "=", "[", "0", "]", "*", "num_elements", ")", ")", "if", "ftype", ".", "dtype", "in", "[", "tf", ".", "float32", ",", "tf", ".", "float64", "]", ":", "value", "=", "tf", ".", "train", ".", "Feature", "(", "float_list", "=", "tf", ".", "train", ".", "FloatList", "(", "value", "=", "[", "0.", "]", "*", "num_elements", ")", ")", "if", "ftype", ".", "dtype", "==", "tf", ".", "bytes", ":", "value", "=", "tf", ".", "train", ".", "Feature", "(", "bytes_list", "=", "tf", ".", "train", ".", "BytesList", "(", "value", "=", "[", "\"\"", "]", "*", "num_elements", ")", ")", "tf", ".", "logging", ".", "info", "(", "\"Adding dummy value for feature %s as it is required by \"", "\"the Problem.\"", ",", "fname", ")", 
"features", "[", "fname", "]", "=", "value", "return", "tf", ".", "train", ".", "Example", "(", "features", "=", "tf", ".", "train", ".", "Features", "(", "feature", "=", "features", ")", ")" ]
Make a tf.train.Example for the problem. features[input_feature_name] = input_ids Also fills in any other required features with dummy values. Args: input_ids: list<int>. problem: Problem. input_feature_name: name of feature for input_ids. Returns: tf.train.Example
[ "Make", "a", "tf", ".", "train", ".", "Example", "for", "the", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/serving_utils.py#L36-L81