Dataset schema (field, type, value range):

  partition          stringclasses   3 values
  func_name          stringlengths   1 to 134
  docstring          stringlengths   1 to 46.9k
  path               stringlengths   4 to 223
  original_string    stringlengths   75 to 104k
  code               stringlengths   75 to 104k (verbatim duplicate of original_string in the rows shown)
  docstring_tokens   listlengths     1 to 1.97k
  repo               stringlengths   7 to 55
  language           stringclasses   1 value
  url                stringlengths   87 to 315
  code_tokens        listlengths     19 to 28.4k
  sha                stringlengths   40 to 40
train
restore_state
Restore State.
tensor2tensor/trax/trax.py
def restore_state(output_dir):
  """Restore State."""
  params_file = os.path.join(output_dir, "model.pkl")
  if not gfile.exists(params_file):
    return State(step=None, params=None, history=trax_history.History())
  with gfile.GFile(params_file, "rb") as f:
    (params, step, history) = pickle.load(f)
  log("Model loaded from %s at step %d" % (params_file, step))
  logging.debug("From loaded model : history = %s", history)
  return State(step=step, params=params, history=history)
[ "Restore", "State", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L129-L139
[ "def", "restore_state", "(", "output_dir", ")", ":", "params_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model.pkl\"", ")", "if", "not", "gfile", ".", "exists", "(", "params_file", ")", ":", "return", "State", "(", "step", "=",...
272500b6efe353aeb638d2745ed56e519462ca31
train
save_state
Save State and optionally gin config.
tensor2tensor/trax/trax.py
def save_state(state, output_dir, keep=False):
  """Save State and optionally gin config."""
  params_file = os.path.join(output_dir, "model.pkl")
  with gfile.GFile(params_file, "wb") as f:
    pickle.dump((state.params, state.step, state.history), f)
  if keep:
    params_file = os.path.join(output_dir, "model_{}.pkl".format(state.step))
    with gfile.GFile(params_file, "wb") as f:
      pickle.dump((state.params, state.step, state.history), f)
  log("Model saved to %s" % params_file, stdout=False)
[ "Save", "State", "and", "optionally", "gin", "config", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L152-L161
[ "def", "save_state", "(", "state", ",", "output_dir", ",", "keep", "=", "False", ")", ":", "params_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model.pkl\"", ")", "with", "gfile", ".", "GFile", "(", "params_file", ",", "\"wb\"",...
272500b6efe353aeb638d2745ed56e519462ca31
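
Together these two functions implement trax's checkpoint round trip: save_state pickles (params, step, history) to model.pkl, and restore_state unpickles it or returns an empty State. Below is a minimal self-contained sketch of the same pattern, with the standard library in place of gfile and a namedtuple standing in for trax's State (both substitutions are assumptions for illustration):

import collections
import os
import pickle
import tempfile

# Hypothetical stand-in for trax.State; the real one carries a History object.
State = collections.namedtuple("State", ["step", "params", "history"])

def save_state(state, output_dir):
  # Pickle the whole training state to a single file, as above.
  with open(os.path.join(output_dir, "model.pkl"), "wb") as f:
    pickle.dump((state.params, state.step, state.history), f)

def restore_state(output_dir):
  path = os.path.join(output_dir, "model.pkl")
  if not os.path.exists(path):
    return State(step=None, params=None, history=[])
  with open(path, "rb") as f:
    params, step, history = pickle.load(f)
  return State(step=step, params=params, history=history)

out = tempfile.mkdtemp()
save_state(State(step=7, params={"w": [1.0, 2.0]}, history=[]), out)
print(restore_state(out))  # State(step=7, params={'w': [1.0, 2.0]}, history=[])
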
train
evaluate_train_and_eval
Evaluate on train and eval data, and log metrics.
tensor2tensor/trax/trax.py
def evaluate_train_and_eval(step, inputs, predict_fun, eval_steps, rng,
                            train_sw=None, eval_sw=None, history=None):
  """Evaluate on train and eval data, and log metrics."""
  step_log(step, "Evaluation")
  train_metrics, eval_metrics = [
      evaluate(  # pylint: disable=g-complex-comprehension
          itertools.islice(input_stream(), eval_steps),
          predict_fun,
          _METRICS,
          rng)
      for input_stream in [inputs.train_eval_stream, inputs.eval_stream]]
  if train_sw:
    log_metrics(train_metrics, train_sw, "train", step, history=history)
  if eval_sw:
    log_metrics(eval_metrics, eval_sw, "eval", step, history=history)
  step_log(step, "Finished evaluation")
  return train_metrics, eval_metrics
[ "Evalaute", "on", "train", "and", "eval", "data", "and", "log", "metrics", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L172-L189
[ "def", "evaluate_train_and_eval", "(", "step", ",", "inputs", ",", "predict_fun", ",", "eval_steps", ",", "rng", ",", "train_sw", "=", "None", ",", "eval_sw", "=", "None", ",", "history", "=", "None", ")", ":", "step_log", "(", "step", ",", "\"Evaluation\"...
272500b6efe353aeb638d2745ed56e519462ca31
train
evaluate
Evaluate.

Args:
  inputs_stream: iterable of inputs to evaluate on.
  predict_fun: function from inputs to predictions. params should already
    be partially applied.
  metric_funs: dict from metric name to metric function, which takes inputs
    and predictions and returns a scalar metric value.
  rng: random number generator.

Returns:
  metrics: dict from metric name to metric value averaged over the number of
    inputs.
tensor2tensor/trax/trax.py
def evaluate(inputs_stream, predict_fun, metric_funs, rng):
  """Evaluate.

  Args:
    inputs_stream: iterable of inputs to evaluate on.
    predict_fun: function from inputs to predictions. params should already
      be partially applied.
    metric_funs: dict from metric name to metric function, which takes inputs
      and predictions and returns a scalar metric value.
    rng: random number generator.

  Returns:
    metrics: dict from metric name to metric value averaged over the number
      of inputs.
  """
  metrics = collections.defaultdict(float)
  count = 0
  for inp in inputs_stream:
    count += 1
    rng, subrng = jax_random.split(rng)
    preds = predict_fun(inp[0], rng=subrng)
    for m, f in six.iteritems(metric_funs):
      metrics[m] += f(inp, preds)
  return {m: v / count for (m, v) in six.iteritems(metrics)}
[ "Evaluate", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L192-L215
[ "def", "evaluate", "(", "inputs_stream", ",", "predict_fun", ",", "metric_funs", ",", "rng", ")", ":", "metrics", "=", "collections", ".", "defaultdict", "(", "float", ")", "count", "=", "0", "for", "inp", "in", "inputs_stream", ":", "count", "+=", "1", ...
272500b6efe353aeb638d2745ed56e519462ca31
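
evaluate is a running average of each metric over a finite stream. Here is a stripped-down, dependency-free sketch of the same loop; the JAX RNG splitting is dropped, and the stream, predictor, and metric below are invented for illustration:

import collections

def evaluate(inputs_stream, predict_fun, metric_funs):
  # Accumulate each metric over the stream, then divide by the count.
  metrics = collections.defaultdict(float)
  count = 0
  for inp in inputs_stream:
    count += 1
    preds = predict_fun(inp[0])
    for name, fun in metric_funs.items():
      metrics[name] += fun(inp, preds)
  return {name: value / count for name, value in metrics.items()}

stream = [((1.0,), 2.0), ((2.0,), 4.0)]           # (input, target) pairs
predict = lambda x: 2.0 * x[0]                    # a perfect "model"
metrics = {"abs_err": lambda inp, pred: abs(pred - inp[1])}
print(evaluate(stream, predict, metrics))         # {'abs_err': 0.0}
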
train
log_metrics
Log metrics to summary writer and history.
tensor2tensor/trax/trax.py
def log_metrics(metrics, summ_writer, log_prefix, step, history=None):
  """Log metrics to summary writer and history."""
  rjust_len = max([len(name) for name in metrics])
  for name, value in six.iteritems(metrics):
    step_log(step, "%s %s | % .8f" % (
        log_prefix.ljust(5), name.rjust(rjust_len), value))
    full_name = "metrics/" + name
    if history:
      history.append(log_prefix, full_name, step, value)
    if summ_writer:
      summ_writer.scalar(full_name, value, step)
[ "Log", "metrics", "to", "summary", "writer", "and", "history", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L218-L228
[ "def", "log_metrics", "(", "metrics", ",", "summ_writer", ",", "log_prefix", ",", "step", ",", "history", "=", "None", ")", ":", "rjust_len", "=", "max", "(", "[", "len", "(", "name", ")", "for", "name", "in", "metrics", "]", ")", "for", "name", ",",...
272500b6efe353aeb638d2745ed56e519462ca31
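
The string formatting in log_metrics pads the prefix to 5 characters and right-justifies metric names to the longest one, so columns line up across lines. A quick look at just that formatting (print stands in for step_log):

metrics = {"loss": 0.25, "accuracy": 0.9}
rjust_len = max(len(name) for name in metrics)
for name, value in metrics.items():
  print("%s %s | % .8f" % ("train".ljust(5), name.rjust(rjust_len), value))
# train     loss |  0.25000000
# train accuracy |  0.90000000
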
train
get_random_number_generator_and_set_seed
Get a JAX random number generator and set random seed everywhere.
tensor2tensor/trax/trax.py
def get_random_number_generator_and_set_seed(seed=None):
  """Get a JAX random number generator and set random seed everywhere."""
  random.seed(seed)
  # While python random accepts None as seed and uses time/os seed then,
  # some other functions expect integers so we create one here.
  if seed is None:
    seed = random.randint(0, 2**31 - 1)
  tf.set_random_seed(seed)
  numpy.random.seed(seed)
  return jax_random.get_prng(seed)
[ "Get", "a", "JAX", "random", "number", "generator", "and", "set", "random", "seed", "everywhere", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L231-L240
[ "def", "get_random_number_generator_and_set_seed", "(", "seed", "=", "None", ")", ":", "random", ".", "seed", "(", "seed", ")", "# While python random accepts None as seed and uses time/os seed then,", "# some other functions expect integers so we create one here.", "if", "seed", ...
272500b6efe353aeb638d2745ed56e519462ca31
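
The seeding recipe: seed Python's random first (it accepts None), then draw one concrete integer and hand it to every other RNG. A library-agnostic sketch where numpy stands in for the tf.set_random_seed and jax_random.get_prng calls:

import random
import numpy as np

def seed_everywhere(seed=None):
  random.seed(seed)              # accepts None (time/os-based seeding)
  if seed is None:
    # Other libraries want a concrete integer, so draw one now.
    seed = random.randint(0, 2**31 - 1)
  np.random.seed(seed)           # tf.set_random_seed(seed) would go here too
  return seed

s = seed_everywhere()
print("using seed", s)
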
train
epochs
Iterator over epochs until steps is reached. 1-indexed.

Args:
  steps: int, total number of steps. Infinite if None.
  epoch_steps: int, number of steps per epoch. Can also be an iterable<int>
    to enable variable length epochs.

Yields:
  (epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
tensor2tensor/trax/trax.py
def epochs(steps=None, epoch_steps=1):
  """Iterator over epochs until steps is reached. 1-indexed.

  Args:
    steps: int, total number of steps. Infinite if None.
    epoch_steps: int, number of steps per epoch. Can also be an iterable<int>
      to enable variable length epochs.

  Yields:
    (epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
  """
  try:
    iter(epoch_steps)
  except TypeError:
    epoch_steps = itertools.repeat(epoch_steps)

  step = 0
  for epoch, epoch_steps in enumerate(epoch_steps):
    epoch_steps = min(epoch_steps, steps - step)
    yield (epoch + 1, epoch_steps)
    step += epoch_steps
    if steps and step >= steps:
      break
[ "Iterator", "over", "epochs", "until", "steps", "is", "reached", ".", "1", "-", "indexed", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L255-L277
[ "def", "epochs", "(", "steps", "=", "None", ",", "epoch_steps", "=", "1", ")", ":", "try", ":", "iter", "(", "epoch_steps", ")", "except", "TypeError", ":", "epoch_steps", "=", "itertools", ".", "repeat", "(", "epoch_steps", ")", "step", "=", "0", "for...
272500b6efe353aeb638d2745ed56e519462ca31
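
Two behaviors of epochs are worth pinning down: the final epoch is clipped so the total never exceeds steps, and yields are 1-indexed (epoch, epoch_steps) pairs. Note also that, despite the docstring's "Infinite if None", steps=None would raise a TypeError at steps - step, so this standalone re-run of the logic assumes a concrete int:

import itertools

def epochs(steps, epoch_steps=1):
  # Same logic as the record above, assuming steps is a concrete int.
  try:
    iter(epoch_steps)
  except TypeError:
    epoch_steps = itertools.repeat(epoch_steps)
  step = 0
  for epoch, n in enumerate(epoch_steps):
    n = min(n, steps - step)
    yield (epoch + 1, n)
    step += n
    if step >= steps:
      break

print(list(epochs(10, 4)))         # [(1, 4), (2, 4), (3, 2)]
print(list(epochs(5, [1, 2, 8])))  # [(1, 1), (2, 2), (3, 2)]
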
train
_jit_predict_fun
Use jit on model_predict if required.
tensor2tensor/trax/trax.py
def _jit_predict_fun(model_predict, num_devices):
  """Use jit on model_predict if required."""

  def predict(x, params=(), rng=None):
    """Predict function jited and parallelized as requested."""
    # On one device, jit and run.
    if num_devices == 1:
      return backend.jit(model_predict)(x, params, rng=rng)

    # Multi-devices, pmap and run.
    @functools.partial(backend.pmap, axis_name="batch")
    def mapped_predict(x, params, rng):
      return model_predict(x, params, rng=rng)
    pred = mapped_predict(
        reshape_by_device(x, num_devices),
        params,
        jax_random.split(rng, num_devices))
    # Need to reduce the [device, per-device-batch, ...] tensors back to
    # a [batch, ...] tensor. The tensors may be nested.
    if not isinstance(x, (list, tuple)):  # Not nested.
      batch_size = x.shape[0]
      return np.reshape(pred, [batch_size] + list(pred.shape[2:]))
    batch_size = x[0].shape[0]
    return [np.reshape(p, [batch_size] + list(p.shape[2:])) for p in pred]

  return predict
[ "Use", "jit", "on", "model_predict", "if", "required", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L280-L304
[ "def", "_jit_predict_fun", "(", "model_predict", ",", "num_devices", ")", ":", "def", "predict", "(", "x", ",", "params", "=", "(", ")", ",", "rng", "=", "None", ")", ":", "\"\"\"Predict function jited and parallelized as requested.\"\"\"", "# On one device, jit and r...
272500b6efe353aeb638d2745ed56e519462ca31
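
After pmap, predictions come back shaped [num_devices, per-device batch, ...] and must be collapsed to [batch, ...]. The reshape is mechanical; here it is in NumPy (standing in for the backend-dependent np used above):

import numpy as np

num_devices, per_device_batch = 4, 8
pred = np.zeros((num_devices, per_device_batch, 10))  # per-device outputs
batch_size = num_devices * per_device_batch
merged = np.reshape(pred, [batch_size] + list(pred.shape[2:]))
print(merged.shape)  # (32, 10)
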
train
_jit_update_fun
Get jit-ed update function for loss, optimizer, learning rate function.
tensor2tensor/trax/trax.py
def _jit_update_fun(predict_fun, loss_fun, optimizer, lr_fun, num_devices):
  """Get jit-ed update function for loss, optimizer, learning rate function."""
  if num_devices == 1:  # TODO(lukaszkaiser): remove branch when not needed.
    def single_update(i, opt_state, batch, rng):
      rng, subrng = jax_random.split(rng[0])
      _, opt_update = optimizer(lr_fun)
      params = trax_opt.get_params(opt_state)
      return opt_update(i, backend.grad(loss_fun)(
          params, batch, predict_fun, rng), opt_state), [subrng]
    return backend.jit(single_update)

  @functools.partial(backend.pmap, axis_name="batch")
  def mapped_update(i, opt_state, batch, rng):
    """This is a multi-device version of the update function above."""
    # We assume all tensors have the first dimension = num_devices.
    rng, subrng = jax_random.split(rng)
    _, opt_update = optimizer(lr_fun)
    params = trax_opt.get_params(opt_state)
    grads = backend.grad(loss_fun)(params, batch, predict_fun, rng)
    grads = jax.tree_util.tree_map(
        lambda g: lax.psum(g, "batch"), grads)
    return opt_update(i, grads, opt_state), subrng

  def update(i, opt_state, batch, rng):
    return mapped_update(jax.replicate(i), opt_state, batch, rng)

  return update
[ "Get", "jit", "-", "ed", "update", "function", "for", "loss", "optimizer", "learning", "rate", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L307-L333
[ "def", "_jit_update_fun", "(", "predict_fun", ",", "loss_fun", ",", "optimizer", ",", "lr_fun", ",", "num_devices", ")", ":", "if", "num_devices", "==", "1", ":", "# TODO(lukaszkaiser): remove branch when not needed.", "def", "single_update", "(", "i", ",", "opt_sta...
272500b6efe353aeb638d2745ed56e519462ca31
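
The multi-device branch sums gradients across devices by applying lax.psum to every leaf of the gradient tree. A minimal sketch of that one step, using plain jax.pmap in place of trax's backend.pmap (an assumption; on a CPU-only host this runs with n = 1, where psum is a no-op):

import functools
import jax
import jax.numpy as jnp
from jax import lax

n = jax.local_device_count()  # 1 on a default CPU host

@functools.partial(jax.pmap, axis_name="batch")
def psum_tree(grads):
  # Sum every leaf of the gradient pytree across the device axis,
  # mirroring the tree_map + lax.psum step in mapped_update.
  return jax.tree_util.tree_map(lambda g: lax.psum(g, "batch"), grads)

grads = {"w": jnp.ones((n, 3)), "b": jnp.ones((n,))}
print(psum_tree(grads))  # each leaf summed over the n-device axis
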
train
_reshape_by_device_single
Reshape x into a shape [num_devices, ...].
tensor2tensor/trax/trax.py
def _reshape_by_device_single(x, num_devices):
  """Reshape x into a shape [num_devices, ...]."""
  x_shape = list(x.shape)
  batch_size = x_shape[0]
  batch_size_per_device = batch_size // num_devices
  # We require that num_devices divides batch_size evenly.
  if batch_size_per_device * num_devices != batch_size:
    logging.fatal(
        "We require that num_devices[%d] divides batch_size[%d] evenly.",
        num_devices, batch_size)
  # New shape.
  new_shape_prefix = [num_devices, batch_size_per_device]
  return np.reshape(x, new_shape_prefix + x_shape[1:])
[ "Reshape", "x", "into", "a", "shape", "[", "num_devices", "...", "]", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L336-L348
[ "def", "_reshape_by_device_single", "(", "x", ",", "num_devices", ")", ":", "x_shape", "=", "list", "(", "x", ".", "shape", ")", "batch_size", "=", "x_shape", "[", "0", "]", "batch_size_per_device", "=", "batch_size", "//", "num_devices", "# We require that num_...
272500b6efe353aeb638d2745ed56e519462ca31
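
Going the other way, the batch axis is split into [num_devices, batch // num_devices], and an uneven split is a fatal error. A NumPy sketch of the same check and reshape (raising instead of logging.fatal):

import numpy as np

def reshape_by_device_single(x, num_devices):
  batch = x.shape[0]
  per_device = batch // num_devices
  if per_device * num_devices != batch:
    raise ValueError("num_devices[%d] must divide batch_size[%d] evenly"
                     % (num_devices, batch))
  return np.reshape(x, [num_devices, per_device] + list(x.shape[1:]))

x = np.arange(32 * 10).reshape(32, 10)
print(reshape_by_device_single(x, 4).shape)  # (4, 8, 10)
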
train
reshape_by_device
Reshape possibly nested x into a shape [num_devices, ...].
tensor2tensor/trax/trax.py
def reshape_by_device(x, num_devices):
  """Reshape possibly nested x into a shape [num_devices, ...]."""
  return layers.nested_map(
      x, lambda x: _reshape_by_device_single(x, num_devices))
[ "Reshape", "possibly", "nested", "x", "into", "a", "shape", "[", "num_devices", "...", "]", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L351-L354
[ "def", "reshape_by_device", "(", "x", ",", "num_devices", ")", ":", "return", "layers", ".", "nested_map", "(", "x", ",", "lambda", "x", ":", "_reshape_by_device_single", "(", "x", ",", "num_devices", ")", ")" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
train
Train the model on the inputs.

Args:
  output_dir: Directory where to put the logs and checkpoints.
  model: The model to train as a callable returning 2 callables, an init_fun
    and apply_fun.
  loss_fun: callable with signature: params, trax.inputs.Inputs, model, rng
    -> loss.
  inputs: callable returning trax.inputs.Inputs.
  optimizer: The optimizer as a callable taking a learning_rate callable and
    returning 2 callables, opt_init and opt_update.
  lr_schedule: A learning rate schedule as a function that takes history and
    returns a function from step to learning rate (a float).
  train_steps: int, total number of training steps.
  save_steps: list of integers. Keep a model file at each of the supplied
    save steps.
  eval_steps: int, num of steps per evaluation. If None or 0, eval disabled.
  eval_frequency: int, how often to run evaluation (every eval_frequency
    steps). If None or 0, eval disabled.
  num_devices: how many devices to use (if None, default, use all available)
  random_seed: the random seed to use; time/os dependent if None (default).
  run_debug_step: bool, if True, will run the model and loss without @jit
    for one step.
  save_forward_graph: bool, if True, save forward computation graph to file.

Returns:
  trax.State
tensor2tensor/trax/trax.py
def train(output_dir,
          model=gin.REQUIRED,
          loss_fun=loss,
          inputs=trax_inputs.inputs,
          optimizer=trax_opt.adam,
          lr_schedule=lr.MultifactorSchedule,
          train_steps=1000,
          save_steps=None,
          eval_steps=10,
          eval_frequency=100,
          num_devices=None,
          random_seed=None,
          run_debug_step=False,
          save_forward_graph=False):
  """Train the model on the inputs.

  Args:
    output_dir: Directory where to put the logs and checkpoints.
    model: The model to train as a callable returning 2 callables, an
      init_fun and apply_fun.
    loss_fun: callable with signature: params, trax.inputs.Inputs, model, rng
      -> loss.
    inputs: callable returning trax.inputs.Inputs.
    optimizer: The optimizer as a callable taking a learning_rate callable
      and returning 2 callables, opt_init and opt_update.
    lr_schedule: A learning rate schedule as a function that takes history
      and returns a function from step to learning rate (a float).
    train_steps: int, total number of training steps.
    save_steps: list of integers. Keep a model file at each of the supplied
      save steps.
    eval_steps: int, num of steps per evaluation. If None or 0, eval disabled.
    eval_frequency: int, how often to run evaluation (every eval_frequency
      steps). If None or 0, eval disabled.
    num_devices: how many devices to use (if None, default, use all available)
    random_seed: the random seed to use; time/os dependent if None (default).
    run_debug_step: bool, if True, will run the model and loss without @jit
      for one step.
    save_forward_graph: bool, if True, save forward computation graph to file.

  Returns:
    trax.State
  """
  if save_steps is None:
    save_steps = []
  num_devices = num_devices or jax.lib.xla_bridge.device_count()
  rng = get_random_number_generator_and_set_seed(random_seed)
  gfile.makedirs(output_dir)
  # Create summary writers and history.
  train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
  eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))

  inputs = inputs(num_devices)

  # Setup optimizer and model
  state = restore_state(output_dir)
  history = state.history
  lr_fun = lr_schedule(history)
  opt_init, _ = optimizer(lr_fun)
  model_train = model(mode="train")
  model_predict_eval = model(mode="eval")

  # Setup state
  step = state.step or 0
  rng, init_rng = jax_random.split(rng)
  rngs = jax_random.split(rng, num_devices)
  first_shape = inputs.input_shape[0]
  # If the inputs are a tuple/list, add [-1] (batch) to each element.
  if isinstance(first_shape, (list, tuple)):
    model_input_shape = tuple(
        [tuple([-1] + list(shape)) for shape in inputs.input_shape])
  else:  # Otherwise just add [-1] to the input shape.
    model_input_shape = tuple([-1] + list(inputs.input_shape))
  params = state.params or model_train.initialize(model_input_shape, init_rng)
  opt_state = opt_init(params)
  if num_devices > 1:  # TODO(lukaszkaiser): use everywhere when pmap is stable.
    opt_state = jax.replicate(opt_state)

  # jit model_predict and update so they're fast
  jit_model_predict_eval = _jit_predict_fun(model_predict_eval, num_devices)
  jit_update_fun = _jit_update_fun(
      model_train, loss_fun, optimizer, lr_fun, num_devices)

  train_stream = inputs.train_stream()
  epoch_steps = [train_steps]  # Only training if eval_frequency is 0 or None.
  if eval_frequency and eval_steps > 0:
    epoch_steps = itertools.chain([1,  # first epoch only 1 step
                                   eval_frequency - 1],
                                  itertools.repeat(eval_frequency))
  step_log(step, "Starting training using %d devices" % num_devices)

  # Non-compiled debug step helps find problems in models easier.
  if run_debug_step:
    debug_loss = loss_fun(params, next(train_stream), model_train, rng)
    step_log(step, "Debug step loss %.8f" % debug_loss)

  for epoch, epoch_steps in epochs(train_steps, epoch_steps):
    # Log separator
    print()

    # Timer
    start_time = time.time()

    for _ in range(epoch_steps):
      # Train
      next_train_batch = next(train_stream)
      if num_devices > 1:  # TODO(lukaszkaiser): use everywhere when possible.
        next_train_batch = reshape_by_device(next_train_batch, num_devices)
      opt_state, rngs = jit_update_fun(step, opt_state, next_train_batch, rngs)
      step += 1

      if step in save_steps:
        save_state(State(params=params, step=step, history=history),
                   output_dir,
                   keep=True)

      # LR log
      if step == 1 or step % 10 == 0:
        train_sw.scalar("training/learning rate", lr_fun(step), step=step)

    # Timer
    epoch_time = time.time() - start_time
    step_log(step, "Ran %d train steps in %0.2f secs" %
             (epoch_steps, epoch_time))
    if epoch_steps > 1:
      train_sw.scalar("training/steps per second",
                      epoch_steps / epoch_time, step=step)

    # Print number of parameters
    params = trax_opt.get_params(opt_state)
    if step == 1:
      sizes = layers.sizes(params)
      total_size = layers.nested_reduce(sizes, sum)
      step_log(step, "Total trainable parameters size: %d" % total_size)

    # Evaluate
    evaluate_train_and_eval(
        step=step,
        inputs=inputs,
        predict_fun=functools.partial(jit_model_predict_eval, params=params),
        eval_steps=eval_steps,
        rng=rng,
        train_sw=train_sw,
        eval_sw=eval_sw,
        history=history)

    # Save computation graph
    if save_forward_graph and step == 1:
      # Dump forward computation graph to file.
      computation = jax.xla_computation(model_predict_eval)(
          next_train_batch[0], params=params, rng=rng)
      with gfile.GFile(os.path.join(output_dir, "forward_graph.dot"), "w") as f:
        f.write(computation.GetHloDotGraph())

    # Save state
    save_state(State(params=params, step=step, history=history), output_dir)

    # Save Gin config
    # Gin only tracks the used parameters, so we save it after the first epoch.
    if epoch == 1:
      save_gin(output_dir, train_sw)

    # Update learning rate with new history
    old_lr_fun = lr_fun
    lr_fun = lr_schedule(history)
    if lr_fun != old_lr_fun:  # For performance, only jit if there is a change.
      jit_update_fun = _jit_update_fun(
          model_train, loss_fun, optimizer, lr_fun, num_devices)

    # Flush summary writers
    train_sw.flush()
    eval_sw.flush()

  step_log(step, "Training done")
  return State(params=params, step=step, history=history)
[ "Train", "the", "model", "on", "the", "inputs", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L358-L532
[ "def", "train", "(", "output_dir", ",", "model", "=", "gin", ".", "REQUIRED", ",", "loss_fun", "=", "loss", ",", "inputs", "=", "trax_inputs", ".", "inputs", ",", "optimizer", "=", "trax_opt", ".", "adam", ",", "lr_schedule", "=", "lr", ".", "Multifactor...
272500b6efe353aeb638d2745ed56e519462ca31
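
One scheduling detail in train worth seeing in isolation: with evaluation enabled, epoch boundaries come from chaining a 1-step first epoch (so the first eval happens immediately) with eval_frequency-sized epochs, clipped to train_steps. Reproducing just that arithmetic:

import itertools

eval_frequency, train_steps = 100, 350
schedule = itertools.chain([1, eval_frequency - 1],
                           itertools.repeat(eval_frequency))
step, boundaries = 0, []
for n in schedule:
  n = min(n, train_steps - step)
  step += n
  boundaries.append(step)
  if step >= train_steps:
    break
print(boundaries)  # [1, 100, 200, 300, 350]: eval runs after each of these
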
train
_compute_fans
Computes the number of input and output units for a weight shape.

Args:
  shape: Integer shape tuple or TF tensor shape.

Returns:
  A tuple of scalars (fan_in, fan_out).
tensor2tensor/keras/initializers.py
def _compute_fans(shape):
  """Computes the number of input and output units for a weight shape.

  Args:
    shape: Integer shape tuple or TF tensor shape.

  Returns:
    A tuple of scalars (fan_in, fan_out).
  """
  if len(shape) < 1:  # Just to avoid errors for constants.
    fan_in = fan_out = 1
  elif len(shape) == 1:
    fan_in = fan_out = shape[0]
  elif len(shape) == 2:
    fan_in = shape[0]
    fan_out = shape[1]
  else:
    # Assuming convolution kernels (2D, 3D, or more).
    # kernel shape: (..., input_depth, depth)
    receptive_field_size = 1.
    for dim in shape[:-2]:
      receptive_field_size *= dim
    fan_in = shape[-2] * receptive_field_size
    fan_out = shape[-1] * receptive_field_size
  if isinstance(fan_in, tf.Dimension):
    fan_in = fan_in.value
  if isinstance(fan_out, tf.Dimension):
    fan_out = fan_out.value
  return fan_in, fan_out
[ "Computes", "the", "number", "of", "input", "and", "output", "units", "for", "a", "weight", "shape", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/keras/initializers.py#L32-L60
[ "def", "_compute_fans", "(", "shape", ")", ":", "if", "len", "(", "shape", ")", "<", "1", ":", "# Just to avoid errors for constants.", "fan_in", "=", "fan_out", "=", "1", "elif", "len", "(", "shape", ")", "==", "1", ":", "fan_in", "=", "fan_out", "=", ...
272500b6efe353aeb638d2745ed56e519462ca31
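
For dense kernels the fans are just the two dims; for conv kernels each fan is multiplied by the receptive-field size, the product of the spatial dims. Checking the arithmetic on plain tuples (the tf.Dimension unwrapping is dropped here):

def compute_fans(shape):
  # Simplified version of _compute_fans for plain int tuples.
  if len(shape) < 1:
    return 1, 1
  if len(shape) == 1:
    return shape[0], shape[0]
  if len(shape) == 2:
    return shape[0], shape[1]
  receptive_field_size = 1
  for dim in shape[:-2]:
    receptive_field_size *= dim
  return shape[-2] * receptive_field_size, shape[-1] * receptive_field_size

print(compute_fans((128, 64)))       # dense: (128, 64)
print(compute_fans((3, 3, 16, 32)))  # 3x3 conv: (16*9, 32*9) = (144, 288)
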
train
get
Getter for loading from strings; returns value if can't load.
tensor2tensor/keras/initializers.py
def get(identifier, value=None):
  """Getter for loading from strings; returns value if can't load."""
  if value is None:
    value = identifier
  if identifier is None:
    return None
  elif isinstance(identifier, dict):
    try:
      return deserialize(identifier)
    except ValueError:
      return value
  elif isinstance(identifier, six.string_types):
    config = {'class_name': str(identifier), 'config': {}}
    try:
      return deserialize(config)
    except ValueError:
      return value
  elif callable(identifier):
    return identifier
  return value
[ "Getter", "for", "loading", "from", "strings", ";", "returns", "value", "if", "can", "t", "load", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/keras/initializers.py#L279-L298
[ "def", "get", "(", "identifier", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "value", "=", "identifier", "if", "identifier", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "identifier", ",", "dict", ")", ":",...
272500b6efe353aeb638d2745ed56e519462ca31
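
This is the usual Keras-style getter: dicts and strings go through deserialize, callables pass through, and anything that fails to load falls back to value. A self-contained sketch; the deserialize stub and its one-entry registry are invented for illustration and are not the tensor2tensor API:

REGISTRY = {"zeros": lambda n: [0.0] * n}  # hypothetical initializer registry

def deserialize(config):
  try:
    return REGISTRY[config["class_name"]]
  except KeyError:
    raise ValueError("unknown: %r" % config)

def get(identifier, value=None):
  if value is None:
    value = identifier
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    try:
      return deserialize(identifier)
    except ValueError:
      return value
  if isinstance(identifier, str):
    try:
      return deserialize({"class_name": identifier, "config": {}})
    except ValueError:
      return value
  if callable(identifier):
    return identifier
  return value

print(get("zeros")(3))      # [0.0, 0.0, 0.0]
print(get("no_such_init"))  # falls back: returns the string itself
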
train
Trajectory.add_time_step
Creates a time-step and appends it to the list.

Args:
  **create_time_step_kwargs: Forwarded to
    time_step.TimeStep.create_time_step.
tensor2tensor/envs/trajectory.py
def add_time_step(self, **create_time_step_kwargs):
  """Creates a time-step and appends it to the list.

  Args:
    **create_time_step_kwargs: Forwarded to
      time_step.TimeStep.create_time_step.
  """
  ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
  assert isinstance(ts, time_step.TimeStep)
  self._time_steps.append(ts)
[ "Creates", "a", "time", "-", "step", "and", "appends", "it", "to", "the", "list", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L42-L51
[ "def", "add_time_step", "(", "self", ",", "*", "*", "create_time_step_kwargs", ")", ":", "ts", "=", "time_step", ".", "TimeStep", ".", "create_time_step", "(", "*", "*", "create_time_step_kwargs", ")", "assert", "isinstance", "(", "ts", ",", "time_step", ".", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
Trajectory.change_last_time_step
Replace the last time-step with the given kwargs.
tensor2tensor/envs/trajectory.py
def change_last_time_step(self, **replace_time_step_kwargs):
  """Replace the last time-step with the given kwargs."""
  # Pre-condition: self._time_steps shouldn't be empty.
  assert self._time_steps
  self._time_steps[-1] = self._time_steps[-1].replace(
      **replace_time_step_kwargs)
[ "Replace", "the", "last", "time", "-", "steps", "with", "the", "given", "kwargs", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L53-L59
[ "def", "change_last_time_step", "(", "self", ",", "*", "*", "replace_time_step_kwargs", ")", ":", "# Pre-conditions: self._time_steps shouldn't be empty.", "assert", "self", ".", "_time_steps", "self", ".", "_time_steps", "[", "-", "1", "]", "=", "self", ".", "_time...
272500b6efe353aeb638d2745ed56e519462ca31
train
Trajectory.reward
Returns a tuple of the sums of raw and processed rewards.
tensor2tensor/envs/trajectory.py
def reward(self):
  """Returns a tuple of the sums of raw and processed rewards."""
  raw_rewards, processed_rewards = 0, 0
  for ts in self.time_steps:
    # NOTE: raw_reward and processed_reward are None for the first time-step.
    if ts.raw_reward is not None:
      raw_rewards += ts.raw_reward
    if ts.processed_reward is not None:
      processed_rewards += ts.processed_reward
  return raw_rewards, processed_rewards
[ "Returns", "a", "tuple", "of", "sum", "of", "raw", "and", "processed", "rewards", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L85-L94
[ "def", "reward", "(", "self", ")", ":", "raw_rewards", ",", "processed_rewards", "=", "0", ",", "0", "for", "ts", "in", "self", ".", "time_steps", ":", "# NOTE: raw_reward and processed_reward are None for the first time-step.", "if", "ts", ".", "raw_reward", "is", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
BatchTrajectory._complete_trajectory
Completes the given trajectory at the given index.
tensor2tensor/envs/trajectory.py
def _complete_trajectory(self, trajectory, index):
  """Completes the given trajectory at the given index."""
  assert isinstance(trajectory, Trajectory)

  # This *should* be the case.
  assert trajectory.last_time_step.action is None

  # Add to completed trajectories.
  self._completed_trajectories.append(trajectory)

  # Make a new one to replace it.
  self._trajectories[index] = Trajectory()
[ "Completes", "the", "given", "trajectory", "at", "the", "given", "index", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L133-L145
[ "def", "_complete_trajectory", "(", "self", ",", "trajectory", ",", "index", ")", ":", "assert", "isinstance", "(", "trajectory", ",", "Trajectory", ")", "# This *should* be the case.", "assert", "trajectory", ".", "last_time_step", ".", "action", "is", "None", "#...
272500b6efe353aeb638d2745ed56e519462ca31
train
BatchTrajectory.reset
Resets trajectories at given indices and populates observations.

Reset can either be called right at the beginning, when there are no
time-steps, or to reset a currently active trajectory. If resetting a
currently active trajectory then we save it in self._completed_trajectories.

Args:
  indices: 1-D np.ndarray stating the indices to reset.
  observations: np.ndarray of shape (indices len, obs.shape) of observations
tensor2tensor/envs/trajectory.py
def reset(self, indices, observations):
  """Resets trajectories at given indices and populates observations.

  Reset can either be called right at the beginning, when there are no
  time-steps, or to reset a currently active trajectory. If resetting a
  currently active trajectory then we save it in
  self._completed_trajectories.

  Args:
    indices: 1-D np.ndarray stating the indices to reset.
    observations: np.ndarray of shape (indices len, obs.shape) of
      observations
  """
  # Pre-conditions: indices, observations are np arrays.
  #               : indices is one-dimensional.
  #               : their first dimension (batch) is the same.
  assert isinstance(indices, np.ndarray)
  assert len(indices.shape) == 1
  assert isinstance(observations, np.ndarray)
  assert indices.shape[0] == observations.shape[0]

  for index, observation in zip(indices, observations):
    trajectory = self._trajectories[index]

    # Are we starting a new trajectory at the given index?
    if not trajectory.is_active:
      # Then create a new time-step here with the given observation.
      trajectory.add_time_step(observation=observation)
      # That's all we need to do here.
      continue

    # If however we are resetting a currently active trajectory then we need
    # to put that in self._completed_trajectories and make a new trajectory
    # with the current observation.

    # TODO(afrozm): Should we mark these as done? Or is the done=False and
    # this being the last time-step in the trajectory good enough to
    # recognize that this was reset?

    # Mark trajectory as completed and move into completed_trajectories.
    self._complete_trajectory(trajectory, index)

    # Put the observation in the newly created trajectory.
    # TODO(afrozm): Add 0 reward.
    self._trajectories[index].add_time_step(observation=observation)
[ "Resets", "trajectories", "at", "given", "indices", "and", "populates", "observations", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L147-L192
[ "def", "reset", "(", "self", ",", "indices", ",", "observations", ")", ":", "# Pre-conditions: indices, observations are np arrays.", "# : indices is one-dimensional.", "# : their first dimension (batch) is the same.", "assert", "isinstance", "(", "indices...
272500b6efe353aeb638d2745ed56e519462ca31
train
BatchTrajectory.complete_all_trajectories
Essentially same as reset, but we don't have observations.
tensor2tensor/envs/trajectory.py
def complete_all_trajectories(self):
  """Essentially same as reset, but we don't have observations."""
  for index in range(self.batch_size):
    trajectory = self._trajectories[index]
    assert trajectory.is_active
    self._complete_trajectory(trajectory, index)
[ "Essentially", "same", "as", "reset", "but", "we", "don", "t", "have", "observations", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L194-L199
[ "def", "complete_all_trajectories", "(", "self", ")", ":", "for", "index", "in", "range", "(", "self", ".", "batch_size", ")", ":", "trajectory", "=", "self", ".", "_trajectories", "[", "index", "]", "assert", "trajectory", ".", "is_active", "self", ".", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
BatchTrajectory.step
Record the information obtained from taking a step in all envs.

Records (observation, rewards, done) in a new time-step and actions in the
current time-step.

If any trajectory gets done, we move that trajectory to
completed_trajectories.

Args:
  observations: ndarray of first dimension self.batch_size, which has the
    observations after we've stepped, i.e. s_{t+1} where t is the current
    state.
  raw_rewards: ndarray of first dimension self.batch_size containing raw
    rewards, i.e. r_{t+1}.
  processed_rewards: ndarray of first dimension self.batch_size containing
    processed rewards, i.e. r_{t+1}.
  dones: ndarray of first dimension self.batch_size, containing true at an
    index if that env is done, i.e. d_{t+1}.
  actions: ndarray of first dimension self.batch_size, containing actions
    applied at the current time-step, which lead to the observations,
    rewards and dones at the next time-step, i.e. a_t.
tensor2tensor/envs/trajectory.py
def step(self, observations, raw_rewards, processed_rewards, dones, actions):
  """Record the information obtained from taking a step in all envs.

  Records (observation, rewards, done) in a new time-step and actions in the
  current time-step.

  If any trajectory gets done, we move that trajectory to
  completed_trajectories.

  Args:
    observations: ndarray of first dimension self.batch_size, which has the
      observations after we've stepped, i.e. s_{t+1} where t is the current
      state.
    raw_rewards: ndarray of first dimension self.batch_size containing raw
      rewards, i.e. r_{t+1}.
    processed_rewards: ndarray of first dimension self.batch_size containing
      processed rewards, i.e. r_{t+1}.
    dones: ndarray of first dimension self.batch_size, containing true at an
      index if that env is done, i.e. d_{t+1}.
    actions: ndarray of first dimension self.batch_size, containing actions
      applied at the current time-step, which lead to the observations,
      rewards and dones at the next time-step, i.e. a_t.
  """
  # Pre-conditions
  assert isinstance(observations, np.ndarray)
  assert isinstance(raw_rewards, np.ndarray)
  assert isinstance(processed_rewards, np.ndarray)
  assert isinstance(dones, np.ndarray)
  assert isinstance(actions, np.ndarray)

  # We assume that we step in all envs, i.e. not like reset where we can
  # reset some envs and not others.
  assert self.batch_size == observations.shape[0]
  assert self.batch_size == raw_rewards.shape[0]
  assert self.batch_size == processed_rewards.shape[0]
  assert self.batch_size == dones.shape[0]
  assert self.batch_size == actions.shape[0]

  for index in range(self.batch_size):
    trajectory = self._trajectories[index]

    # NOTE: If the trajectory isn't active, that means it doesn't have any
    # time-steps in it, but we are in step, so the assumption is that it has
    # a prior observation from which we are stepping away.
    # TODO(afrozm): Let's re-visit this if it becomes too restrictive.
    assert trajectory.is_active

    # To this trajectory's last time-step, set actions.
    trajectory.change_last_time_step(action=actions[index])

    # Create a new time-step to add observation, done & rewards (no actions).
    trajectory.add_time_step(
        observation=observations[index],
        done=dones[index],
        raw_reward=raw_rewards[index],
        processed_reward=processed_rewards[index])

    # If the trajectory is completed, i.e. dones[index] == True, then we
    # account for it right-away.
    if dones[index]:
      self._complete_trajectory(trajectory, index)

      # NOTE: The new trajectory at `index` is going to be in-active and
      # `reset` should be called on it.
      assert not self._trajectories[index].is_active
[ "Record", "the", "information", "obtained", "from", "taking", "a", "step", "in", "all", "envs", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L201-L266
[ "def", "step", "(", "self", ",", "observations", ",", "raw_rewards", ",", "processed_rewards", ",", "dones", ",", "actions", ")", ":", "# Pre-conditions", "assert", "isinstance", "(", "observations", ",", "np", ".", "ndarray", ")", "assert", "isinstance", "(",...
272500b6efe353aeb638d2745ed56e519462ca31
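
The alignment convention in step is the subtle part: the action a_t is written into the trajectory's last time-step, and a new time-step then records s_{t+1}, r_{t+1}, d_{t+1}. A toy illustration with a hypothetical namedtuple standing in for time_step.TimeStep:

import collections

# Hypothetical minimal stand-in for time_step.TimeStep, for illustration only.
TimeStep = collections.namedtuple("TimeStep",
                                  ["observation", "action", "raw_reward"])

steps = [TimeStep(observation="s0", action=None, raw_reward=None)]
# step(): write a_0 into the last time-step, then append (s_1, r_1).
steps[-1] = steps[-1]._replace(action="a0")
steps.append(TimeStep(observation="s1", action=None, raw_reward=1.0))
for ts in steps:
  print(ts)
# TimeStep(observation='s0', action='a0', raw_reward=None)
# TimeStep(observation='s1', action=None, raw_reward=1.0)
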
train
BatchTrajectory.num_time_steps
Returns the number of time-steps in completed and incomplete trajectories.
tensor2tensor/envs/trajectory.py
def num_time_steps(self):
  """Returns the number of time-steps in completed and incomplete trajectories."""
  num_time_steps = sum(t.num_time_steps for t in self.trajectories)
  return num_time_steps + self.num_completed_time_steps
def num_time_steps(self): """Returns the number of time-steps in completed and incomplete trajectories.""" num_time_steps = sum(t.num_time_steps for t in self.trajectories) return num_time_steps + self.num_completed_time_steps
[ "Returns", "the", "number", "of", "time", "-", "steps", "in", "completed", "and", "incomplete", "trajectories", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L275-L279
[ "def", "num_time_steps", "(", "self", ")", ":", "num_time_steps", "=", "sum", "(", "t", ".", "num_time_steps", "for", "t", "in", "self", ".", "trajectories", ")", "return", "num_time_steps", "+", "self", ".", "num_completed_time_steps" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
BatchTrajectory.observations_np
Pads the observations in all the trajectories and returns them. Args: boundary: integer, Observations will be padded to (n * boundary) + 1 where n is an integer. Returns: a tuple(padded_observations, time_steps), with shapes: padded_observations: (self.batch_size, n * boundary + 1) + OBS time_steps: integer list of length = self.batch_size
tensor2tensor/envs/trajectory.py
def observations_np(self, boundary=20): """Pads the observations in all the trajectories and returns them. Args: boundary: integer, Observations will be padded to (n * boundary) + 1 where n is an integer. Returns: a tuple(padded_observations, time_steps), with shapes: padded_observations: (self.batch_size, n * boundary + 1) + OBS time_steps: integer list of length = self.batch_size """ list_observations_np_ts = [t.observations_np for t in self.trajectories] # Every element in `list_observations_np_ts` is shaped (t,) + OBS OBS = list_observations_np_ts[0].shape[1:] # pylint: disable=invalid-name num_time_steps = [t.num_time_steps for t in self.trajectories] t_max = max(num_time_steps) # t_max is rounded to the next multiple of `boundary` boundary = int(boundary) bucket_length = boundary * int(np.ceil(float(t_max) / boundary)) def padding_config(obs): # We're padding the first axis only, since that is the time-step. num_to_pad = bucket_length + 1 - obs.shape[0] return [(0, num_to_pad)] + [(0, 0)] * len(OBS) return np.stack([ np.pad(obs, padding_config(obs), "constant") for obs in list_observations_np_ts]), num_time_steps
def observations_np(self, boundary=20): """Pads the observations in all the trajectories and returns them. Args: boundary: integer, Observations will be padded to (n * boundary) + 1 where n is an integer. Returns: a tuple(padded_observations, time_steps), with shapes: padded_observations: (self.batch_size, n * boundary + 1) + OBS time_steps: integer list of length = self.batch_size """ list_observations_np_ts = [t.observations_np for t in self.trajectories] # Every element in `list_observations_np_ts` is shaped (t,) + OBS OBS = list_observations_np_ts[0].shape[1:] # pylint: disable=invalid-name num_time_steps = [t.num_time_steps for t in self.trajectories] t_max = max(num_time_steps) # t_max is rounded to the next multiple of `boundary` boundary = int(boundary) bucket_length = boundary * int(np.ceil(float(t_max) / boundary)) def padding_config(obs): # We're padding the first axis only, since that is the time-step. num_to_pad = bucket_length + 1 - obs.shape[0] return [(0, num_to_pad)] + [(0, 0)] * len(OBS) return np.stack([ np.pad(obs, padding_config(obs), "constant") for obs in list_observations_np_ts]), num_time_steps
[ "Pads", "the", "observations", "in", "all", "the", "trajectories", "and", "returns", "them", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L286-L315
[ "def", "observations_np", "(", "self", ",", "boundary", "=", "20", ")", ":", "list_observations_np_ts", "=", "[", "t", ".", "observations_np", "for", "t", "in", "self", ".", "trajectories", "]", "# Every element in `list_observations_np_ts` is shaped (t,) + OBS", "OBS...
272500b6efe353aeb638d2745ed56e519462ca31
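The bucketing arithmetic above is easiest to verify with concrete numbers. A self-contained numpy sketch of the same padding rule, independent of the class:

import numpy as np

boundary = 20
num_time_steps = [13, 45]            # lengths of two trajectories
t_max = max(num_time_steps)          # 45
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))  # 60
# Each observation sequence is padded along the time axis to bucket_length + 1.
obs = [np.ones((t, 2, 2)) for t in num_time_steps]
padded = np.stack([
    np.pad(o, [(0, bucket_length + 1 - o.shape[0])] + [(0, 0)] * 2, "constant")
    for o in obs])
print(padded.shape)  # (2, 61, 2, 2), i.e. (batch, n * boundary + 1) + OBS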
train
_generate_examples
Generate squad examples. Args: tmp_dir: a string dataset_split: problem.DatasetSplit.TRAIN or problem.DatasetSplit.EVAL Yields: dictionaries representing examples
tensor2tensor/data_generators/squad.py
def _generate_examples(tmp_dir, dataset_split): """Generate squad examples. Args: tmp_dir: a string dataset_split: problem.DatasetSplit.TRAIN or problem.DatasetSplit.EVAL Yields: dictionaries representing examples """ if dataset_split == problem.DatasetSplit.TRAIN: file_name = _TRAINING_SET else: file_name = _DEV_SET squad_file = generator_utils.maybe_download(tmp_dir, file_name, os.path.join(_URL, file_name)) with tf.gfile.GFile(squad_file, mode="r") as fp: squad = json.load(fp) version = squad["version"] for article in squad["data"]: if "title" in article: title = article["title"].strip() else: title = "no title" for paragraph in article["paragraphs"]: context = paragraph["context"].strip() for qa in paragraph["qas"]: question = qa["question"].strip() id_ = qa["id"] answer_starts = [answer["answer_start"] for answer in qa["answers"]] answers = [answer["text"].strip() for answer in qa["answers"]] # Features currently used are "context", "question", and "answers". # Others are extracted here for the ease of future expansions. example = { "version": version, "title": title, "context": context, "question": question, "id": id_, "answer_starts": answer_starts, "answers": answers, "num_answers": len(answers), "is_supervised": True, } yield example
def _generate_examples(tmp_dir, dataset_split): """Generate squad examples. Args: tmp_dir: a string dataset_split: problem.DatasetSplit.TRAIN or problem.DatasetSplit.EVAL Yields: dictionaries representing examples """ if dataset_split == problem.DatasetSplit.TRAIN: file_name = _TRAINING_SET else: file_name = _DEV_SET squad_file = generator_utils.maybe_download(tmp_dir, file_name, os.path.join(_URL, file_name)) with tf.gfile.GFile(squad_file, mode="r") as fp: squad = json.load(fp) version = squad["version"] for article in squad["data"]: if "title" in article: title = article["title"].strip() else: title = "no title" for paragraph in article["paragraphs"]: context = paragraph["context"].strip() for qa in paragraph["qas"]: question = qa["question"].strip() id_ = qa["id"] answer_starts = [answer["answer_start"] for answer in qa["answers"]] answers = [answer["text"].strip() for answer in qa["answers"]] # Features currently used are "context", "question", and "answers". # Others are extracted here for the ease of future expansions. example = { "version": version, "title": title, "context": context, "question": question, "id": id_, "answer_starts": answer_starts, "answers": answers, "num_answers": len(answers), "is_supervised": True, } yield example
[ "Generate", "squad", "examples", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/squad.py#L39-L85
[ "def", "_generate_examples", "(", "tmp_dir", ",", "dataset_split", ")", ":", "if", "dataset_split", "==", "problem", ".", "DatasetSplit", ".", "TRAIN", ":", "file_name", "=", "_TRAINING_SET", "else", ":", "file_name", "=", "_DEV_SET", "squad_file", "=", "generat...
272500b6efe353aeb638d2745ed56e519462ca31
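Since the generator yields plain dicts, downstream code can consume it directly. A hypothetical smoke test — the tmp_dir value is illustrative, the function is module-private, and the first call downloads the SQuAD json, so this only runs with network access:

from tensor2tensor.data_generators import problem, squad

for i, example in enumerate(
    squad._generate_examples("/tmp/t2t_squad", problem.DatasetSplit.EVAL)):
  print(example["question"], "->", example["answers"][:1])
  if i == 2:
    break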
train
self_attention_layer
Create self-attention layer based on hyperparameters.
tensor2tensor/models/mtf_transformer2.py
def self_attention_layer(hparams, prefix): """Create self-attention layer based on hyperparameters.""" return transformer_layers.SelfAttention( num_heads=hparams.get(prefix + "num_heads"), num_memory_heads=hparams.get(prefix + "num_memory_heads"), key_value_size=hparams.d_kv, shared_kv=hparams.get(prefix + "shared_kv", False), attention_kwargs=attention_kwargs_from_hparams(hparams))
def self_attention_layer(hparams, prefix): """Create self-attention layer based on hyperparameters.""" return transformer_layers.SelfAttention( num_heads=hparams.get(prefix + "num_heads"), num_memory_heads=hparams.get(prefix + "num_memory_heads"), key_value_size=hparams.d_kv, shared_kv=hparams.get(prefix + "shared_kv", False), attention_kwargs=attention_kwargs_from_hparams(hparams))
[ "Create", "self", "-", "attention", "layer", "based", "on", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L311-L318
[ "def", "self_attention_layer", "(", "hparams", ",", "prefix", ")", ":", "return", "transformer_layers", ".", "SelfAttention", "(", "num_heads", "=", "hparams", ".", "get", "(", "prefix", "+", "\"num_heads\"", ")", ",", "num_memory_heads", "=", "hparams", ".", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
local_self_attention_layer
Create local self-attention layer based on hyperparameters.
tensor2tensor/models/mtf_transformer2.py
def local_self_attention_layer(hparams, prefix): """Create local self-attention layer based on hyperparameters.""" return transformer_layers.LocalSelfAttention( num_heads=hparams.get(prefix + "num_heads"), num_memory_heads=hparams.get(prefix + "num_memory_heads"), radius=hparams.local_attention_radius, key_value_size=hparams.d_kv, shared_kv=hparams.get(prefix + "shared_kv", False), attention_kwargs=attention_kwargs_from_hparams(hparams))
def local_self_attention_layer(hparams, prefix): """Create local self-attention layer based on hyperparameters.""" return transformer_layers.LocalSelfAttention( num_heads=hparams.get(prefix + "num_heads"), num_memory_heads=hparams.get(prefix + "num_memory_heads"), radius=hparams.local_attention_radius, key_value_size=hparams.d_kv, shared_kv=hparams.get(prefix + "shared_kv", False), attention_kwargs=attention_kwargs_from_hparams(hparams))
[ "Create", "self", "-", "attention", "layer", "based", "on", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L322-L330
[ "def", "local_self_attention_layer", "(", "hparams", ",", "prefix", ")", ":", "return", "transformer_layers", ".", "LocalSelfAttention", "(", "num_heads", "=", "hparams", ".", "get", "(", "prefix", "+", "\"num_heads\"", ")", ",", "num_memory_heads", "=", "hparams"...
272500b6efe353aeb638d2745ed56e519462ca31
train
layer_stack_from_hparams
Create a layer stack based on the hyperparameter values.
tensor2tensor/models/mtf_transformer2.py
def layer_stack_from_hparams(hparams, prefix): """Create a layer stack based on the hyperparameter values.""" layers = hparams.get(prefix + "layers") return transformer.LayerStack( [layers_registry[l](hparams, prefix) for l in layers], dropout_rate=hparams.layer_prepostprocess_dropout, norm_epsilon=hparams.norm_epsilon)
def layer_stack_from_hparams(hparams, prefix): """Create a layer stack based on the hyperparameter values.""" layers = hparams.get(prefix + "layers") return transformer.LayerStack( [layers_registry[l](hparams, prefix) for l in layers], dropout_rate=hparams.layer_prepostprocess_dropout, norm_epsilon=hparams.norm_epsilon)
[ "Create", "a", "layer", "stack", "based", "on", "the", "hyperparameter", "values", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L366-L372
[ "def", "layer_stack_from_hparams", "(", "hparams", ",", "prefix", ")", ":", "layers", "=", "hparams", ".", "get", "(", "prefix", "+", "\"layers\"", ")", "return", "transformer", ".", "LayerStack", "(", "[", "layers_registry", "[", "l", "]", "(", "hparams", ...
272500b6efe353aeb638d2745ed56e519462ca31
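To make the prefix/registry indirection above concrete, here is a toy re-creation with plain dicts; the real layers_registry maps names such as "self_att" and "drd" to the constructors defined earlier in this file, and the toy constructors below are stand-ins.

def self_att(hparams, prefix):
  # Reads a per-stack hparam via the prefix, like the real constructors do.
  return "SelfAttention(num_heads=%d)" % hparams[prefix + "num_heads"]

def drd(hparams, prefix):
  return "DenseReluDense"

layers_registry = {"self_att": self_att, "drd": drd}
hparams = {"decoder_layers": ["self_att", "drd"], "decoder_num_heads": 8}

stack = [layers_registry[l](hparams, "decoder_")
         for l in hparams["decoder_layers"]]
print(stack)  # ['SelfAttention(num_heads=8)', 'DenseReluDense']

The same prefix trick is what lets the encoder_ and decoder_ stacks read different head counts and kv-sharing flags from a single hparams object.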
train
mtf_unitransformer_base
Hyperparameters for single-stack Transformer.
tensor2tensor/models/mtf_transformer2.py
def mtf_unitransformer_base(): """Hyperparameters for single-stack Transformer.""" hparams = mtf_transformer2_base() hparams.add_hparam("autoregressive", True) # HYPERPARAMETERS FOR THE SINGLE LAYER STACK hparams.add_hparam("layers", ["self_att", "drd"] * 6) # number of heads in multihead attention hparams.add_hparam("num_heads", 8) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("num_memory_heads", 0) # share attention keys and values hparams.add_hparam("shared_kv", False) # if nonzero then use local attention hparams.add_hparam("local_attention_radius", 128) return hparams
def mtf_unitransformer_base(): """Hyperparameters for single-stack Transformer.""" hparams = mtf_transformer2_base() hparams.add_hparam("autoregressive", True) # HYPERPARAMETERS FOR THE SINGLE LAYER STACK hparams.add_hparam("layers", ["self_att", "drd"] * 6) # number of heads in multihead attention hparams.add_hparam("num_heads", 8) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("num_memory_heads", 0) # share attention keys and values hparams.add_hparam("shared_kv", False) # if nonzero then use local attention hparams.add_hparam("local_attention_radius", 128) return hparams
[ "Hyperparameters", "for", "single", "-", "stack", "Transformer", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L454-L469
[ "def", "mtf_unitransformer_base", "(", ")", ":", "hparams", "=", "mtf_transformer2_base", "(", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive\"", ",", "True", ")", "# HYPERPARAMETERS FOR THE SINGLE LAYER STACK", "hparams", ".", "add_hparam", "(", "\"layers\""...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_bitransformer_base
Machine translation base configuration.
tensor2tensor/models/mtf_transformer2.py
def mtf_bitransformer_base(): """Machine translation base configuration.""" hparams = mtf_transformer2_base() hparams.max_length = 256 hparams.shared_embedding = True # HYPERPARAMETERS FOR THE LAYER STACKS hparams.add_hparam("encoder_layers", ["self_att", "drd"] * 6) hparams.add_hparam("decoder_layers", ["self_att", "enc_att", "drd"] * 6) hparams.add_hparam("encoder_num_layers", 6) hparams.add_hparam("decoder_num_layers", 6) # number of heads in multihead attention hparams.add_hparam("encoder_num_heads", 8) hparams.add_hparam("decoder_num_heads", 8) hparams.add_hparam("local_attention_radius", 128) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("encoder_num_memory_heads", 0) hparams.add_hparam("decoder_num_memory_heads", 0) # share attention keys and values hparams.add_hparam("encoder_shared_kv", False) hparams.add_hparam("decoder_shared_kv", False) # Parameters for computing the maximum decode length in beam search. # Maximum decode length is: # min(max_length, # decode_length_multiplier * input_length + decode_length_constant) hparams.add_hparam("decode_length_multiplier", 1.5) hparams.add_hparam("decode_length_constant", 10.0) # used during decoding hparams.add_hparam("alpha", 0.6) hparams.sampling_temp = 0.0 return hparams
def mtf_bitransformer_base(): """Machine translation base configuration.""" hparams = mtf_transformer2_base() hparams.max_length = 256 hparams.shared_embedding = True # HYPERPARAMETERS FOR THE LAYER STACKS hparams.add_hparam("encoder_layers", ["self_att", "drd"] * 6) hparams.add_hparam("decoder_layers", ["self_att", "enc_att", "drd"] * 6) hparams.add_hparam("encoder_num_layers", 6) hparams.add_hparam("decoder_num_layers", 6) # number of heads in multihead attention hparams.add_hparam("encoder_num_heads", 8) hparams.add_hparam("decoder_num_heads", 8) hparams.add_hparam("local_attention_radius", 128) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("encoder_num_memory_heads", 0) hparams.add_hparam("decoder_num_memory_heads", 0) # share attention keys and values hparams.add_hparam("encoder_shared_kv", False) hparams.add_hparam("decoder_shared_kv", False) # Parameters for computing the maximum decode length in beam search. # Maximum decode length is: # min(max_length, # decode_length_multiplier * input_length + decode_length_constant) hparams.add_hparam("decode_length_multiplier", 1.5) hparams.add_hparam("decode_length_constant", 10.0) # used during decoding hparams.add_hparam("alpha", 0.6) hparams.sampling_temp = 0.0 return hparams
[ "Machine", "translation", "base", "configuration", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L473-L505
[ "def", "mtf_bitransformer_base", "(", ")", ":", "hparams", "=", "mtf_transformer2_base", "(", ")", "hparams", ".", "max_length", "=", "256", "hparams", ".", "shared_embedding", "=", "True", "# HYPERPARAMETERS FOR THE LAYER STACKS", "hparams", ".", "add_hparam", "(", ...
272500b6efe353aeb638d2745ed56e519462ca31
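The decode-length rule described in the comments above is easy to sanity-check numerically with the default values; a minimal sketch:

max_length = 256
decode_length_multiplier = 1.5
decode_length_constant = 10.0

def max_decode_length(input_length):
  # min(max_length, decode_length_multiplier * input_length + constant)
  return min(max_length,
             int(decode_length_multiplier * input_length
                 + decode_length_constant))

print(max_decode_length(60))   # 100
print(max_decode_length(200))  # 256, capped by max_length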
train
mtf_bitransformer_tiny
Small encoder-decoder model for testing.
tensor2tensor/models/mtf_transformer2.py
def mtf_bitransformer_tiny(): """Small encoder-decoder model for testing.""" hparams = mtf_bitransformer_base() hparams.batch_size = 2 hparams.mesh_shape = "" hparams.d_model = 128 hparams.encoder_layers = ["self_att", "drd"] * 2 hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 2 hparams.num_heads = 4 hparams.d_ff = 512 return hparams
def mtf_bitransformer_tiny(): """Small encoder-decoder model for testing.""" hparams = mtf_bitransformer_base() hparams.batch_size = 2 hparams.mesh_shape = "" hparams.d_model = 128 hparams.encoder_layers = ["self_att", "drd"] * 2 hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 2 hparams.num_heads = 4 hparams.d_ff = 512 return hparams
[ "Small", "encoder", "-", "decoder", "model", "for", "testing", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L521-L531
[ "def", "mtf_bitransformer_tiny", "(", ")", ":", "hparams", "=", "mtf_bitransformer_base", "(", ")", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "mesh_shape", "=", "\"\"", "hparams", ".", "d_model", "=", "128", "hparams", ".", "encoder_layers", "=",...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_unitransformer_all_layers_tiny
Test out all the layers on local CPU.
tensor2tensor/models/mtf_transformer2.py
def mtf_unitransformer_all_layers_tiny(): """Test out all the layers on local CPU.""" hparams = mtf_unitransformer_tiny() hparams.moe_num_experts = 4 hparams.moe_expert_x = 4 hparams.moe_expert_y = 4 hparams.moe_hidden_size = 512 hparams.layers = ["self_att", "local_self_att", "moe_1d", "moe_2d", "drd"] return hparams
def mtf_unitransformer_all_layers_tiny(): """Test out all the layers on local CPU.""" hparams = mtf_unitransformer_tiny() hparams.moe_num_experts = 4 hparams.moe_expert_x = 4 hparams.moe_expert_y = 4 hparams.moe_hidden_size = 512 hparams.layers = ["self_att", "local_self_att", "moe_1d", "moe_2d", "drd"] return hparams
[ "Test", "out", "all", "the", "layers", "on", "local", "CPU", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L535-L543
[ "def", "mtf_unitransformer_all_layers_tiny", "(", ")", ":", "hparams", "=", "mtf_unitransformer_tiny", "(", ")", "hparams", ".", "moe_num_experts", "=", "4", "hparams", ".", "moe_expert_x", "=", "4", "hparams", ".", "moe_expert_y", "=", "4", "hparams", ".", "moe...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_bitransformer_all_layers_tiny
Test out all the layers on local CPU.
tensor2tensor/models/mtf_transformer2.py
def mtf_bitransformer_all_layers_tiny(): """Test out all the layers on local CPU.""" hparams = mtf_bitransformer_tiny() hparams.moe_num_experts = 4 hparams.moe_expert_x = 4 hparams.moe_expert_y = 4 hparams.moe_hidden_size = 512 hparams.encoder_layers = [ "self_att", "local_self_att", "moe_1d", "moe_2d", "drd"] hparams.decoder_layers = [ "self_att", "local_self_att", "enc_att", "moe_1d", "moe_2d", "drd"] return hparams
def mtf_bitransformer_all_layers_tiny(): """Test out all the layers on local CPU.""" hparams = mtf_bitransformer_tiny() hparams.moe_num_experts = 4 hparams.moe_expert_x = 4 hparams.moe_expert_y = 4 hparams.moe_hidden_size = 512 hparams.encoder_layers = [ "self_att", "local_self_att", "moe_1d", "moe_2d", "drd"] hparams.decoder_layers = [ "self_att", "local_self_att", "enc_att", "moe_1d", "moe_2d", "drd"] return hparams
[ "Test", "out", "all", "the", "layers", "on", "local", "CPU", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L547-L558
[ "def", "mtf_bitransformer_all_layers_tiny", "(", ")", ":", "hparams", "=", "mtf_bitransformer_tiny", "(", ")", "hparams", ".", "moe_num_experts", "=", "4", "hparams", ".", "moe_expert_x", "=", "4", "hparams", ".", "moe_expert_y", "=", "4", "hparams", ".", "moe_h...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtr_lm_dense
Series of architectures for language modeling. We assume infinite training data, so no dropout necessary. You can use languagemodel_wiki_noref_v32k_l1k. (1 epoch = ~46000 steps). TODO(noam): find a large enough dataset for these experiments. Args: sz: an integer Returns: a hparams
tensor2tensor/models/mtf_transformer2.py
def mtr_lm_dense(sz): """Series of architectures for language modeling. We assume infinite training data, so no dropout necessary. You can use languagemodel_wiki_noref_v32k_l1k. (1 epoch = ~46000 steps). TODO(noam): find a large enough dataset for these experiments. Args: sz: an integer Returns: a hparams """ n = 2 ** sz hparams = mtf_unitransformer_base() hparams.d_model = 1024 hparams.max_length = 1024 hparams.batch_size = 128 # Parameters for my_layer_stack() hparams.num_hidden_layers = 6 hparams.d_ff = 8192 * n hparams.d_kv = 256 hparams.num_heads = 8 * n hparams.learning_rate_decay_steps = 65536 hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" hparams.mesh_shape = "batch:32" return hparams
def mtr_lm_dense(sz): """Series of architectures for language modeling. We assume infinite training data, so no dropout necessary. You can use languagemodel_wiki_noref_v32k_l1k. (1 epoch = ~46000 steps). TODO(noam): find a large enough dataset for these experiments. Args: sz: an integer Returns: a hparams """ n = 2 ** sz hparams = mtf_unitransformer_base() hparams.d_model = 1024 hparams.max_length = 1024 hparams.batch_size = 128 # Parameters for my_layer_stack() hparams.num_hidden_layers = 6 hparams.d_ff = 8192 * n hparams.d_kv = 256 hparams.num_heads = 8 * n hparams.learning_rate_decay_steps = 65536 hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" hparams.mesh_shape = "batch:32" return hparams
[ "Series", "of", "architectures", "for", "language", "modeling", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L562-L590
[ "def", "mtr_lm_dense", "(", "sz", ")", ":", "n", "=", "2", "**", "sz", "hparams", "=", "mtf_unitransformer_base", "(", ")", "hparams", ".", "d_model", "=", "1024", "hparams", ".", "max_length", "=", "1024", "hparams", ".", "batch_size", "=", "128", "# Pa...
272500b6efe353aeb638d2745ed56e519462ca31
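The sz argument widens the model exponentially via n = 2 ** sz; a quick trace of the derived values (the assumption that registered per-size variants call this with sz = 0, 1, 2 is illustrative):

for sz in range(3):
  n = 2 ** sz
  print(sz, {"d_ff": 8192 * n, "num_heads": 8 * n})
# 0 {'d_ff': 8192, 'num_heads': 8}
# 1 {'d_ff': 16384, 'num_heads': 16}
# 2 {'d_ff': 32768, 'num_heads': 32}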
train
mtr_lm_v1
Model incorporating mixture-of-experts, local and global attention. ~6B parameters 32 experts in 3 hierarchical moe layers. Returns: a hparams
tensor2tensor/models/mtf_transformer2.py
def mtr_lm_v1(): """Model incorporating mixture-of-experts, local and global attention. ~6B parameters 32 experts in 3 hierarchical moe layers. Returns: a hparams """ hparams = mtr_lm_dense(0) hparams.layers = (["local_self_att", "local_self_att", "drd", "self_att", "drd", "local_self_att", "local_self_att", "moe_2d"] * 4)[:-1] hparams.d_kv = 128 hparams.moe_expert_x = 8 hparams.moe_expert_y = 4 hparams.moe_hidden_size = 32768 hparams.d_ff = 2048 hparams.num_memory_heads = 0 hparams.mesh_shape = "b0:4;b1:8" hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" hparams.outer_batch_size = 4 return hparams
def mtr_lm_v1(): """Model incorporating mixture-of-experts, local and global attention. ~6B parameters 32 experts in 3 hierarchical moe layers. Returns: a hparams """ hparams = mtr_lm_dense(0) hparams.layers = (["local_self_att", "local_self_att", "drd", "self_att", "drd", "local_self_att", "local_self_att", "moe_2d"] * 4)[:-1] hparams.d_kv = 128 hparams.moe_expert_x = 8 hparams.moe_expert_y = 4 hparams.moe_hidden_size = 32768 hparams.d_ff = 2048 hparams.num_memory_heads = 0 hparams.mesh_shape = "b0:4;b1:8" hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" hparams.outer_batch_size = 4 return hparams
[ "Model", "incorporating", "mixture", "-", "of", "-", "experts", "local", "and", "global", "attention", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L626-L649
[ "def", "mtr_lm_v1", "(", ")", ":", "hparams", "=", "mtr_lm_dense", "(", "0", ")", "hparams", ".", "layers", "=", "(", "[", "\"local_self_att\"", ",", "\"local_self_att\"", ",", "\"drd\"", ",", "\"self_att\"", ",", "\"drd\"", ",", "\"local_self_att\"", ",", "...
272500b6efe353aeb638d2745ed56e519462ca31
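The layers expression above is worth unpacking: the 8-entry pattern is repeated 4 times and the trailing moe_2d is dropped, which yields the "3 hierarchical moe layers" of the docstring, and moe_expert_x * moe_expert_y gives the 32 experts. Checking the arithmetic:

pattern = ["local_self_att", "local_self_att", "drd",
           "self_att", "drd", "local_self_att",
           "local_self_att", "moe_2d"]
layers = (pattern * 4)[:-1]
print(len(layers))             # 31 layers in total
print(layers.count("moe_2d"))  # 3 mixture-of-experts layers
print(8 * 4)                   # 32 experts per moe layer (expert_x * expert_y)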
train
mtr_tr_dense
Series of machine translation models. All models are trained on sequences of 256 tokens. You can use the dataset translate_enfr_wmt32k_packed. 154000 steps = 3 epochs. Args: sz: an integer Returns: a hparams
tensor2tensor/models/mtf_transformer2.py
def mtr_tr_dense(sz): """Series of machine translation models. All models are trained on sequences of 256 tokens. You can use the dataset translate_enfr_wmt32k_packed. 154000 steps = 3 epochs. Args: sz: an integer Returns: a hparams """ n = 2 ** sz hparams = mtf_bitransformer_base() hparams.d_model = 1024 hparams.max_length = 256 hparams.batch_size = 128 hparams.d_ff = int(4096 * n) hparams.d_kv = 128 hparams.encoder_num_heads = int(8 * n) hparams.decoder_num_heads = int(8 * n) # one epoch for translate_enfr_wmt32k_packed = 51400 steps hparams.learning_rate_decay_steps = 51400 hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" hparams.mesh_shape = "batch:32" hparams.label_smoothing = 0.1 hparams.layer_prepostprocess_dropout = 0.1 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 return hparams
def mtr_tr_dense(sz): """Series of machine translation models. All models are trained on sequences of 256 tokens. You can use the dataset translate_enfr_wmt32k_packed. 154000 steps = 3 epochs. Args: sz: an integer Returns: a hparams """ n = 2 ** sz hparams = mtf_bitransformer_base() hparams.d_model = 1024 hparams.max_length = 256 hparams.batch_size = 128 hparams.d_ff = int(4096 * n) hparams.d_kv = 128 hparams.encoder_num_heads = int(8 * n) hparams.decoder_num_heads = int(8 * n) # one epoch for translate_enfr_wmt32k_packed = 51400 steps hparams.learning_rate_decay_steps = 51400 hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" hparams.mesh_shape = "batch:32" hparams.label_smoothing = 0.1 hparams.layer_prepostprocess_dropout = 0.1 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 return hparams
[ "Series", "of", "machine", "translation", "models", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L660-L691
[ "def", "mtr_tr_dense", "(", "sz", ")", ":", "n", "=", "2", "**", "sz", "hparams", "=", "mtf_bitransformer_base", "(", ")", "hparams", ".", "d_model", "=", "1024", "hparams", ".", "max_length", "=", "256", "hparams", ".", "batch_size", "=", "128", "hparam...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtr_tr_dense_local
With local self-attention in the decoder.
tensor2tensor/models/mtf_transformer2.py
def mtr_tr_dense_local(sz): """With local self-attention in the decoder.""" hparams = mtr_tr_dense(sz) hparams.decoder_layers = ["local_self_att", "enc_att", "drd"] * 6 hparams.local_attention_radius = 32 return hparams
def mtr_tr_dense_local(sz): """With local self-attention in the decoder.""" hparams = mtr_tr_dense(sz) hparams.decoder_layers = ["local_self_att", "enc_att", "drd"] * 6 hparams.local_attention_radius = 32 return hparams
[ "With", "local", "self", "-", "attention", "in", "the", "decoder", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L734-L739
[ "def", "mtr_tr_dense_local", "(", "sz", ")", ":", "hparams", "=", "mtr_tr_dense", "(", "sz", ")", "hparams", ".", "decoder_layers", "=", "[", "\"local_self_att\"", ",", "\"enc_att\"", ",", "\"drd\"", "]", "*", "6", "hparams", ".", "local_attention_radius", "="...
272500b6efe353aeb638d2745ed56e519462ca31
train
recurrent_transformer_decoder
Recurrent decoder function.
tensor2tensor/models/research/vqa_recurrent_self_attention.py
def recurrent_transformer_decoder( decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder", nonpadding=None, save_weights_to=None, make_image_summary=True): """Recurrent decoder function.""" x = decoder_input attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): ffn_unit = functools.partial( # use encoder ffn, since decoder ffn use left padding universal_transformer_util.transformer_encoder_ffn_unit, hparams=hparams, nonpadding_mask=nonpadding) attention_unit = functools.partial( universal_transformer_util.transformer_decoder_attention_unit, hparams=hparams, encoder_output=encoder_output, decoder_self_attention_bias=decoder_self_attention_bias, encoder_decoder_attention_bias=encoder_decoder_attention_bias, attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, save_weights_to=save_weights_to, make_image_summary=make_image_summary) x, extra_output = universal_transformer_util.universal_transformer_layer( x, hparams, ffn_unit, attention_unit) return common_layers.layer_preprocess(x, hparams), extra_output
def recurrent_transformer_decoder( decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder", nonpadding=None, save_weights_to=None, make_image_summary=True): """Recurrent decoder function.""" x = decoder_input attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): ffn_unit = functools.partial( # use encoder ffn, since decoder ffn use left padding universal_transformer_util.transformer_encoder_ffn_unit, hparams=hparams, nonpadding_mask=nonpadding) attention_unit = functools.partial( universal_transformer_util.transformer_decoder_attention_unit, hparams=hparams, encoder_output=encoder_output, decoder_self_attention_bias=decoder_self_attention_bias, encoder_decoder_attention_bias=encoder_decoder_attention_bias, attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, save_weights_to=save_weights_to, make_image_summary=make_image_summary) x, extra_output = universal_transformer_util.universal_transformer_layer( x, hparams, ffn_unit, attention_unit) return common_layers.layer_preprocess(x, hparams), extra_output
[ "Recurrent", "decoder", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_recurrent_self_attention.py#L138-L173
[ "def", "recurrent_transformer_decoder", "(", "decoder_input", ",", "encoder_output", ",", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "hparams", ",", "name", "=", "\"decoder\"", ",", "nonpadding", "=", "None", ",", "save_weights_to", "=", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
vqa_recurrent_self_attention_base
VQA attention baseline hparams.
tensor2tensor/models/research/vqa_recurrent_self_attention.py
def vqa_recurrent_self_attention_base(): """VQA attention baseline hparams.""" hparams = universal_transformer.universal_transformer_base() hparams.batch_size = 1024 hparams.use_fixed_batch_size = True hparams.weight_decay = 0. hparams.clip_grad_norm = 0. # use default initializer # hparams.initializer = "xavier" hparams.learning_rate_schedule = ( "constant*linear_warmup*rsqrt_normalized_decay") hparams.learning_rate_warmup_steps = 8000 hparams.learning_rate_constant = 7e-4 hparams.learning_rate_decay_rate = 0.5 hparams.learning_rate_decay_steps = 50000 # hparams.dropout = 0.5 hparams.summarize_grads = True hparams.summarize_vars = True # not used hparams hparams.label_smoothing = 0.1 hparams.multiply_embedding_mode = "sqrt_depth" # add new hparams # use raw image as input hparams.add_hparam("image_input_type", "feature") hparams.add_hparam("image_model_fn", "resnet_v1_152") hparams.add_hparam("resize_side", 512) hparams.add_hparam("height", 448) hparams.add_hparam("width", 448) hparams.add_hparam("distort", True) hparams.add_hparam("train_resnet", False) # question hidden size # hparams.hidden_size = 512 # hparams.filter_size = 1024 # hparams.num_hidden_layers = 4 # self attention parts # hparams.norm_type = "layer" # hparams.layer_preprocess_sequence = "n" # hparams.layer_postprocess_sequence = "da" # hparams.layer_prepostprocess_dropout = 0.1 # hparams.attention_dropout = 0.1 # hparams.relu_dropout = 0.1 # hparams.add_hparam("pos", "timing") # hparams.add_hparam("num_encoder_layers", 0) # hparams.add_hparam("num_decoder_layers", 0) # hparams.add_hparam("num_heads", 8) # hparams.add_hparam("attention_key_channels", 0) # hparams.add_hparam("attention_value_channels", 0) # hparams.add_hparam("self_attention_type", "dot_product") # iterative part hparams.transformer_ffn_type = "fc" return hparams
def vqa_recurrent_self_attention_base(): """VQA attention baseline hparams.""" hparams = universal_transformer.universal_transformer_base() hparams.batch_size = 1024 hparams.use_fixed_batch_size = True hparams.weight_decay = 0. hparams.clip_grad_norm = 0. # use default initializer # hparams.initializer = "xavier" hparams.learning_rate_schedule = ( "constant*linear_warmup*rsqrt_normalized_decay") hparams.learning_rate_warmup_steps = 8000 hparams.learning_rate_constant = 7e-4 hparams.learning_rate_decay_rate = 0.5 hparams.learning_rate_decay_steps = 50000 # hparams.dropout = 0.5 hparams.summarize_grads = True hparams.summarize_vars = True # not used hparams hparams.label_smoothing = 0.1 hparams.multiply_embedding_mode = "sqrt_depth" # add new hparams # use raw image as input hparams.add_hparam("image_input_type", "feature") hparams.add_hparam("image_model_fn", "resnet_v1_152") hparams.add_hparam("resize_side", 512) hparams.add_hparam("height", 448) hparams.add_hparam("width", 448) hparams.add_hparam("distort", True) hparams.add_hparam("train_resnet", False) # question hidden size # hparams.hidden_size = 512 # hparams.filter_size = 1024 # hparams.num_hidden_layers = 4 # self attention parts # hparams.norm_type = "layer" # hparams.layer_preprocess_sequence = "n" # hparams.layer_postprocess_sequence = "da" # hparams.layer_prepostprocess_dropout = 0.1 # hparams.attention_dropout = 0.1 # hparams.relu_dropout = 0.1 # hparams.add_hparam("pos", "timing") # hparams.add_hparam("num_encoder_layers", 0) # hparams.add_hparam("num_decoder_layers", 0) # hparams.add_hparam("num_heads", 8) # hparams.add_hparam("attention_key_channels", 0) # hparams.add_hparam("attention_value_channels", 0) # hparams.add_hparam("self_attention_type", "dot_product") # iterative part hparams.transformer_ffn_type = "fc" return hparams
[ "VQA", "attention", "baseline", "hparams", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_recurrent_self_attention.py#L177-L233
[ "def", "vqa_recurrent_self_attention_base", "(", ")", ":", "hparams", "=", "universal_transformer", ".", "universal_transformer_base", "(", ")", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "use_fixed_batch_size", "=", "True", "hparams", ".", "weight_dec...
272500b6efe353aeb638d2745ed56e519462ca31
train
batch_norm_relu
Block of batch norm and relu.
tensor2tensor/models/mtf_resnet.py
def batch_norm_relu(inputs, is_training, relu=True): """Block of batch norm and relu.""" inputs = mtf.layers.batch_norm( inputs, is_training, BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, init_zero=(not relu)) if relu: inputs = mtf.relu(inputs) return inputs
def batch_norm_relu(inputs, is_training, relu=True): """Block of batch norm and relu.""" inputs = mtf.layers.batch_norm( inputs, is_training, BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, init_zero=(not relu)) if relu: inputs = mtf.relu(inputs) return inputs
[ "Block", "of", "batch", "norm", "and", "relu", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L38-L48
[ "def", "batch_norm_relu", "(", "inputs", ",", "is_training", ",", "relu", "=", "True", ")", ":", "inputs", "=", "mtf", ".", "layers", ".", "batch_norm", "(", "inputs", ",", "is_training", ",", "BATCH_NORM_DECAY", ",", "epsilon", "=", "BATCH_NORM_EPSILON", ",...
272500b6efe353aeb638d2745ed56e519462ca31
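The init_zero=(not relu) argument follows the common trick of zero-initializing a batch-norm scale so that the affected branch initially outputs zeros. A framework-agnostic numpy sketch of why that makes a residual sum start as an identity — the batch_norm helper below is illustrative, not the mtf implementation:

import numpy as np

def batch_norm(x, gamma, beta, eps=1e-5):
  mean, var = x.mean(axis=0), x.var(axis=0)
  return gamma * (x - mean) / np.sqrt(var + eps) + beta

x = np.random.randn(8, 4)
branch = batch_norm(x, gamma=np.zeros(4), beta=np.zeros(4))  # init_zero=True
print(np.allclose(x + branch, x))  # True: the branch contributes nothing yet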
train
bottleneck_block
Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: a `mtf.Tensor` of shape `[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`. filters: `int` number of filters for the first and third convolutions. Note that the second convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training mode. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. row_blocks_dim: a mtf.Dimension, row dimension which is spatially partitioned along mesh axis col_blocks_dim: a mtf.Dimension, column dimension which is spatially partitioned along mesh axis Returns: The output `Tensor` of the block.
tensor2tensor/models/mtf_resnet.py
def bottleneck_block(inputs, filters, is_training, strides, projection_shortcut=None, row_blocks_dim=None, col_blocks_dim=None): """Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: a `mtf.Tensor` of shape `[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`. filters: `int` number of filters for the first and third convolutions. Note that the second convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training mode. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. row_blocks_dim: a mtf.Dimension, row dimension which is spatially partitioned along mesh axis col_blocks_dim: a mtf.Dimension, column dimension which is spatially partitioned along mesh axis Returns: The output `Tensor` of the block. """ shortcut = inputs filter_h_dim = mtf.Dimension("filter_height", 3) filter_w_dim = mtf.Dimension("filter_width", 3) one_h_dim = mtf.Dimension("filter_height", 1) one_w_dim = mtf.Dimension("filter_width", 1) if projection_shortcut is not None: filters_dim = mtf.Dimension("filtersp", filters) kernel = mtf.get_variable( inputs.mesh, "kernel", mtf.Shape( [one_h_dim, one_w_dim, inputs.shape.dims[-1], filters_dim])) shortcut = projection_shortcut(inputs, kernel) # First conv block filters1_dim = mtf.Dimension("filters1", filters) kernel1 = mtf.get_variable( inputs.mesh, "kernel1", mtf.Shape( [one_h_dim, one_w_dim, inputs.shape.dims[-1], filters1_dim])) inputs = mtf.conv2d_with_blocks( inputs, kernel1, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim) # TODO(nikip): Add Dropout? inputs = batch_norm_relu(inputs, is_training) # Second conv block filters2_dim = mtf.Dimension("filters2", 4*filters) kernel2 = mtf.get_variable( inputs.mesh, "kernel2", mtf.Shape( [filter_h_dim, filter_w_dim, filters1_dim, filters2_dim])) inputs = mtf.conv2d_with_blocks( inputs, kernel2, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim) inputs = batch_norm_relu(inputs, is_training) # Third wide conv filter block filters3_dim = mtf.Dimension("filters3", filters) filters3_kernel = mtf.get_variable( inputs.mesh, "wide_kernel", mtf.Shape( [one_h_dim, one_w_dim, filters2_dim, filters3_dim])) inputs = mtf.conv2d_with_blocks( inputs, filters3_kernel, strides, padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim) # TODO(nikip): Although the original resnet code has this batch norm, in our # setup this is causing no gradients to be passed. Investigate further. # inputs = batch_norm_relu(inputs, is_training, relu=True) # TODO(nikip): Maybe add residual with a projection? return mtf.relu( shortcut + mtf.rename_dimension( inputs, inputs.shape.dims[-1].name, shortcut.shape.dims[-1].name))
def bottleneck_block(inputs, filters, is_training, strides, projection_shortcut=None, row_blocks_dim=None, col_blocks_dim=None): """Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: a `mtf.Tensor` of shape `[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`. filters: `int` number of filters for the first and third convolutions. Note that the second convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training mode. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. row_blocks_dim: a mtf.Dimension, row dimension which is spatially partitioned along mesh axis col_blocks_dim: a mtf.Dimension, column dimension which is spatially partitioned along mesh axis Returns: The output `Tensor` of the block. """ shortcut = inputs filter_h_dim = mtf.Dimension("filter_height", 3) filter_w_dim = mtf.Dimension("filter_width", 3) one_h_dim = mtf.Dimension("filter_height", 1) one_w_dim = mtf.Dimension("filter_width", 1) if projection_shortcut is not None: filters_dim = mtf.Dimension("filtersp", filters) kernel = mtf.get_variable( inputs.mesh, "kernel", mtf.Shape( [one_h_dim, one_w_dim, inputs.shape.dims[-1], filters_dim])) shortcut = projection_shortcut(inputs, kernel) # First conv block filters1_dim = mtf.Dimension("filters1", filters) kernel1 = mtf.get_variable( inputs.mesh, "kernel1", mtf.Shape( [one_h_dim, one_w_dim, inputs.shape.dims[-1], filters1_dim])) inputs = mtf.conv2d_with_blocks( inputs, kernel1, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim) # TODO(nikip): Add Dropout? inputs = batch_norm_relu(inputs, is_training) # Second conv block filters2_dim = mtf.Dimension("filters2", 4*filters) kernel2 = mtf.get_variable( inputs.mesh, "kernel2", mtf.Shape( [filter_h_dim, filter_w_dim, filters1_dim, filters2_dim])) inputs = mtf.conv2d_with_blocks( inputs, kernel2, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim) inputs = batch_norm_relu(inputs, is_training) # Third wide conv filter block filters3_dim = mtf.Dimension("filters3", filters) filters3_kernel = mtf.get_variable( inputs.mesh, "wide_kernel", mtf.Shape( [one_h_dim, one_w_dim, filters2_dim, filters3_dim])) inputs = mtf.conv2d_with_blocks( inputs, filters3_kernel, strides, padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim) # TODO(nikip): Although the original resnet code has this batch norm, in our # setup this is causing no gradients to be passed. Investigate further. # inputs = batch_norm_relu(inputs, is_training, relu=True) # TODO(nikip): Maybe add residual with a projection? return mtf.relu( shortcut + mtf.rename_dimension( inputs, inputs.shape.dims[-1].name, shortcut.shape.dims[-1].name))
[ "Bottleneck", "block", "variant", "for", "residual", "networks", "with", "BN", "after", "convolutions", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L51-L142
[ "def", "bottleneck_block", "(", "inputs", ",", "filters", ",", "is_training", ",", "strides", ",", "projection_shortcut", "=", "None", ",", "row_blocks_dim", "=", "None", ",", "col_blocks_dim", "=", "None", ")", ":", "shortcut", "=", "inputs", "filter_h_dim", ...
272500b6efe353aeb638d2745ed56e519462ca31
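A quick trace of the channel widths this block actually produces for filters=64. Unlike the canonical ResNet bottleneck (narrow 3x3 in the middle, 4x-wide final 1x1), here the middle 3x3 is the wide convolution and the block's output width is filters, matching the docstring note above:

filters = 64
widths = {
    "kernel1 (1x1)": filters,       # filters1
    "kernel2 (3x3)": 4 * filters,   # filters2, the wide one
    "wide_kernel (1x1)": filters,   # filters3, the block's output width
}
print(widths)  # {'kernel1 (1x1)': 64, 'kernel2 (3x3)': 256, 'wide_kernel (1x1)': 64}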
train
block_layer
Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str` name for the Tensor output of the block layer. row_blocks_dim: a mtf.Dimension, row dimension which is spatially partitioned along mesh axis col_blocks_dim: a mtf.Dimension, column dimension which is spatially partitioned along mesh axis Returns: The output `Tensor` of the block layer.
tensor2tensor/models/mtf_resnet.py
def block_layer(inputs, filters, blocks, strides, is_training, name, row_blocks_dim=None, col_blocks_dim=None): """Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str` name for the Tensor output of the block layer. row_blocks_dim: a mtf.Dimension, row dimension which is spatially partitioned along mesh axis col_blocks_dim: a mtf.Dimension, column dimension which is spatially partitioned along mesh axis Returns: The output `Tensor` of the block layer. """ with tf.variable_scope(name, default_name="block_layer"): # Only the first block per block_layer uses projection_shortcut and strides def projection_shortcut(inputs, kernel): """Project identity branch.""" inputs = mtf.conv2d_with_blocks( inputs, kernel, strides=strides, padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim) return batch_norm_relu( inputs, is_training, relu=False) inputs = bottleneck_block( inputs, filters, is_training, strides=strides, projection_shortcut=projection_shortcut, row_blocks_dim=row_blocks_dim, col_blocks_dim=col_blocks_dim) for i in range(1, blocks): with tf.variable_scope("bottleneck_%d" % i): inputs = bottleneck_block( inputs, filters, is_training, strides=[1, 1, 1, 1], projection_shortcut=None, row_blocks_dim=row_blocks_dim, col_blocks_dim=col_blocks_dim) return inputs
def block_layer(inputs, filters, blocks, strides, is_training, name, row_blocks_dim=None, col_blocks_dim=None): """Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str` name for the Tensor output of the block layer. row_blocks_dim: a mtf.Dimension, row dimension which is spatially partitioned along mesh axis col_blocks_dim: a mtf.Dimension, column dimension which is spatially partitioned along mesh axis Returns: The output `Tensor` of the block layer. """ with tf.variable_scope(name, default_name="block_layer"): # Only the first block per block_layer uses projection_shortcut and strides def projection_shortcut(inputs, kernel): """Project identity branch.""" inputs = mtf.conv2d_with_blocks( inputs, kernel, strides=strides, padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim) return batch_norm_relu( inputs, is_training, relu=False) inputs = bottleneck_block( inputs, filters, is_training, strides=strides, projection_shortcut=projection_shortcut, row_blocks_dim=row_blocks_dim, col_blocks_dim=col_blocks_dim) for i in range(1, blocks): with tf.variable_scope("bottleneck_%d" % i): inputs = bottleneck_block( inputs, filters, is_training, strides=[1, 1, 1, 1], projection_shortcut=None, row_blocks_dim=row_blocks_dim, col_blocks_dim=col_blocks_dim) return inputs
[ "Creates", "one", "layer", "of", "blocks", "for", "the", "ResNet", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L145-L204
[ "def", "block_layer", "(", "inputs", ",", "filters", ",", "blocks", ",", "strides", ",", "is_training", ",", "name", ",", "row_blocks_dim", "=", "None", ",", "col_blocks_dim", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", ...
272500b6efe353aeb638d2745ed56e519462ca31
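The comment in the code above ("Only the first block per block_layer uses projection_shortcut and strides") fixes the stride schedule; a tiny sketch of what block_layer produces for blocks=4, strides=2:

blocks, strides = 4, 2
# (stride, uses_projection_shortcut) per bottleneck block: the first block
# downsamples and projects, the remaining blocks keep resolution.
schedule = [(strides, True)] + [(1, False)] * (blocks - 1)
print(schedule)  # [(2, True), (1, False), (1, False), (1, False)]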
train
mtf_resnet_base
Set of hyperparameters.
tensor2tensor/models/mtf_resnet.py
def mtf_resnet_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.no_data_parallelism = True hparams.use_fixed_batch_size = True hparams.batch_size = 32 hparams.max_length = 3072 hparams.hidden_size = 256 hparams.label_smoothing = 0.0 # 8-way model-parallelism hparams.add_hparam("mesh_shape", "batch:8") hparams.add_hparam("layout", "batch:batch") hparams.add_hparam("filter_size", 1024) hparams.add_hparam("num_layers", 6) # Share weights between input and target embeddings hparams.shared_embedding = True hparams.shared_embedding_and_softmax_weights = True hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.add_hparam("d_kv", 32) # Image related hparams hparams.add_hparam("img_len", 32) hparams.add_hparam("num_channels", 3) hparams.add_hparam("row_blocks", 1) hparams.add_hparam("col_blocks", 1) hparams.add_hparam("rows_size", 32) hparams.add_hparam("cols_size", 32) # Model-specific parameters hparams.add_hparam("layer_sizes", [3, 4, 6, 3]) hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512]) hparams.add_hparam("is_cifar", False) # Variable init hparams.initializer = "normal_unit_scaling" hparams.initializer_gain = 2. # TODO(nikip): Change optimization scheme? hparams.learning_rate = 0.1 return hparams
def mtf_resnet_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.no_data_parallelism = True hparams.use_fixed_batch_size = True hparams.batch_size = 32 hparams.max_length = 3072 hparams.hidden_size = 256 hparams.label_smoothing = 0.0 # 8-way model-parallelism hparams.add_hparam("mesh_shape", "batch:8") hparams.add_hparam("layout", "batch:batch") hparams.add_hparam("filter_size", 1024) hparams.add_hparam("num_layers", 6) # Share weights between input and target embeddings hparams.shared_embedding = True hparams.shared_embedding_and_softmax_weights = True hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.add_hparam("d_kv", 32) # Image related hparams hparams.add_hparam("img_len", 32) hparams.add_hparam("num_channels", 3) hparams.add_hparam("row_blocks", 1) hparams.add_hparam("col_blocks", 1) hparams.add_hparam("rows_size", 32) hparams.add_hparam("cols_size", 32) # Model-specific parameters hparams.add_hparam("layer_sizes", [3, 4, 6, 3]) hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512]) hparams.add_hparam("is_cifar", False) # Variable init hparams.initializer = "normal_unit_scaling" hparams.initializer_gain = 2. # TODO(nikip): Change optimization scheme? hparams.learning_rate = 0.1 return hparams
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L333-L376
[ "def", "mtf_resnet_base", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "no_data_parallelism", "=", "True", "hparams", ".", "use_fixed_batch_size", "=", "True", "hparams", ".", "batch_size", "=", "32", "hparams"...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_resnet_tiny
Catch bugs locally...
tensor2tensor/models/mtf_resnet.py
def mtf_resnet_tiny(): """Catch bugs locally...""" hparams = mtf_resnet_base() hparams.num_layers = 2 hparams.hidden_size = 64 hparams.filter_size = 64 hparams.batch_size = 16 # data parallelism and model-parallelism hparams.col_blocks = 1 hparams.mesh_shape = "batch:2" hparams.layout = "batch:batch" hparams.layer_sizes = [1, 2, 3] hparams.filter_sizes = [64, 64, 64] return hparams
def mtf_resnet_tiny(): """Catch bugs locally...""" hparams = mtf_resnet_base() hparams.num_layers = 2 hparams.hidden_size = 64 hparams.filter_size = 64 hparams.batch_size = 16 # data parallelism and model-parallelism hparams.col_blocks = 1 hparams.mesh_shape = "batch:2" hparams.layout = "batch:batch" hparams.layer_sizes = [1, 2, 3] hparams.filter_sizes = [64, 64, 64] return hparams
[ "Catch", "bugs", "locally", "..." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L380-L393
[ "def", "mtf_resnet_tiny", "(", ")", ":", "hparams", "=", "mtf_resnet_base", "(", ")", "hparams", ".", "num_layers", "=", "2", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "filter_size", "=", "64", "hparams", ".", "batch_size", "=", "16", "# da...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_resnet_single
Small single parameters.
tensor2tensor/models/mtf_resnet.py
def mtf_resnet_single(): """Small single parameters.""" hparams = mtf_resnet_tiny() hparams.mesh_shape = "" hparams.layout = "" hparams.hidden_size = 32 hparams.filter_size = 32 hparams.batch_size = 1 hparams.num_encoder_layers = 1 hparams.num_layers = 1 hparams.block_length = 16 return hparams
def mtf_resnet_single(): """Small single parameters.""" hparams = mtf_resnet_tiny() hparams.mesh_shape = "" hparams.layout = "" hparams.hidden_size = 32 hparams.filter_size = 32 hparams.batch_size = 1 hparams.num_encoder_layers = 1 hparams.num_layers = 1 hparams.block_length = 16 return hparams
[ "Small", "single", "parameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L397-L408
[ "def", "mtf_resnet_single", "(", ")", ":", "hparams", "=", "mtf_resnet_tiny", "(", ")", "hparams", ".", "mesh_shape", "=", "\"\"", "hparams", ".", "layout", "=", "\"\"", "hparams", ".", "hidden_size", "=", "32", "hparams", ".", "filter_size", "=", "32", "h...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_resnet_base_single
Small single parameters.
tensor2tensor/models/mtf_resnet.py
def mtf_resnet_base_single(): """Small single parameters.""" hparams = mtf_resnet_base() hparams.num_layers = 6 hparams.filter_size = 256 hparams.block_length = 128 hparams.mesh_shape = "" hparams.layout = "" return hparams
def mtf_resnet_base_single(): """Small single parameters.""" hparams = mtf_resnet_base() hparams.num_layers = 6 hparams.filter_size = 256 hparams.block_length = 128 hparams.mesh_shape = "" hparams.layout = "" return hparams
[ "Small", "single", "parameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L412-L420
[ "def", "mtf_resnet_base_single", "(", ")", ":", "hparams", "=", "mtf_resnet_base", "(", ")", "hparams", ".", "num_layers", "=", "6", "hparams", ".", "filter_size", "=", "256", "hparams", ".", "block_length", "=", "128", "hparams", ".", "mesh_shape", "=", "\"...
272500b6efe353aeb638d2745ed56e519462ca31
train
mtf_resnet_base_cifar
Data parallel CIFAR parameters.
tensor2tensor/models/mtf_resnet.py
def mtf_resnet_base_cifar(): """Data parallel CIFAR parameters.""" hparams = mtf_resnet_base() hparams.mesh_shape = "batch:32" hparams.layout = "batch:batch" hparams.batch_size = 8 hparams.num_layers = 12 hparams.block_length = 256 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.learning_rate = 0.5 hparams.learning_rate_warmup_steps = 4000 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 hparams.unconditional = True return hparams
def mtf_resnet_base_cifar(): """Data parallel CIFAR parameters.""" hparams = mtf_resnet_base() hparams.mesh_shape = "batch:32" hparams.layout = "batch:batch" hparams.batch_size = 8 hparams.num_layers = 12 hparams.block_length = 256 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.learning_rate = 0.5 hparams.learning_rate_warmup_steps = 4000 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 hparams.unconditional = True return hparams
[ "Data", "parallel", "CIFAR", "parameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_resnet.py#L424-L440
[ "def", "mtf_resnet_base_cifar", "(", ")", ":", "hparams", "=", "mtf_resnet_base", "(", ")", "hparams", ".", "mesh_shape", "=", "\"batch:32\"", "hparams", ".", "layoyt", "=", "\"batch:batch\"", "hparams", ".", "batch_size", "=", "8", "hparams", ".", "num_layers",...
272500b6efe353aeb638d2745ed56e519462ca31
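The mesh_shape and layout strings above encode named mesh dimensions as "name:size" pairs. An illustrative parser for that convention, assuming semicolon-separated pairs as in mesh_tensorflow shape strings (mesh_tensorflow's own mtf.convert_to_shape is the authoritative parser and is not reproduced here):

# Hypothetical parser for mesh-shape strings such as "batch:32".
def parse_mesh_shape(spec):
    dims = {}
    for part in spec.split(";"):   # assumption: ";" separates dimensions
        if not part:
            continue
        name, size = part.split(":")
        dims[name] = int(size)
    return dims

print(parse_mesh_shape("batch:32"))  # {'batch': 32}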
train
universal_transformer_encoder
Universal Transformer encoder function. Prepares all the arguments and the inputs and passes them to a universal_transformer_layer to encode the encoder_input. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This must either be passed in, which we do for "packed" datasets, or inferred from encoder_self_attention_bias. The knowledge about padding is used for pad_remover (efficiency) and to mask out padding in convolutional layers. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: y: a Tensor as the output of the encoder extra_output: which can be used to pass extra information to the body
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_encoder(encoder_input, encoder_self_attention_bias, hparams, name="encoder", nonpadding=None, save_weights_to=None, make_image_summary=True): """Universal Transformer encoder function. Prepares all the arguments and the inputs and passes them to a universal_transformer_layer to encode the encoder_input. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This must either be passed in, which we do for "packed" datasets, or inferred from encoder_self_attention_bias. The knowledge about padding is used for pad_remover (efficiency) and to mask out padding in convolutional layers. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: y: a Tensor as the output of the encoder extra_output: which can be used to pass extra information to the body """ x = encoder_input attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): if nonpadding is not None: padding = 1.0 - nonpadding else: padding = common_attention.attention_bias_to_padding( encoder_self_attention_bias) nonpadding = 1.0 - padding pad_remover = None if hparams.use_pad_remover and not common_layers.is_xla_compiled(): pad_remover = expert_utils.PadRemover(padding) ffn_unit = functools.partial( transformer_encoder_ffn_unit, hparams=hparams, nonpadding_mask=nonpadding, pad_remover=pad_remover) attention_unit = functools.partial( transformer_encoder_attention_unit, hparams=hparams, encoder_self_attention_bias=encoder_self_attention_bias, attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, save_weights_to=save_weights_to, make_image_summary=make_image_summary) x, extra_output = universal_transformer_layer( x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover) return common_layers.layer_preprocess(x, hparams), extra_output
def universal_transformer_encoder(encoder_input, encoder_self_attention_bias, hparams, name="encoder", nonpadding=None, save_weights_to=None, make_image_summary=True): """Universal Transformer encoder function. Prepares all the arguments and the inputs and passes them to a universal_transformer_layer to encode the encoder_input. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This must either be passed in, which we do for "packed" datasets, or inferred from encoder_self_attention_bias. The knowledge about padding is used for pad_remover (efficiency) and to mask out padding in convolutional layers. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: y: a Tensor as the output of the encoder extra_output: which can be used to pass extra information to the body """ x = encoder_input attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): if nonpadding is not None: padding = 1.0 - nonpadding else: padding = common_attention.attention_bias_to_padding( encoder_self_attention_bias) nonpadding = 1.0 - padding pad_remover = None if hparams.use_pad_remover and not common_layers.is_xla_compiled(): pad_remover = expert_utils.PadRemover(padding) ffn_unit = functools.partial( transformer_encoder_ffn_unit, hparams=hparams, nonpadding_mask=nonpadding, pad_remover=pad_remover) attention_unit = functools.partial( transformer_encoder_attention_unit, hparams=hparams, encoder_self_attention_bias=encoder_self_attention_bias, attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, save_weights_to=save_weights_to, make_image_summary=make_image_summary) x, extra_output = universal_transformer_layer( x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover) return common_layers.layer_preprocess(x, hparams), extra_output
[ "Universal", "Transformer", "encoder", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L62-L128
[ "def", "universal_transformer_encoder", "(", "encoder_input", ",", "encoder_self_attention_bias", ",", "hparams", ",", "name", "=", "\"encoder\"", ",", "nonpadding", "=", "None", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ")", ":", ...
272500b6efe353aeb638d2745ed56e519462ca31
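A small NumPy sketch of the padding bookkeeping in the encoder above: nonpadding marks real tokens with 1.0, and the padding mask is its elementwise complement:

import numpy as np

nonpadding = np.array([[1.0, 1.0, 1.0, 0.0, 0.0]])  # batch=1, length=5
padding = 1.0 - nonpadding                          # as in the encoder above
print(padding)  # [[0. 0. 0. 1. 1.]]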
train
universal_transformer_layer
Core function applying the universal transformer layer. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor, extra output (can be memory, ponder time, etc.) Raises: ValueError: Unknown recurrence type
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None): """Core function applying the universal transformer layer. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor, extra output (can be memory, ponder time, etc.) Raises: ValueError: Unknown recurrence type """ def add_vanilla_transformer_layer(x, num_layers, name): """Passes the input through num_layers of vanilla transformer layers. Args: x: input num_layers: number of layers name: string, prefix of layer names Returns: output of vanilla_transformer_layer """ if hparams.add_position_timing_signal: # In case of add_position_timing_signal=true, we set hparams.pos=None # and add position timing signal at the beginning of each step, so for # the vanilla transformer, we need to add timing signal here. x = common_attention.add_timing_signal_1d(x) for layer in range(num_layers): with tf.variable_scope(name + "layer_%d" % layer): x = ffn_unit(attention_unit(x)) return x with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type): if (hparams.mix_with_transformer and "before_ut" in hparams.mix_with_transformer): x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers, "before_ut_") if hparams.recurrence_type == "act": output, extra_output = universal_transformer_act( x, hparams, ffn_unit, attention_unit) else: # for all the other recurrency types with fixed number of steps ut_function, initializer = get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover) output, _, extra_output = tf.foldl( ut_function, tf.range(hparams.num_rec_steps), initializer=initializer) # Right now, this is only possible when the transition function is an lstm if (hparams.recurrence_type == "lstm" and hparams.get("use_memory_as_final_state", False)): output = extra_output if (hparams.mix_with_transformer and "after_ut" in hparams.mix_with_transformer): output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers, "after_ut_") return output, extra_output
def universal_transformer_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None): """Core function applying the universal transformer layer. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor, extra output (can be memory, ponder time, etc.) Raises: ValueError: Unknown recurrence type """ def add_vanilla_transformer_layer(x, num_layers, name): """Passes the input through num_layers of vanilla transformer layers. Args: x: input num_layers: number of layers name: string, prefix of layer names Returns: output of vanilla_transformer_layer """ if hparams.add_position_timing_signal: # In case of add_position_timing_signal=true, we set hparams.pos=None # and add position timing signal at the beginning of each step, so for # the vanilla transformer, we need to add timing signal here. x = common_attention.add_timing_signal_1d(x) for layer in range(num_layers): with tf.variable_scope(name + "layer_%d" % layer): x = ffn_unit(attention_unit(x)) return x with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type): if (hparams.mix_with_transformer and "before_ut" in hparams.mix_with_transformer): x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers, "before_ut_") if hparams.recurrence_type == "act": output, extra_output = universal_transformer_act( x, hparams, ffn_unit, attention_unit) else: # for all the other recurrency types with fixed number of steps ut_function, initializer = get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover) output, _, extra_output = tf.foldl( ut_function, tf.range(hparams.num_rec_steps), initializer=initializer) # Right now, this is only possible when the transition function is an lstm if (hparams.recurrence_type == "lstm" and hparams.get("use_memory_as_final_state", False)): output = extra_output if (hparams.mix_with_transformer and "after_ut" in hparams.mix_with_transformer): output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers, "after_ut_") return output, extra_output
[ "Core", "function", "applying", "the", "universal", "transformer", "layer", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L194-L265
[ "def", "universal_transformer_layer", "(", "x", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ",", "pad_remover", "=", "None", ")", ":", "def", "add_vanilla_transformer_layer", "(", "x", ",", "num_layers", ",", "name", ")", ":", "\"\"\"Passes the input t...
272500b6efe353aeb638d2745ed56e519462ca31
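A pure-Python analogue of the fixed-step recurrence above: tf.foldl threads a (state, input, memory) carry through the same ut_function at every step, which is what gives the Universal Transformer its depth-wise weight sharing. The stand-in ut_function below only increments the state; it is illustrative, not the real layer:

import functools

def ut_function(carry, step):
    state, inputs, memory = carry
    new_state = state + 1          # stand-in for ffn_unit(attention_unit(...))
    return (new_state, inputs, memory)

initializer = (0, "inputs", "memory")  # (state, input, memory), as above
num_rec_steps = 4
output, _, extra = functools.reduce(ut_function, range(num_rec_steps), initializer)
print(output)  # 4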
train
get_ut_layer
Provides the function that is used in universal transformer steps. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: ut_function and the ut_initializer Raises: ValueError: Unknown recurrence type
tensor2tensor/models/research/universal_transformer_util.py
def get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None): """Provides the function that is used in universal transformer steps. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: ut_function and the ut_initializer Raises: ValueError: Unknown recurrence type """ if hparams.recurrence_type == "basic": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_basic, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit) elif hparams.recurrence_type == "highway": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_highway, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) elif hparams.recurrence_type == "skip": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_skip, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) elif hparams.recurrence_type == "dwa": # memory contains the original input + all the states memory_size = hparams.num_rec_steps + 1 # prepare initializer: memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x)) # filling the first slot with the original input memory = fill_memory_slot(memory_empty, x, 0) ut_initializer = (x, x, memory) # (state, input, memory) ut_function = functools.partial( universal_transformer_depthwise_attention, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit) elif hparams.recurrence_type == "gru": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_with_gru_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) elif hparams.recurrence_type == "lstm": memory = tf.zeros(common_layers.shape_list(x)) ut_initializer = (x, x, memory) # (state, input, memory) ut_function = functools.partial( universal_transformer_with_lstm_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) else: raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type) return ut_function, ut_initializer
def get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None): """Provides the function that is used in universal transformer steps. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: ut_function and the ut_initializer Raises: ValueError: Unknown recurrence type """ if hparams.recurrence_type == "basic": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_basic, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit) elif hparams.recurrence_type == "highway": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_highway, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) elif hparams.recurrence_type == "skip": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_skip, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) elif hparams.recurrence_type == "dwa": # memory contains the original input + all the states memory_size = hparams.num_rec_steps + 1 # prepare initializer: memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x)) # filling the first slot with the original input memory = fill_memory_slot(memory_empty, x, 0) ut_initializer = (x, x, memory) # (state, input, memory) ut_function = functools.partial( universal_transformer_depthwise_attention, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit) elif hparams.recurrence_type == "gru": ut_initializer = (x, x, x) # (state, input, memory) ut_function = functools.partial( universal_transformer_with_gru_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) elif hparams.recurrence_type == "lstm": memory = tf.zeros(common_layers.shape_list(x)) ut_initializer = (x, x, memory) # (state, input, memory) ut_function = functools.partial( universal_transformer_with_lstm_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover) else: raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type) return ut_function, ut_initializer
[ "Provides", "the", "function", "that", "is", "used", "in", "universal", "transforemr", "steps", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L268-L354
[ "def", "get_ut_layer", "(", "x", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ",", "pad_remover", "=", "None", ")", ":", "if", "hparams", ".", "recurrence_type", "==", "\"basic\"", ":", "ut_initializer", "=", "(", "x", ",", "x", ",", "x", ")",...
272500b6efe353aeb638d2745ed56e519462ca31
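For the "dwa" branch above, a NumPy sketch of the memory buffer: one slot per recurrent step plus one for the original input, with fill_memory_slot approximated here by plain slice assignment (the real helper works on TF tensors):

import numpy as np

num_rec_steps = 3
x = np.ones((2, 4))                                  # stand-in input tensor
memory = np.zeros((num_rec_steps + 1,) + x.shape)    # [memory_size] + shape(x)
memory[0] = x                                        # first slot holds the input
print(memory.shape)  # (4, 2, 4)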
train
transformer_encoder_ffn_unit
Applies a feed-forward function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters nonpadding_mask: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor
tensor2tensor/models/research/universal_transformer_util.py
def transformer_encoder_ffn_unit(x, hparams, nonpadding_mask=None, pad_remover=None): """Applies a feed-forward function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters nonpadding_mask: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor """ with tf.variable_scope("ffn"): if hparams.transformer_ffn_type == "fc": y = transformer.transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams, pad_remover, conv_padding="SAME", nonpadding_mask=nonpadding_mask) if hparams.transformer_ffn_type == "sepconv": assert nonpadding_mask is not None, ( "The nonpadding_mask should be provided, otherwise the model uses " "the leaked padding information to estimate the length!") y = common_layers.sepconv_relu_sepconv( common_layers.layer_preprocess(x, hparams), filter_size=hparams.filter_size, output_size=hparams.hidden_size, first_kernel_size=(3, 1), second_kernel_size=(5, 1), padding="SAME", nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return x
def transformer_encoder_ffn_unit(x, hparams, nonpadding_mask=None, pad_remover=None): """Applies a feed-forward function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters nonpadding_mask: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor """ with tf.variable_scope("ffn"): if hparams.transformer_ffn_type == "fc": y = transformer.transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams, pad_remover, conv_padding="SAME", nonpadding_mask=nonpadding_mask) if hparams.transformer_ffn_type == "sepconv": assert nonpadding_mask is not None, ( "The nonpadding_mask should be provided, otherwise the model uses " "the leaked padding information to estimate the length!") y = common_layers.sepconv_relu_sepconv( common_layers.layer_preprocess(x, hparams), filter_size=hparams.filter_size, output_size=hparams.hidden_size, first_kernel_size=(3, 1), second_kernel_size=(5, 1), padding="SAME", nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return x
[ "Applies", "a", "feed", "-", "forward", "function", "which", "is", "parametrised", "for", "encoding", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L357-L402
[ "def", "transformer_encoder_ffn_unit", "(", "x", ",", "hparams", ",", "nonpadding_mask", "=", "None", ",", "pad_remover", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"ffn\"", ")", ":", "if", "hparams", ".", "transformer_ffn_type", "==", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
transformer_encoder_attention_unit
Applies multihead attention function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters encoder_self_attention_bias: a bias tensor for use in encoder self-attention attention_dropout_broadcast_dims: For noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: the output tensor
tensor2tensor/models/research/universal_transformer_util.py
def transformer_encoder_attention_unit(x, hparams, encoder_self_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True): """Applies multihead attention function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters encoder_self_attention_bias: a bias tensor for use in encoder self-attention attention_dropout_broadcast_dims: For noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: the output tensor """ with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k) x = common_layers.layer_postprocess(x, y, hparams) return x
def transformer_encoder_attention_unit(x, hparams, encoder_self_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True): """Applies multihead attention function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters encoder_self_attention_bias: a bias tensor for use in encoder self-attention attention_dropout_broadcast_dims: For noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: the output tensor """ with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k) x = common_layers.layer_postprocess(x, y, hparams) return x
[ "Applies", "multihead", "attention", "function", "which", "is", "parametrised", "for", "encoding", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L405-L446
[ "def", "transformer_encoder_attention_unit", "(", "x", ",", "hparams", ",", "encoder_self_attention_bias", ",", "attention_dropout_broadcast_dims", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ")", ":", "with", "tf", ".", "variable_scope"...
272500b6efe353aeb638d2745ed56e519462ca31
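The attention unit above wraps the sublayer in layer_preprocess/layer_postprocess. A NumPy sketch of one common configuration (layer norm before the sublayer, residual after it; dropout omitted). The real sequences are controlled by hparams and may differ, so this is an illustration of the pattern, not the library's exact behavior:

import numpy as np

def layer_norm(x, eps=1e-6):
    mean = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps)

def sublayer_call(x, sublayer):
    y = sublayer(layer_norm(x))   # layer_preprocess: pre-norm
    return x + y                  # layer_postprocess: residual add

x = np.random.randn(2, 5, 8)
out = sublayer_call(x, lambda h: 0.5 * h)  # stand-in for multihead attention
print(out.shape)  # (2, 5, 8)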
train
transformer_decoder_attention_unit
Applies multihead attention function which is parametrised for decoding. Args: x: input (decoder input) hparams: model hyper-parameters encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] attention_dropout_broadcast_dims: For noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: The output tensor
tensor2tensor/models/research/universal_transformer_util.py
def transformer_decoder_attention_unit(x, hparams, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True): """Applies multihead attention function which is parametrised for decoding. Args: x: input (decoder input) hparams: model hyper-parameters encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] attention_dropout_broadcast_dims: For noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: The output tensor """ with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, cache=None, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k) x = common_layers.layer_postprocess(x, y, hparams) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k) x = common_layers.layer_postprocess(x, y, hparams) return x
def transformer_decoder_attention_unit(x, hparams, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True): """Applies multihead attention function which is parametrised for decoding. Args: x: input (decoder input) hparams: model hyper-parameters encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] attention_dropout_broadcast_dims: For noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: The output tensor """ with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, cache=None, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k) x = common_layers.layer_postprocess(x, y, hparams) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k) x = common_layers.layer_postprocess(x, y, hparams) return x
[ "Applies", "multihead", "attention", "function", "which", "is", "parametrised", "for", "decoding", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L492-L556
[ "def", "transformer_decoder_attention_unit", "(", "x", ",", "hparams", ",", "encoder_output", ",", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "attention_dropout_broadcast_dims", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=...
272500b6efe353aeb638d2745ed56e519462ca31
train
universal_transformer_basic
Basic Universal Transformer. This model is pretty similar to the vanilla transformer in which weights are shared between layers. For some tasks, this simple idea brings a generalization that is not achievable by playing with the size of the model or drop_out parameters in the vanilla transformer. Args: layer_inputs: - state: state step: indicates number of steps taken so far hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_basic(layer_inputs, step, hparams, ffn_unit, attention_unit): """Basic Universal Transformer. This model is pretty similar to the vanilla transformer in which weights are shared between layers. For some tasks, this simple idea brings a generalization that is not achievable by playing with the size of the model or drop_out parameters in the vanilla transformer. Args: layer_inputs: - state: state step: indicates number of steps taken so far hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state """ state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0, name="unstack") new_state = step_preprocess(state, step, hparams) for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): new_state = ffn_unit(attention_unit(new_state)) return new_state, inputs, memory
def universal_transformer_basic(layer_inputs, step, hparams, ffn_unit, attention_unit): """Basic Universal Transformer. This model is pretty similar to the vanilla transformer in which weights are shared between layers. For some tasks, this simple idea brings a generalization that is not achievable by playing with the size of the model or drop_out parameters in the vanilla transformer. Args: layer_inputs: - state: state step: indicates number of steps taken so far hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state """ state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0, name="unstack") new_state = step_preprocess(state, step, hparams) for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): new_state = ffn_unit(attention_unit(new_state)) return new_state, inputs, memory
[ "Basic", "Universal", "Transformer", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L559-L590
[ "def", "universal_transformer_basic", "(", "layer_inputs", ",", "step", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ")", ":", "state", ",", "inputs", ",", "memory", "=", "tf", ".", "unstack", "(", "layer_inputs", ",", "num", "=", "None", ",", "...
272500b6efe353aeb638d2745ed56e519462ca31
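A NumPy analogue of the tf.unstack call in universal_transformer_basic above: the fold carry is one stacked tensor that is split back into (state, inputs, memory) along axis 0 at each step:

import numpy as np

carry = np.stack([np.full((2, 3), 7.0),   # state
                  np.full((2, 3), 1.0),   # inputs
                  np.full((2, 3), 0.0)])  # memory
state, inputs, memory = carry             # iterates axis 0, like tf.unstack
print(state[0])  # [7. 7. 7.]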
train
universal_transformer_highway
Universal Transformer with highway connection. It transforms the state using a block containing self-attention and a transition function and wraps the whole block with a highway connection. (the new state is a combination of the state and the transformed-state based on carry/transform gates.) Interesting observation: Controlling the carry/transform gate with the original inputs usually works better (i.e. hparams.gates_inputs="i") Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step)
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_highway(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): """Universal Transformer with highway connection. It transforms the state using a block containing self-attention and a transition function and wraps the whole block with a highway connection. (the new state is a combination of the state and the transformed-state based on carry/transform gates.) Interesting observation: Controlling the carry/transform gate with the original inputs usually works better (i.e. hparams.gates_inputs="i") Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step) """ state, inputs, memory = layer_inputs new_state = step_preprocess(state, step, hparams) for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): new_state = ffn_unit(attention_unit(new_state)) transformed_state = new_state gate_inputs = [] if "s" in hparams.gates_inputs: gate_inputs.append(state) if "t" in hparams.gates_inputs: gate_inputs.append(transformed_state) if "i" in hparams.gates_inputs: gate_inputs.append(inputs) gate_ffn_layer = hparams.gate_ffn_layer transform_gate = _ffn_layer_multi_inputs( gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name="transform", bias_initializer=tf.constant_initializer(hparams.transform_bias_init), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=True, postprocess=True) if hparams.couple_carry_transform_gates: carry_gate = tf.subtract(1.0, transform_gate, name="carry") else: carry_gate = _ffn_layer_multi_inputs( gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name="carry", bias_initializer=tf.constant_initializer(-hparams.transform_bias_init), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=True, postprocess=True) new_state = state * carry_gate + transformed_state * transform_gate tf.contrib.summary.scalar("highway_transform_gate_layer", tf.reduce_mean(transform_gate)) tf.contrib.summary.scalar("highway_carry_gate_layer", tf.reduce_mean(carry_gate)) return new_state, inputs, memory
def universal_transformer_highway(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): """Universal Transformer with highway connection. It transforms the state using a block containing self-attention and a transition function and wraps the whole block with a highway connection. (the new state is a combination of the state and the transformed-state based on carry/transform gates.) Interesting observation: Controlling the carry/transform gate with the original inputs usually works better (i.e. hparams.gates_inputs="i") Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step) """ state, inputs, memory = layer_inputs new_state = step_preprocess(state, step, hparams) for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): new_state = ffn_unit(attention_unit(new_state)) transformed_state = new_state gate_inputs = [] if "s" in hparams.gates_inputs: gate_inputs.append(state) if "t" in hparams.gates_inputs: gate_inputs.append(transformed_state) if "i" in hparams.gates_inputs: gate_inputs.append(inputs) gate_ffn_layer = hparams.gate_ffn_layer transform_gate = _ffn_layer_multi_inputs( gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name="transform", bias_initializer=tf.constant_initializer(hparams.transform_bias_init), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=True, postprocess=True) if hparams.couple_carry_transform_gates: carry_gate = tf.subtract(1.0, transform_gate, name="carry") else: carry_gate = _ffn_layer_multi_inputs( gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name="carry", bias_initializer=tf.constant_initializer(-hparams.transform_bias_init), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=True, postprocess=True) new_state = state * carry_gate + transformed_state * transform_gate tf.contrib.summary.scalar("highway_transform_gate_layer", tf.reduce_mean(transform_gate)) tf.contrib.summary.scalar("highway_carry_gate_layer", tf.reduce_mean(carry_gate)) return new_state, inputs, memory
[ "Universal", "Transformer", "with", "highway", "connection", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L593-L682
[ "def", "universal_transformer_highway", "(", "layer_inputs", ",", "step", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ",", "pad_remover", "=", "None", ")", ":", "state", ",", "inputs", ",", "memory", "=", "layer_inputs", "new_state", "=", "step_prepr...
272500b6efe353aeb638d2745ed56e519462ca31
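A NumPy sketch of the coupled highway update above: with couple_carry_transform_gates=True the carry gate is 1 - transform gate, so the new state interpolates between the old state and its transformed version:

import numpy as np

state = np.array([1.0, 1.0])
transformed_state = np.array([3.0, 5.0])
transform_gate = np.array([0.25, 0.75])      # a sigmoid output in practice
carry_gate = 1.0 - transform_gate            # coupled gates, as above
new_state = state * carry_gate + transformed_state * transform_gate
print(new_state)  # [1.5 4. ]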
train
universal_transformer_depthwise_attention
universal_transformer with depth-wise attention. It uses an attention mechanism (flipped vertically) over all the states from previous steps to generate the new_state. Args: layer_inputs: - state: state - memory: contains states from all the previous steps. step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state memory: contains states from all the previous steps.
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_depthwise_attention(layer_inputs, step, hparams, ffn_unit, attention_unit): """universal_transformer with depth-wise attention. It uses an attention mechanism (flipped vertically) over all the states from previous steps to generate the new_state. Args: layer_inputs: - state: state - memory: contains states from all the previous steps. step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state memory: contains states from all the previous steps. """ _, inputs, memory = layer_inputs all_states = memory # add depth signal if hparams.depth_embedding: all_states = add_depth_embedding(all_states) # get the states up to the current step (non-zero part of the memory) states_so_far = all_states[:step, :, :, :] states_so_far_weights = tf.nn.softmax( common_layers.dense( states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1), activation=None, use_bias=True), axis=-1) # prepare the state tensor that will be transformed state_to_be_transformed = tf.reduce_sum( (states_so_far * states_so_far_weights), axis=0) new_state = step_preprocess(state_to_be_transformed, step, hparams) for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): new_state = ffn_unit(attention_unit(new_state)) # add the new state to the memory memory = fill_memory_slot(memory, new_state, step + 1) return new_state, inputs, memory
def universal_transformer_depthwise_attention(layer_inputs, step, hparams, ffn_unit, attention_unit): """universal_transformer with depth-wise attention. It uses an attention mechanism (flipped vertically) over all the states from previous steps to generate the new_state. Args: layer_inputs: - state: state - memory: contains states from all the previous steps. step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state memory: contains states from all the previous steps. """ _, inputs, memory = layer_inputs all_states = memory # add depth signal if hparams.depth_embedding: all_states = add_depth_embedding(all_states) # get the states up to the current step (non-zero part of the memory) states_so_far = all_states[:step, :, :, :] states_so_far_weights = tf.nn.softmax( common_layers.dense( states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1), activation=None, use_bias=True), axis=-1) # prepare the state tensor that will be transformed state_to_be_transformed = tf.reduce_sum( (states_so_far * states_so_far_weights), axis=0) new_state = step_preprocess(state_to_be_transformed, step, hparams) for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): new_state = ffn_unit(attention_unit(new_state)) # add the new state to the memory memory = fill_memory_slot(memory, new_state, step + 1) return new_state, inputs, memory
[ "universal_transformer", "with", "depth", "-", "wise", "attention", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L777-L832
[ "def", "universal_transformer_depthwise_attention", "(", "layer_inputs", ",", "step", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ")", ":", "_", ",", "inputs", ",", "memory", "=", "layer_inputs", "all_states", "=", "memory", "# add depth signal", "if", ...
272500b6efe353aeb638d2745ed56e519462ca31
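A NumPy sketch that mirrors the shape flow of the depth-wise readout above: per-element weights over the states produced so far, followed by a sum over the step axis. The random logits stand in for the common_layers.dense projection and carry no learned meaning:

import numpy as np

def softmax(z, axis):
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

states_so_far = np.random.randn(3, 2, 4, 8)     # [steps, batch, length, depth]
logits = np.random.randn(*states_so_far.shape)  # stand-in for the dense layer
weights = softmax(logits, axis=-1)
state_to_be_transformed = (states_so_far * weights).sum(axis=0)
print(state_to_be_transformed.shape)  # (2, 4, 8)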
train
universal_transformer_with_gru_as_transition_function
Universal Transformer which uses a gru as transition function. It's kind of like having a gru, flipped vertically next to the Universal Transformer that controls the flow of the information in depth, over different steps of the Universal Transformer. Args: layer_inputs: - state: state - inputs: not used here - memory: not used here step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: not used memory: not used
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_with_gru_as_transition_function( layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): """Universal Transformer which uses a gru as transition function. It's kind of like having a gru, flipped vertically next to the Universal Transformer that controls the flow of the information in depth, over different steps of the Universal Transformer. Args: layer_inputs: - state: state - inputs: not used here - memory: not used here step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: not used memory: not used """ state, unused_inputs, unused_memory = tf.unstack( layer_inputs, num=None, axis=0, name="unstack") # state (ut_state): output of the gru in the previous step # Multi_head_attention: assert not hparams.add_step_timing_signal # Let gru count for us! mh_attention_input = step_preprocess(state, step, hparams) transition_function_input = attention_unit(mh_attention_input) # Transition Function: if hparams.add_ffn_unit_to_the_transition_function: transition_function_input = ffn_unit(transition_function_input) transition_function_input = common_layers.layer_preprocess( transition_function_input, hparams) with tf.variable_scope("gru"): # gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1}) transition_function_update_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="update", bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("gru_update_gate", tf.reduce_mean(transition_function_update_gate)) # gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1}) transition_function_reset_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="reset", bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("gru_reset_gate", tf.reduce_mean(transition_function_reset_gate)) reset_state = transition_function_reset_gate * state # gru candidate activation: h' = tanh(W.x_t + U.(r_t * h_{t-1})) transition_function_candidate = _ffn_layer_multi_inputs( [transition_function_input, reset_state], hparams, name="candidate", bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False) transition_function_output = ( (1 - transition_function_update_gate) * transition_function_input + transition_function_update_gate * transition_function_candidate) transition_function_output = common_layers.layer_preprocess( transition_function_output, hparams) return transition_function_output, unused_inputs, unused_memory
def universal_transformer_with_gru_as_transition_function( layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): """Universal Transformer which uses a gru as transition function. It's kind of like having a gru, flipped vertically next to the Universal Transformer that controls the flow of the information in depth, over different steps of the Universal Transformer. Args: layer_inputs: - state: state - inputs: not used here - memory: not used here step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: not used memory: not used """ state, unused_inputs, unused_memory = tf.unstack( layer_inputs, num=None, axis=0, name="unstack") # state (ut_state): output of the gru in the previous step # Multi_head_attention: assert not hparams.add_step_timing_signal # Let gru count for us! mh_attention_input = step_preprocess(state, step, hparams) transition_function_input = attention_unit(mh_attention_input) # Transition Function: if hparams.add_ffn_unit_to_the_transition_function: transition_function_input = ffn_unit(transition_function_input) transition_function_input = common_layers.layer_preprocess( transition_function_input, hparams) with tf.variable_scope("gru"): # gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1}) transition_function_update_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="update", bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("gru_update_gate", tf.reduce_mean(transition_function_update_gate)) # gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1}) transition_function_reset_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="reset", bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("gru_reset_gate", tf.reduce_mean(transition_function_reset_gate)) reset_state = transition_function_reset_gate * state # gru candidate activation: h' = tanh(W.x_t + U.(r_t * h_{t-1})) transition_function_candidate = _ffn_layer_multi_inputs( [transition_function_input, reset_state], hparams, name="candidate", bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False) transition_function_output = ( (1 - transition_function_update_gate) * transition_function_input + transition_function_update_gate * transition_function_candidate) transition_function_output = common_layers.layer_preprocess( transition_function_output, hparams) return transition_function_output, unused_inputs, unused_memory
[ "Universal", "Transformer", "which", "uses", "a", "gru", "as", "transition", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L835-L924
[ "def", "universal_transformer_with_gru_as_transition_function", "(", "layer_inputs", ",", "step", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ",", "pad_remover", "=", "None", ")", ":", "state", ",", "unused_inputs", ",", "unused_memory", "=", "tf", ".", ...
272500b6efe353aeb638d2745ed56e519462ca31
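A NumPy sketch of the GRU gating used as the transition function above, written for a single feature vector; W_z, U_z and friends are hypothetical dense parameters standing in for _ffn_layer_multi_inputs. Note that, as in the code above, the final mix combines the attended input with the candidate rather than the previous state:

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

d = 4
rng = np.random.default_rng(0)
W_z, U_z, W_r, U_r, W_h, U_h = (rng.standard_normal((d, d)) for _ in range(6))
x_t = rng.standard_normal(d)      # transition_function_input
h_prev = rng.standard_normal(d)   # state from the previous step

z_t = sigmoid(W_z @ x_t + U_z @ h_prev)              # update gate
r_t = sigmoid(W_r @ x_t + U_r @ h_prev)              # reset gate
h_cand = np.tanh(W_h @ x_t + U_h @ (r_t * h_prev))   # candidate activation
h_t = (1.0 - z_t) * x_t + z_t * h_cand               # mixes input and candidate
print(h_t.shape)  # (4,)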
train
universal_transformer_with_lstm_as_transition_function
Universal Transformer which uses an lstm as transition function. It's kind of like having an lstm, flipped vertically next to the Universal Transformer that controls the flow of the information in depth, over different steps of the Universal Transformer. Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) - memory: memory used in lstm. step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step) memory: contains information of state from all the previous steps.
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_with_lstm_as_transition_function( layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): """Universal Transformer which uses an lstm as transition function. It's kind of like having an lstm, flipped vertically next to the Universal Transformer that controls the flow of the information in depth, over different steps of the Universal Transformer. Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) - memory: memory used in lstm. step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step) memory: contains information of state from all the previous steps. """ state, unused_inputs, memory = tf.unstack( layer_inputs, num=None, axis=0, name="unstack") # NOTE: # state (ut_state): output of the lstm in the previous step # inputs (ut_input): original input --> we don't use it here # memory: lstm memory # Multi_head_attention: assert not hparams.add_step_timing_signal # Let lstm count for us! mh_attention_input = step_preprocess(state, step, hparams) transition_function_input = attention_unit(mh_attention_input) # Transition Function: if hparams.add_ffn_unit_to_the_transition_function: transition_function_input = ffn_unit(transition_function_input) transition_function_input = common_layers.layer_preprocess( transition_function_input, hparams) with tf.variable_scope("lstm"): # lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1}) transition_function_input_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="input", bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("lstm_input_gate", tf.reduce_mean(transition_function_input_gate)) # lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1}) transition_function_forget_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="forget", bias_initializer=tf.zeros_initializer(), activation=None, pad_remover=pad_remover, preprocess=False, postprocess=False) forget_bias_tensor = tf.constant(hparams.lstm_forget_bias) transition_function_forget_gate = tf.sigmoid( transition_function_forget_gate + forget_bias_tensor) tf.contrib.summary.scalar("lstm_forget_gate", tf.reduce_mean(transition_function_forget_gate)) # lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1}) transition_function_output_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="output", bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("lstm_output_gate", tf.reduce_mean(transition_function_output_gate)) # lstm input modulation transition_function_input_modulation = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="input_modulation", bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False) transition_function_memory = ( memory * transition_function_forget_gate + transition_function_input_gate * transition_function_input_modulation) transition_function_output = ( tf.tanh(transition_function_memory) * transition_function_output_gate) transition_function_output = common_layers.layer_preprocess( transition_function_output, hparams) return transition_function_output, unused_inputs, transition_function_memory
def universal_transformer_with_lstm_as_transition_function( layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): """Universal Transformer which uses an LSTM as transition function. It's kind of like having an LSTM, flipped vertically next to the Universal Transformer that controls the flow of information in depth, over different steps of the Universal Transformer. Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) - memory: memory used in lstm. step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step) memory: contains information of state from all the previous steps. """ state, unused_inputs, memory = tf.unstack( layer_inputs, num=None, axis=0, name="unstack") # NOTE: # state (ut_state): output of the lstm in the previous step # inputs (ut_input): original input --> we don't use it here # memory: lstm memory # Multi_head_attention: assert not hparams.add_step_timing_signal # Let lstm count for us! mh_attention_input = step_preprocess(state, step, hparams) transition_function_input = attention_unit(mh_attention_input) # Transition Function: if hparams.add_ffn_unit_to_the_transition_function: transition_function_input = ffn_unit(transition_function_input) transition_function_input = common_layers.layer_preprocess( transition_function_input, hparams) with tf.variable_scope("lstm"): # lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1}) transition_function_input_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="input", bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("lstm_input_gate", tf.reduce_mean(transition_function_input_gate)) # lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1}) transition_function_forget_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="forget", bias_initializer=tf.zeros_initializer(), activation=None, pad_remover=pad_remover, preprocess=False, postprocess=False) forget_bias_tensor = tf.constant(hparams.lstm_forget_bias) transition_function_forget_gate = tf.sigmoid( transition_function_forget_gate + forget_bias_tensor) tf.contrib.summary.scalar("lstm_forget_gate", tf.reduce_mean(transition_function_forget_gate)) # lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1}) transition_function_output_gate = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="output", bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False) tf.contrib.summary.scalar("lstm_output_gate", tf.reduce_mean(transition_function_output_gate)) # lstm input modulation transition_function_input_modulation = _ffn_layer_multi_inputs( [transition_function_input, state], hparams, name="input_modulation", bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False) transition_function_memory = ( memory * transition_function_forget_gate + transition_function_input_gate * transition_function_input_modulation) transition_function_output = ( tf.tanh(transition_function_memory) * transition_function_output_gate) transition_function_output =
common_layers.layer_preprocess( transition_function_output, hparams) return transition_function_output, unused_inputs, transition_function_memory
[ "Universal", "Transformer", "which", "uses", "a", "lstm", "as", "transition", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L927-L1037
[ "def", "universal_transformer_with_lstm_as_transition_function", "(", "layer_inputs", ",", "step", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ",", "pad_remover", "=", "None", ")", ":", "state", ",", "unused_inputs", ",", "memory", "=", "tf", ".", "uns...
272500b6efe353aeb638d2745ed56e519462ca31
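For intuition, here is a minimal NumPy sketch of the LSTM gating arithmetic this transition function computes; the per-gate weight matrices (w_i, w_f, w_o, w_m) are made-up stand-ins for the _ffn_layer_multi_inputs projections, not names from the repo:

import numpy as np

def sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

rng = np.random.RandomState(0)
batch, length, hidden = 2, 5, 8
x = rng.randn(batch, length, hidden)        # transition_function_input
state = rng.randn(batch, length, hidden)    # previous UT state (h_{t-1})
memory = np.zeros((batch, length, hidden))  # lstm memory (c_{t-1})

def gate(w, activation):
  # Each gate sees the concatenation [x, state], as the multi-input FFN does.
  return activation(np.concatenate([x, state], axis=-1) @ w)

w_i, w_f, w_o, w_m = (rng.randn(2 * hidden, hidden) * 0.1 for _ in range(4))
i_t = gate(w_i, sigmoid)
f_t = sigmoid(gate(w_f, lambda z: z) + 1.0)  # pre-activation + lstm_forget_bias
o_t = gate(w_o, sigmoid)
m_t = gate(w_m, np.tanh)                     # input modulation

new_memory = memory * f_t + i_t * m_t
new_state = np.tanh(new_memory) * o_t
print(new_state.shape)                       # (2, 5, 8)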
train
universal_transformer_act
ACT based models. Implementations of all act models are based on craffel@'s cl/160711592. (1) Basic AUT based on remainder-distribution ACT (position-wise). (2) AUT with global halting probability (not position-wise). (3) AUT with random halting probability (not position-wise). (4) AUT with final state as accumulation of all states. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: the output tensor, (ponder_times, remainders) Raises: ValueError: Unknown act type
tensor2tensor/models/research/universal_transformer_util.py
def universal_transformer_act(x, hparams, ffn_unit, attention_unit): """ACT based models. Implementations of all act models are based on craffel@'s cl/160711592. (1) Basic AUT based on remainder-distribution ACT (position-wise). (2) AUT with global halting probability (not position-wise). (3) AUT with random halting probability (not position-wise). (4) AUT with final state as accumulation of all states. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: the output tensor, (ponder_times, remainders) Raises: ValueError: Unknown act type """ if hparams.act_type not in ["basic", "global", "random", "accumulated"]: raise ValueError("Unknown act type: %s" % hparams.act_type) state = x act_max_steps = hparams.act_max_steps threshold = 1.0 - hparams.act_epsilon state_shape_static = state.get_shape() state_slice = slice(0, 2) if hparams.act_type == "global": state_slice = slice(0, 1) # Dynamic shape for update tensors below update_shape = tf.shape(state)[state_slice] # Halting probabilities (p_t^n in the paper) halting_probability = tf.zeros(update_shape, name="halting_probability") # Remainders (R(t) in the paper) remainders = tf.zeros(update_shape, name="remainder") # Number of updates performed (N(t) in the paper) n_updates = tf.zeros(update_shape, name="n_updates") # Previous cell states (s_t in the paper) previous_state = tf.zeros_like(state, name="previous_state") step = tf.constant(0, dtype=tf.int32) def ut_function(state, step, halting_probability, remainders, n_updates, previous_state): """implements act (position-wise halting). Args: state: 3-D Tensor: [batch_size, length, channel] step: indicates number of steps taken so far halting_probability: halting probability remainders: act remainders n_updates: act n_updates previous_state: previous state Returns: transformed_state: transformed state step: step+1 halting_probability: halting probability remainders: act remainders n_updates: act n_updates new_state: new state """ state = step_preprocess(state, step, hparams) if hparams.act_type == "random": # random as halting probability p = tf.random_uniform( shape=common_layers.shape_list(halting_probability)) else: with tf.variable_scope("sigmoid_activation_for_pondering"): p = common_layers.dense( state, 1, activation=tf.nn.sigmoid, use_bias=True, bias_initializer=tf.constant_initializer( hparams.act_halting_bias_init)) if hparams.act_type == "global": # average over all positions (as a global halting prob) p = tf.reduce_mean(p, axis=1) p = tf.squeeze(p) else: # maintain position-wise probabilities p = tf.squeeze(p, axis=-1) # Mask for inputs which have not halted yet still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32) # Mask of inputs which halted at this step new_halted = tf.cast( tf.greater(halting_probability + p * still_running, threshold), tf.float32) * still_running # Mask of inputs which haven't halted, and didn't halt this step still_running = tf.cast( tf.less_equal(halting_probability + p * still_running, threshold), tf.float32) * still_running # Add the halting probability for this step to the halting # probabilities for those input which haven't halted yet halting_probability += p * still_running # Compute remainders for the inputs which halted at this step remainders += new_halted * (1 - halting_probability) # Add the remainders to those inputs which halted at this step halting_probability += new_halted * remainders # Increment n_updates for all inputs which are still running n_updates 
+= still_running + new_halted # Compute the weight to be applied to the new state and output # 0 when the input has already halted # p when the input hasn't halted yet # the remainders when it halted this step update_weights = tf.expand_dims( p * still_running + new_halted * remainders, -1) if hparams.act_type == "global": update_weights = tf.expand_dims(update_weights, -1) # apply transformation on the state transformed_state = state for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): transformed_state = ffn_unit(attention_unit(transformed_state)) # update running part in the weighted state and keep the rest new_state = ((transformed_state * update_weights) + (previous_state * (1 - update_weights))) if hparams.act_type == "accumulated": # Add in the weighted state new_state = (transformed_state * update_weights) + previous_state # remind TensorFlow of everything's shape transformed_state.set_shape(state_shape_static) for x in [halting_probability, remainders, n_updates]: x.set_shape(state_shape_static[state_slice]) new_state.set_shape(state_shape_static) step += 1 return (transformed_state, step, halting_probability, remainders, n_updates, new_state) # While loop stops when this predicate is FALSE. # Ie all (probability < 1-eps AND counter < N) are false. def should_continue(u0, u1, halting_probability, u2, n_updates, u3): del u0, u1, u2, u3 return tf.reduce_any( tf.logical_and( tf.less(halting_probability, threshold), tf.less(n_updates, act_max_steps))) # Do while loop iterations until predicate above is false. (_, _, _, remainder, n_updates, new_state) = tf.while_loop( should_continue, ut_function, (state, step, halting_probability, remainders, n_updates, previous_state), maximum_iterations=act_max_steps + 1) ponder_times = n_updates remainders = remainder tf.contrib.summary.scalar("ponder_times", tf.reduce_mean(ponder_times)) return new_state, (ponder_times, remainders)
def universal_transformer_act(x, hparams, ffn_unit, attention_unit): """ACT based models. Implementations of all act models are based on craffel@'s cl/160711592. (1) Basic AUT based on remainder-distribution ACT (position-wise). (2) AUT with global halting probability (not position-wise). (3) AUT with random halting probability (not position-wise). (4) AUT with final state as accumulation of all states. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: the output tensor, (ponder_times, remainders) Raises: ValueError: Unknown act type """ if hparams.act_type not in ["basic", "global", "random", "accumulated"]: raise ValueError("Unknown act type: %s" % hparams.act_type) state = x act_max_steps = hparams.act_max_steps threshold = 1.0 - hparams.act_epsilon state_shape_static = state.get_shape() state_slice = slice(0, 2) if hparams.act_type == "global": state_slice = slice(0, 1) # Dynamic shape for update tensors below update_shape = tf.shape(state)[state_slice] # Halting probabilities (p_t^n in the paper) halting_probability = tf.zeros(update_shape, name="halting_probability") # Remainders (R(t) in the paper) remainders = tf.zeros(update_shape, name="remainder") # Number of updates performed (N(t) in the paper) n_updates = tf.zeros(update_shape, name="n_updates") # Previous cell states (s_t in the paper) previous_state = tf.zeros_like(state, name="previous_state") step = tf.constant(0, dtype=tf.int32) def ut_function(state, step, halting_probability, remainders, n_updates, previous_state): """implements act (position-wise halting). Args: state: 3-D Tensor: [batch_size, length, channel] step: indicates number of steps taken so far halting_probability: halting probability remainders: act remainders n_updates: act n_updates previous_state: previous state Returns: transformed_state: transformed state step: step+1 halting_probability: halting probability remainders: act remainders n_updates: act n_updates new_state: new state """ state = step_preprocess(state, step, hparams) if hparams.act_type == "random": # random as halting probability p = tf.random_uniform( shape=common_layers.shape_list(halting_probability)) else: with tf.variable_scope("sigmoid_activation_for_pondering"): p = common_layers.dense( state, 1, activation=tf.nn.sigmoid, use_bias=True, bias_initializer=tf.constant_initializer( hparams.act_halting_bias_init)) if hparams.act_type == "global": # average over all positions (as a global halting prob) p = tf.reduce_mean(p, axis=1) p = tf.squeeze(p) else: # maintain position-wise probabilities p = tf.squeeze(p, axis=-1) # Mask for inputs which have not halted yet still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32) # Mask of inputs which halted at this step new_halted = tf.cast( tf.greater(halting_probability + p * still_running, threshold), tf.float32) * still_running # Mask of inputs which haven't halted, and didn't halt this step still_running = tf.cast( tf.less_equal(halting_probability + p * still_running, threshold), tf.float32) * still_running # Add the halting probability for this step to the halting # probabilities for those input which haven't halted yet halting_probability += p * still_running # Compute remainders for the inputs which halted at this step remainders += new_halted * (1 - halting_probability) # Add the remainders to those inputs which halted at this step halting_probability += new_halted * remainders # Increment n_updates for all inputs which are still running n_updates 
+= still_running + new_halted # Compute the weight to be applied to the new state and output # 0 when the input has already halted # p when the input hasn't halted yet # the remainders when it halted this step update_weights = tf.expand_dims( p * still_running + new_halted * remainders, -1) if hparams.act_type == "global": update_weights = tf.expand_dims(update_weights, -1) # apply transformation on the state transformed_state = state for i in range(hparams.num_inrecurrence_layers): with tf.variable_scope("rec_layer_%d" % i): transformed_state = ffn_unit(attention_unit(transformed_state)) # update running part in the weighted state and keep the rest new_state = ((transformed_state * update_weights) + (previous_state * (1 - update_weights))) if hparams.act_type == "accumulated": # Add in the weighted state new_state = (transformed_state * update_weights) + previous_state # remind TensorFlow of everything's shape transformed_state.set_shape(state_shape_static) for x in [halting_probability, remainders, n_updates]: x.set_shape(state_shape_static[state_slice]) new_state.set_shape(state_shape_static) step += 1 return (transformed_state, step, halting_probability, remainders, n_updates, new_state) # While loop stops when this predicate is FALSE. # Ie all (probability < 1-eps AND counter < N) are false. def should_continue(u0, u1, halting_probability, u2, n_updates, u3): del u0, u1, u2, u3 return tf.reduce_any( tf.logical_and( tf.less(halting_probability, threshold), tf.less(n_updates, act_max_steps))) # Do while loop iterations until predicate above is false. (_, _, _, remainder, n_updates, new_state) = tf.while_loop( should_continue, ut_function, (state, step, halting_probability, remainders, n_updates, previous_state), maximum_iterations=act_max_steps + 1) ponder_times = n_updates remainders = remainder tf.contrib.summary.scalar("ponder_times", tf.reduce_mean(ponder_times)) return new_state, (ponder_times, remainders)
[ "ACT", "based", "models", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1040-L1212
[ "def", "universal_transformer_act", "(", "x", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ")", ":", "if", "hparams", ".", "act_type", "not", "in", "[", "\"basic\"", ",", "\"global\"", ",", "\"random\"", ",", "\"accumulated\"", "]", ":", "raise", ...
272500b6efe353aeb638d2745ed56e519462ca31
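The halting bookkeeping inside ut_function is easiest to see on a toy example; this NumPy sketch replays two steps of the position-wise masks with made-up halting probabilities (illustrative only, not repo code):

import numpy as np

threshold = 1.0 - 0.01  # 1 - act_epsilon
halting_probability = np.zeros(4)
remainders = np.zeros(4)
n_updates = np.zeros(4)

for _ in range(2):
  p = np.array([0.6, 0.2, 0.99, 0.5])  # pretend pondering outputs
  still_running = (halting_probability < 1.0).astype(float)
  new_halted = ((halting_probability + p * still_running) > threshold
                ).astype(float) * still_running
  still_running = ((halting_probability + p * still_running) <= threshold
                   ).astype(float) * still_running
  halting_probability += p * still_running
  remainders += new_halted * (1 - halting_probability)
  halting_probability += new_halted * remainders
  n_updates += still_running + new_halted

print(halting_probability)  # halted positions reach exactly 1.0
print(n_updates)            # per-position ponder_times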
train
_ffn_layer_multi_inputs
Implements a Feed-forward layer with multiple inputs, pad-removing, etc. Args: inputs_list: list of input tensors hparams: hyper-parameters ffn_layer_type: dense / dense_dropconnect / dense_relu_dense name: name kernel_initializer: kernel initializer bias_initializer: bias initializer activation: activation function pad_remover: pad remover preprocess: whether to preprocess the input postprocess: whether to postprocess the output Returns: a tensor Raises: ValueError: Unknown ffn_layer type.
tensor2tensor/models/research/universal_transformer_util.py
def _ffn_layer_multi_inputs(inputs_list, hparams, ffn_layer_type="dense", name="ffn", kernel_initializer=None, bias_initializer=None, activation=None, pad_remover=None, preprocess=False, postprocess=False): """Implements a Feed-forward layer with multiple inputs, pad-removing, etc. Args: inputs_list: list of input tensors hparams: hyper-parameters ffn_layer_type: dense / dense_dropconnect / dense_relu_dense name: name kernel_initializer: kernel initializer bias_initializer: bias initializer activation: activation function pad_remover: pad remover preprocess: whether to preprocess the input postprocess: whether to postprocess the output Returns: a tensor Raises: ValueError: Unknown ffn_layer type. """ # need at least one input num_inputs = len(inputs_list) assert num_inputs > 0 if preprocess and num_inputs == 1: inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams) if postprocess: original_inputs = inputs_list[0] # the output size is the hidden size of the main inputs main_input = inputs_list[0] original_shape = common_layers.shape_list(main_input) assert hparams.hidden_size == common_layers.shape_list(main_input)[-1] # all the inputs have the same shape as the main input for inputs in inputs_list: main_input.get_shape().assert_is_compatible_with(inputs.get_shape()) def remove_pads(x): original_shape = common_layers.shape_list(x) # Collapse `x` across examples, and remove padding positions. x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) x = tf.expand_dims(pad_remover.remove(x), axis=0) return x if pad_remover: for i, inputs in enumerate(inputs_list): inputs_list[i] = remove_pads(inputs) ffn_inputs = inputs_list[0] if len(inputs_list) != 1: ffn_inputs = tf.concat(inputs_list, axis=-1) if ffn_layer_type == "dense": output = common_layers.dense( ffn_inputs, hparams.hidden_size, name=name, activation=activation, use_bias=True, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer) elif ffn_layer_type == "dense_dropconnect": output = common_layers.dense_dropconnect( ffn_inputs, hparams.hidden_size, name=name, dropconnect_dropout=hparams.dropconnect_dropout, output_activation=activation) postprocess = False # no dropout on the output unit elif ffn_layer_type == "dense_relu_dense": output = common_layers.dense_relu_dense( ffn_inputs, hparams.filter_size, hparams.hidden_size, name=name, dropout=hparams.relu_dropout, output_activation=activation, ) else: raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type) if pad_remover: # Restore `output` to the original shape of `x`, including padding. output = tf.reshape( pad_remover.restore(tf.squeeze(output, axis=0)), original_shape) if postprocess: if num_inputs == 1: output = common_layers.layer_postprocess(original_inputs, output, hparams) else: # only dropout (no residual) hp = copy.copy(hparams) hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace( "a", "") output = common_layers.layer_postprocess(original_inputs, output, hp) return output
def _ffn_layer_multi_inputs(inputs_list, hparams, ffn_layer_type="dense", name="ffn", kernel_initializer=None, bias_initializer=None, activation=None, pad_remover=None, preprocess=False, postprocess=False): """Implements a Feed-forward layer with multiple inputs, pad-removing, etc. Args: inputs_list: list of input tensors hparams: hyper-parameters ffn_layer_type: dense / dense_dropconnect / dense_relu_dense name: name kernel_initializer: kernel initializer bias_initializer: bias initializer activation: activation function pad_remover: pad remover preprocess: whether to preprocess the input postprocess: whether to postprocess the output Returns: a tensor Raises: ValueError: Unknown ffn_layer type. """ # need at least one input num_inputs = len(inputs_list) assert num_inputs > 0 if preprocess and num_inputs == 1: inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams) if postprocess: original_inputs = inputs_list[0] # the output size is the hidden size of the main inputs main_input = inputs_list[0] original_shape = common_layers.shape_list(main_input) assert hparams.hidden_size == common_layers.shape_list(main_input)[-1] # all the inputs have the same shape as the main input for inputs in inputs_list: main_input.get_shape().assert_is_compatible_with(inputs.get_shape()) def remove_pads(x): original_shape = common_layers.shape_list(x) # Collapse `x` across examples, and remove padding positions. x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) x = tf.expand_dims(pad_remover.remove(x), axis=0) return x if pad_remover: for i, inputs in enumerate(inputs_list): inputs_list[i] = remove_pads(inputs) ffn_inputs = inputs_list[0] if len(inputs_list) != 1: ffn_inputs = tf.concat(inputs_list, axis=-1) if ffn_layer_type == "dense": output = common_layers.dense( ffn_inputs, hparams.hidden_size, name=name, activation=activation, use_bias=True, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer) elif ffn_layer_type == "dense_dropconnect": output = common_layers.dense_dropconnect( ffn_inputs, hparams.hidden_size, name=name, dropconnect_dropout=hparams.dropconnect_dropout, output_activation=activation) postprocess = False # no dropout on the output unit elif ffn_layer_type == "dense_relu_dense": output = common_layers.dense_relu_dense( ffn_inputs, hparams.filter_size, hparams.hidden_size, name=name, dropout=hparams.relu_dropout, output_activation=activation, ) else: raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type) if pad_remover: # Restore `output` to the original shape of `x`, including padding. output = tf.reshape( pad_remover.restore(tf.squeeze(output, axis=0)), original_shape) if postprocess: if num_inputs == 1: output = common_layers.layer_postprocess(original_inputs, output, hparams) else: # only dropout (no residual) hp = copy.copy(hparams) hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace( "a", "") output = common_layers.layer_postprocess(original_inputs, output, hp) return output
[ "Implements", "a", "Feed", "-", "forward", "layer", "with", "multiple", "inputs", "pad", "-", "removing", "etc", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1215-L1326
[ "def", "_ffn_layer_multi_inputs", "(", "inputs_list", ",", "hparams", ",", "ffn_layer_type", "=", "\"dense\"", ",", "name", "=", "\"ffn\"", ",", "kernel_initializer", "=", "None", ",", "bias_initializer", "=", "None", ",", "activation", "=", "None", ",", "pad_re...
272500b6efe353aeb638d2745ed56e519462ca31
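Stripped of pad removal and pre/post-processing, the layer reduces to a concat over the last axis plus one dense projection; a rough NumPy equivalent with illustrative weights:

import numpy as np

batch, length, hidden = 2, 7, 16
x = np.random.randn(batch, length, hidden)      # e.g. attention output
state = np.random.randn(batch, length, hidden)  # e.g. previous state

ffn_inputs = np.concatenate([x, state], axis=-1)  # [batch, length, 2*hidden]
kernel = np.random.randn(2 * hidden, hidden) * 0.05
bias = np.zeros(hidden)
# ffn_layer_type="dense" with a sigmoid activation, as the gates above use.
output = 1.0 / (1.0 + np.exp(-(ffn_inputs @ kernel + bias)))
print(output.shape)  # (2, 7, 16)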
train
fill_memory_slot
Fills the memory slot at a particular index with the given value. Args: memory: a 4-d tensor [memory_size, batch, length, channel] containing the state of all steps value: a 3-d tensor [batch, length, channel] as the state index: integer in [0, memory_size) Returns: filled memory
tensor2tensor/models/research/universal_transformer_util.py
def fill_memory_slot(memory, value, index): """Fills the memory slot at a particular index with the given value. Args: memory: a 4-d tensor [memory_size, batch, length, channel] containing the state of all steps value: a 3-d tensor [batch, length, channel] as the state index: integer in [0, memory_size) Returns: filled memory """ mask = tf.to_float( tf.one_hot(index, tf.shape(memory)[0])[:, None, None, None]) fill_memory = (1 - mask) * memory + mask * value[None, ...] return fill_memory
def fill_memory_slot(memory, value, index): """Fills the memory slot at a particular index with the given value. Args: memory: a 4-d tensor [memory_size, batch, length, channel] containing the state of all steps value: a 3-d tensor [batch, length, channel] as the state index: integer in [0, memory_size) Returns: filled memory """ mask = tf.to_float( tf.one_hot(index, tf.shape(memory)[0])[:, None, None, None]) fill_memory = (1 - mask) * memory + mask * value[None, ...] return fill_memory
[ "Fills", "the", "memory", "slot", "at", "a", "particular", "index", "with", "the", "given", "value", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1329-L1346
[ "def", "fill_memory_slot", "(", "memory", ",", "value", ",", "index", ")", ":", "mask", "=", "tf", ".", "to_float", "(", "tf", ".", "one_hot", "(", "index", ",", "tf", ".", "shape", "(", "memory", ")", "[", "0", "]", ")", "[", ":", ",", "None", ...
272500b6efe353aeb638d2745ed56e519462ca31
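The one-hot masking trick is easy to verify with NumPy; this writes a value into slot 1 of a 3-slot memory while leaving the other slots untouched:

import numpy as np

memory_size, batch, length, channel = 3, 2, 4, 5
memory = np.zeros((memory_size, batch, length, channel))
value = np.ones((batch, length, channel))
index = 1

mask = np.eye(memory_size)[index][:, None, None, None]  # one_hot, broadcastable
memory = (1 - mask) * memory + mask * value[None, ...]
print(memory[1].mean(), memory[0].mean())  # 1.0 0.0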
train
add_depth_embedding
Add n-dimensional embedding as the depth embedding (timing signal). Adds embeddings to represent the position of the step in the recurrent tower. Args: x: a tensor with shape [max_step, batch, length, depth] Returns: a Tensor the same shape as x.
tensor2tensor/models/research/universal_transformer_util.py
def add_depth_embedding(x): """Add n-dimensional embedding as the depth embedding (timing signal). Adds embeddings to represent the position of the step in the recurrent tower. Args: x: a tensor with shape [max_step, batch, length, depth] Returns: a Tensor the same shape as x. """ x_shape = common_layers.shape_list(x) depth = x_shape[-1] num_steps = x_shape[0] shape = [num_steps, 1, 1, depth] depth_embedding = ( tf.get_variable( "depth_embedding", shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth** 0.5)) x += depth_embedding return x
def add_depth_embedding(x): """Add n-dimensional embedding as the depth embedding (timing signal). Adds embeddings to represent the position of the step in the recurrent tower. Args: x: a tensor with shape [max_step, batch, length, depth] Returns: a Tensor the same shape as x. """ x_shape = common_layers.shape_list(x) depth = x_shape[-1] num_steps = x_shape[0] shape = [num_steps, 1, 1, depth] depth_embedding = ( tf.get_variable( "depth_embedding", shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth** 0.5)) x += depth_embedding return x
[ "Add", "n", "-", "dimensional", "embedding", "as", "the", "depth", "embedding", "(", "timing", "signal", ")", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1349-L1373
[ "def", "add_depth_embedding", "(", "x", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "depth", "=", "x_shape", "[", "-", "1", "]", "num_steps", "=", "x_shape", "[", "0", "]", "shape", "=", "[", "num_steps", ",", "1", ",...
272500b6efe353aeb638d2745ed56e519462ca31
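A quick shape check of the broadcast: a [num_steps, 1, 1, depth] table added to a [num_steps, batch, length, depth] stack tags each step with its own vector (random values stand in for the trained variable, which the repo scales by depth**-0.5 then depth**0.5):

import numpy as np

num_steps, batch, length, depth = 4, 2, 6, 8
x = np.zeros((num_steps, batch, length, depth))
depth_embedding = np.random.randn(num_steps, 1, 1, depth)  # stand-in variable
x = x + depth_embedding  # broadcasts over batch and length
print(x.shape)           # (4, 2, 6, 8)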
train
step_preprocess
Preprocess the input at the beginning of each step. Args: x: input tensor step: step hparams: model hyper-parameters Returns: preprocessed input.
tensor2tensor/models/research/universal_transformer_util.py
def step_preprocess(x, step, hparams): """Preprocess the input at the beginning of each step. Args: x: input tensor step: step hparams: model hyper-parameters Returns: preprocessed input. """ original_channel_size = common_layers.shape_list(x)[-1] if hparams.add_position_timing_signal: x = add_position_timing_signal(x, step, hparams) if hparams.add_step_timing_signal: x = add_step_timing_signal(x, step, hparams) if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal) and hparams.add_or_concat_timing_signal == "concat"): # linear projection to the original dimension of x x = common_layers.dense( x, original_channel_size, activation=None, use_bias=False) if hparams.add_sru: x = common_layers.sru(x) return x
def step_preprocess(x, step, hparams): """Preprocess the input at the beginning of each step. Args: x: input tensor step: step hparams: model hyper-parameters Returns: preprocessed input. """ original_channel_size = common_layers.shape_list(x)[-1] if hparams.add_position_timing_signal: x = add_position_timing_signal(x, step, hparams) if hparams.add_step_timing_signal: x = add_step_timing_signal(x, step, hparams) if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal) and hparams.add_or_concat_timing_signal == "concat"): # linear projection to the original dimension of x x = common_layers.dense( x, original_channel_size, activation=None, use_bias=False) if hparams.add_sru: x = common_layers.sru(x) return x
[ "Preprocess", "the", "input", "at", "the", "beginning", "of", "each", "step", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1376-L1405
[ "def", "step_preprocess", "(", "x", ",", "step", ",", "hparams", ")", ":", "original_channel_size", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "-", "1", "]", "if", "hparams", ".", "add_position_timing_signal", ":", "x", "=", "add_position_t...
272500b6efe353aeb638d2745ed56e519462ca31
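When a timing signal is concatenated rather than added, the channel count grows, which is why the dense projection restores the original width; a shape-only sketch:

import numpy as np

batch, length, channels = 2, 5, 16
x = np.random.randn(batch, length, channels)

signal = np.random.randn(1, length, 8)                   # pretend concat signal
x_cat = np.concatenate([x, np.tile(signal, (batch, 1, 1))], axis=-1)
proj = np.random.randn(x_cat.shape[-1], channels) * 0.1  # dense, no bias
x_out = x_cat @ proj
print(x_out.shape)  # back to (2, 5, 16)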
train
add_position_timing_signal
Add n-dimensional embedding as the position (horizontal) timing signal. Args: x: a tensor with shape [batch, length, depth] step: step hparams: model hyper parameters Returns: a Tensor with the same shape as x.
tensor2tensor/models/research/universal_transformer_util.py
def add_position_timing_signal(x, step, hparams): """Add n-dimensional embedding as the position (horizontal) timing signal. Args: x: a tensor with shape [batch, length, depth] step: step hparams: model hyper parameters Returns: a Tensor with the same shape as x. """ if not hparams.position_start_index: index = 0 elif hparams.position_start_index == "random": # Shift all positions randomly # TODO(dehghani): What would be reasonable for max number of shift? index = tf.random_uniform( [], maxval=common_layers.shape_list(x)[1], dtype=tf.int32) elif hparams.position_start_index == "step": # Shift positions based on the step if hparams.recurrence_type == "act": num_steps = hparams.act_max_steps else: num_steps = hparams.num_rec_steps index = tf.cast( common_layers.shape_list(x)[1] * step / num_steps, dtype=tf.int32) # No need for the timing signal in the encoder/decoder input preparation assert hparams.pos is None length = common_layers.shape_list(x)[1] channels = common_layers.shape_list(x)[2] signal = common_attention.get_timing_signal_1d( length, channels, start_index=index) if hparams.add_or_concat_timing_signal == "add": x_with_timing = x + common_layers.cast_like(signal, x) elif hparams.add_or_concat_timing_signal == "concat": batch_size = common_layers.shape_list(x)[0] signal_tiled = tf.tile(signal, [batch_size, 1, 1]) x_with_timing = tf.concat((x, signal_tiled), axis=-1) return x_with_timing
def add_position_timing_signal(x, step, hparams): """Add n-dimensional embedding as the position (horizontal) timing signal. Args: x: a tensor with shape [batch, length, depth] step: step hparams: model hyper parameters Returns: a Tensor with the same shape as x. """ if not hparams.position_start_index: index = 0 elif hparams.position_start_index == "random": # Shift all positions randomly # TODO(dehghani): What would be reasonable for max number of shift? index = tf.random_uniform( [], maxval=common_layers.shape_list(x)[1], dtype=tf.int32) elif hparams.position_start_index == "step": # Shift positions based on the step if hparams.recurrence_type == "act": num_steps = hparams.act_max_steps else: num_steps = hparams.num_rec_steps index = tf.cast( common_layers.shape_list(x)[1] * step / num_steps, dtype=tf.int32) # No need for the timing signal in the encoder/decoder input preparation assert hparams.pos is None length = common_layers.shape_list(x)[1] channels = common_layers.shape_list(x)[2] signal = common_attention.get_timing_signal_1d( length, channels, start_index=index) if hparams.add_or_concat_timing_signal == "add": x_with_timing = x + common_layers.cast_like(signal, x) elif hparams.add_or_concat_timing_signal == "concat": batch_size = common_layers.shape_list(x)[0] signal_tiled = tf.tile(signal, [batch_size, 1, 1]) x_with_timing = tf.concat((x, signal_tiled), axis=-1) return x_with_timing
[ "Add", "n", "-", "dimensional", "embedding", "as", "the", "position", "(", "horizontal", ")", "timing", "signal", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1408-L1455
[ "def", "add_position_timing_signal", "(", "x", ",", "step", ",", "hparams", ")", ":", "if", "not", "hparams", ".", "position_start_index", ":", "index", "=", "0", "elif", "hparams", ".", "position_start_index", "==", "\"random\"", ":", "# Shift all positions rando...
272500b6efe353aeb638d2745ed56e519462ca31
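get_timing_signal_1d follows the standard Transformer sinusoid; a minimal NumPy re-derivation for intuition (odd channel counts, which the real helper pads, are ignored here):

import numpy as np

def timing_signal_1d(length, channels, start_index=0,
                     min_timescale=1.0, max_timescale=1.0e4):
  position = np.arange(start_index, start_index + length, dtype=np.float32)
  num_timescales = channels // 2
  log_increment = (np.log(max_timescale / min_timescale)
                   / max(num_timescales - 1, 1))
  inv_timescales = min_timescale * np.exp(
      -np.arange(num_timescales, dtype=np.float32) * log_increment)
  scaled = position[:, None] * inv_timescales[None, :]
  return np.concatenate([np.sin(scaled), np.cos(scaled)], axis=1)[None, :, :]

print(timing_signal_1d(5, 8, start_index=3).shape)  # (1, 5, 8)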
train
add_step_timing_signal
Add n-dimensional embedding as the step (vertical) timing signal. Args: x: a tensor with shape [batch, length, depth] step: step hparams: model hyper parameters Returns: a Tensor with the same shape as x.
tensor2tensor/models/research/universal_transformer_util.py
def add_step_timing_signal(x, step, hparams): """Add n-dimensional embedding as the step (vertical) timing signal. Args: x: a tensor with shape [batch, length, depth] step: step hparams: model hyper parameters Returns: a Tensor with the same shape as x. """ if hparams.recurrence_type == "act": num_steps = hparams.act_max_steps else: num_steps = hparams.num_rec_steps channels = common_layers.shape_list(x)[-1] if hparams.step_timing_signal_type == "learned": signal = common_attention.get_layer_timing_signal_learned_1d( channels, step, num_steps) elif hparams.step_timing_signal_type == "sinusoid": signal = common_attention.get_layer_timing_signal_sinusoid_1d( channels, step, num_steps) if hparams.add_or_concat_timing_signal == "add": x_with_timing = x + common_layers.cast_like(signal, x) elif hparams.add_or_concat_timing_signal == "concat": batch_size = common_layers.shape_list(x)[0] length = common_layers.shape_list(x)[1] signal_tiled = tf.tile(signal, [batch_size, length, 1]) x_with_timing = tf.concat((x, signal_tiled), axis=-1) return x_with_timing
def add_step_timing_signal(x, step, hparams): """Add n-dimensional embedding as the step (vertical) timing signal. Args: x: a tensor with shape [batch, length, depth] step: step hparams: model hyper parameters Returns: a Tensor with the same shape as x. """ if hparams.recurrence_type == "act": num_steps = hparams.act_max_steps else: num_steps = hparams.num_rec_steps channels = common_layers.shape_list(x)[-1] if hparams.step_timing_signal_type == "learned": signal = common_attention.get_layer_timing_signal_learned_1d( channels, step, num_steps) elif hparams.step_timing_signal_type == "sinusoid": signal = common_attention.get_layer_timing_signal_sinusoid_1d( channels, step, num_steps) if hparams.add_or_concat_timing_signal == "add": x_with_timing = x + common_layers.cast_like(signal, x) elif hparams.add_or_concat_timing_signal == "concat": batch_size = common_layers.shape_list(x)[0] length = common_layers.shape_list(x)[1] signal_tiled = tf.tile(signal, [batch_size, length, 1]) x_with_timing = tf.concat((x, signal_tiled), axis=-1) return x_with_timing
[ "Add", "n", "-", "dimensional", "embedding", "as", "the", "step", "(", "vertical", ")", "timing", "signal", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L1458-L1493
[ "def", "add_step_timing_signal", "(", "x", ",", "step", ",", "hparams", ")", ":", "if", "hparams", ".", "recurrence_type", "==", "\"act\"", ":", "num_steps", "=", "hparams", ".", "act_max_steps", "else", ":", "num_steps", "=", "hparams", ".", "num_rec_steps", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
wet_records_from_file_obj
Iterate through records in WET file object.
tensor2tensor/data_generators/wikisum/utils.py
def wet_records_from_file_obj(f, take_ownership=False): """Iterate through records in WET file object.""" while True: record = WETRecord.read(f) if record is None: break if not record.url: continue yield record if take_ownership: f.close()
def wet_records_from_file_obj(f, take_ownership=False): """Iterate through records in WET file object.""" while True: record = WETRecord.read(f) if record is None: break if not record.url: continue yield record if take_ownership: f.close()
[ "Iterate", "through", "records", "in", "WET", "file", "object", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/utils.py#L101-L115
[ "def", "wet_records_from_file_obj", "(", "f", ",", "take_ownership", "=", "False", ")", ":", "while", "True", ":", "record", "=", "WETRecord", ".", "read", "(", "f", ")", "if", "record", "is", "None", ":", "break", "if", "not", "record", ".", "url", ":...
272500b6efe353aeb638d2745ed56e519462ca31
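Typical usage on a local Common Crawl shard; the path is illustrative, and text mode is used so readline yields strings for the startswith checks:

import gzip
from tensor2tensor.data_generators.wikisum import utils

with gzip.open("/tmp/shard.warc.wet.gz", mode="rt") as f:  # hypothetical path
  for i, record in enumerate(utils.wet_records_from_file_obj(f)):
    print(record.url, len(record.content))
    if i >= 2:
      break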
train
wet_records
Generate WETRecords from filepath.
tensor2tensor/data_generators/wikisum/utils.py
def wet_records(wet_filepath): """Generate WETRecords from filepath.""" if wet_filepath.endswith('.gz'): fopen = gzip.open else: fopen = tf.gfile.GFile with fopen(wet_filepath) as f: for record in wet_records_from_file_obj(f): yield record
def wet_records(wet_filepath): """Generate WETRecords from filepath.""" if wet_filepath.endswith('.gz'): fopen = gzip.open else: fopen = tf.gfile.GFile with fopen(wet_filepath) as f: for record in wet_records_from_file_obj(f): yield record
[ "Generate", "WETRecords", "from", "filepath", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/utils.py#L118-L127
[ "def", "wet_records", "(", "wet_filepath", ")", ":", "if", "wet_filepath", ".", "endswith", "(", "'.gz'", ")", ":", "fopen", "=", "gzip", ".", "open", "else", ":", "fopen", "=", "tf", ".", "gfile", ".", "GFile", "with", "fopen", "(", "wet_filepath", ")...
272500b6efe353aeb638d2745ed56e519462ca31
train
filter_paragraph
Simple filter to remove obviously bad paragraphs (bad text extraction). Note this needs to run very quickly as it is applied to every paragraph in the corpus, so nothing fancy! This whole method should be linear expected time in len(p). Args: p: string, paragraph Returns: True if we should remove the paragraph.
tensor2tensor/data_generators/wikisum/utils.py
def filter_paragraph(p): """Simple filter to remove obviously bad paragraphs (bad text extraction). Note this needs to run very quickly as it is applied to every paragraph in the corpus, so nothing fancy! This whole method should be linear expected time in len(p). Args: p: string, paragraph Returns: True if we should remove the paragraph. """ # Expect a minimum number of words. tokens = p.split() if len(tokens) < 6: return True # Require some letters. if not re.search(_SOME_ALPHA_RE, p): return True # Keep this one at the end, probably the most complicated logic. # We try to detect sentences, which should have a minimum of 3 tokens # with only alphabetic characters. last = 0 found_sentence = False num_alpha = 0 for i, x in enumerate(tokens): if x == '.': if i - last > 3 and num_alpha >= 3: found_sentence = True break last = i num_alpha = 0 if re.match(_ONLY_ALPHA_RE, x): num_alpha += 1 if not found_sentence: return True return False
def filter_paragraph(p): """Simple filter to remove obviously bad paragraphs (bad text extraction). Note this needs to run very quickly as it is applied to every paragraph in the corpus, so nothing fancy! This whole method should be linear expected time in len(p). Args: p: string, paragraph Returns: True if we should remove the paragraph. """ # Expect a minimum number of words. tokens = p.split() if len(tokens) < 6: return True # Require some letters. if not re.search(_SOME_ALPHA_RE, p): return True # Keep this one at the end, probably the most complicated logic. # We try to detect sentences, which should have a minimum of 3 tokens # with only alphabetic characters. last = 0 found_sentence = False num_alpha = 0 for i, x in enumerate(tokens): if x == '.': if i - last > 3 and num_alpha >= 3: found_sentence = True break last = i num_alpha = 0 if re.match(_ONLY_ALPHA_RE, x): num_alpha += 1 if not found_sentence: return True return False
[ "Simple", "filter", "to", "remove", "obviously", "bad", "paragraphs", "(", "bad", "text", "extraction", ")", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/utils.py#L214-L254
[ "def", "filter_paragraph", "(", "p", ")", ":", "# Expect a minimum number of words.", "tokens", "=", "p", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", "<", "6", ":", "return", "True", "# Require some letters.", "if", "not", "re", ".", "search", ...
272500b6efe353aeb638d2745ed56e519462ca31
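The two module-level regexes are not shown in this record; presumably _SOME_ALPHA_RE matches any letter and _ONLY_ALPHA_RE matches purely-alphabetic tokens. A quick behavioural check under that assumption:

from tensor2tensor.data_generators.wikisum import utils

print(utils.filter_paragraph('too short'))        # True: fewer than 6 tokens
print(utils.filter_paragraph('1 2 3 4 5 6 7 8'))  # True: no letters at all
print(utils.filter_paragraph(
    'we saw the cat sit down . it was calm today .'))  # False: sentence found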
train
timing
Log start, end, and duration.
tensor2tensor/data_generators/wikisum/utils.py
def timing(name=''): """Log start, end, and duration.""" start = datetime.datetime.now() timestamp = start.strftime('%H:%M') tf.logging.info('Starting job [%s] at %s', name, timestamp) yield end = datetime.datetime.now() timestamp = end.strftime('%H:%M') tf.logging.info('Finished job [%s] at %s', name, timestamp) duration = end - start duration_mins = duration.total_seconds() / 60 tf.logging.info('Total time [%s] (m): %d', name, int(duration_mins))
def timing(name=''): """Log start, end, and duration.""" start = datetime.datetime.now() timestamp = start.strftime('%H:%M') tf.logging.info('Starting job [%s] at %s', name, timestamp) yield end = datetime.datetime.now() timestamp = end.strftime('%H:%M') tf.logging.info('Finished job [%s] at %s', name, timestamp) duration = end - start duration_mins = duration.total_seconds() / 60 tf.logging.info('Total time [%s] (m): %d', name, int(duration_mins))
[ "Log", "start", "end", "and", "duration", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/utils.py#L258-L269
[ "def", "timing", "(", "name", "=", "''", ")", ":", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "timestamp", "=", "start", ".", "strftime", "(", "'%H:%M'", ")", "tf", ".", "logging", ".", "info", "(", "'Starting job [%s] at %s'", ",...
272500b6efe353aeb638d2745ed56e519462ca31
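The body yields once, so in the repo it is presumably wrapped with @contextlib.contextmanager (decorators are stripped in these records); a self-contained sketch of that reading, plus usage:

import contextlib
import datetime
import tensorflow as tf

@contextlib.contextmanager
def timing(name=''):
  # Log start, end, and duration, as in the record above.
  start = datetime.datetime.now()
  tf.logging.info('Starting job [%s] at %s', name, start.strftime('%H:%M'))
  yield
  end = datetime.datetime.now()
  tf.logging.info('Finished job [%s] at %s', name, end.strftime('%H:%M'))
  tf.logging.info('Total time [%s] (m): %d', name,
                  int((end - start).total_seconds() / 60))

with timing('download-shards'):
  pass  # the timed work goes here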
train
WETHeader.read
Read header from file. Headers end with length and then 1 blank line.
tensor2tensor/data_generators/wikisum/utils.py
def read(cls, f): """Read header from file. Headers end with length and then 1 blank line.""" url = None line = f.readline() if not line: # EOF return None while not line.startswith(cls.LENGTH_HEADER): if line.startswith(cls.URI_HEADER): url = line[len(cls.URI_HEADER):].strip() line = f.readline() # Consume empty separator f.readline() # Read content length = int(line.split(':')[1]) return cls(url, length)
def read(cls, f): """Read header from file. Headers end with length and then 1 blank line.""" url = None line = f.readline() if not line: # EOF return None while not line.startswith(cls.LENGTH_HEADER): if line.startswith(cls.URI_HEADER): url = line[len(cls.URI_HEADER):].strip() line = f.readline() # Consume empty separator f.readline() # Read content length = int(line.split(':')[1]) return cls(url, length)
[ "Read", "header", "from", "file", ".", "Headers", "end", "with", "length", "and", "then", "1", "blank", "line", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/utils.py#L61-L80
[ "def", "read", "(", "cls", ",", "f", ")", ":", "url", "=", "None", "line", "=", "f", ".", "readline", "(", ")", "if", "not", "line", ":", "# EOF", "return", "None", "while", "not", "line", ".", "startswith", "(", "cls", ".", "LENGTH_HEADER", ")", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
WETRecord.read
Read WETRecord from file. Records end with 2 blank lines.
tensor2tensor/data_generators/wikisum/utils.py
def read(cls, f): """Read WETRecord from file. Records end with 2 blank lines.""" header = WETHeader.read(f) if header is None: # EOF return None content = f.read(header.length) # Consume empty separators f.readline() f.readline() return cls(header.url, content)
def read(cls, f): """Read WETRecord from file. Records end with 2 blank lines.""" header = WETHeader.read(f) if header is None: # EOF return None content = f.read(header.length) # Consume empty separators f.readline() f.readline() return cls(header.url, content)
[ "Read", "WETRecord", "from", "file", ".", "Records", "end", "with", "2", "blank", "lines", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/utils.py#L86-L98
[ "def", "read", "(", "cls", ",", "f", ")", ":", "header", "=", "WETHeader", ".", "read", "(", "f", ")", "if", "header", "is", "None", ":", "# EOF", "return", "None", "content", "=", "f", ".", "read", "(", "header", ".", "length", ")", "# Consume emp...
272500b6efe353aeb638d2745ed56e519462ca31
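The framing (headers, one blank separator line, payload, two blank lines) can be exercised on an in-memory record; this assumes the header prefixes are 'WARC-Target-URI: ' and 'Content-Length: ', which the parsing above implies but this record does not show:

import io
from tensor2tensor.data_generators.wikisum import utils

raw = ('WARC-Target-URI: http://example.com/\n'
       'Content-Length: 11\n'
       '\n'
       'hello world\n'
       '\n'
       '\n')
record = utils.WETRecord.read(io.StringIO(raw))
print(record.url, repr(record.content))  # http://example.com/ 'hello world'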
train
MLP
Multi-layer feed-forward neural network with non-linear activations.
tensor2tensor/trax/models/mlp.py
def MLP(num_hidden_layers=2, hidden_size=512, activation_fn=layers.Relu, num_output_classes=10, mode="train"): """Multi-layer feed-forward neural network with non-linear activations.""" del mode cur_layers = [layers.Flatten()] for _ in range(num_hidden_layers): cur_layers += [layers.Dense(hidden_size), activation_fn()] cur_layers += [layers.Dense(num_output_classes), layers.LogSoftmax()] return layers.Serial(*cur_layers)
def MLP(num_hidden_layers=2, hidden_size=512, activation_fn=layers.Relu, num_output_classes=10, mode="train"): """Multi-layer feed-forward neural network with non-linear activations.""" del mode cur_layers = [layers.Flatten()] for _ in range(num_hidden_layers): cur_layers += [layers.Dense(hidden_size), activation_fn()] cur_layers += [layers.Dense(num_output_classes), layers.LogSoftmax()] return layers.Serial(*cur_layers)
[ "Multi", "-", "layer", "feed", "-", "forward", "neural", "network", "with", "non", "-", "linear", "activations", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/mlp.py#L25-L36
[ "def", "MLP", "(", "num_hidden_layers", "=", "2", ",", "hidden_size", "=", "512", ",", "activation_fn", "=", "layers", ".", "Relu", ",", "num_output_classes", "=", "10", ",", "mode", "=", "\"train\"", ")", ":", "del", "mode", "cur_layers", "=", "[", "lay...
272500b6efe353aeb638d2745ed56e519462ca31
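The forward pass this stack denotes is small enough to write out directly; a NumPy sketch with random weights standing in for trained parameters:

import numpy as np

def log_softmax(x):
  x = x - x.max(axis=-1, keepdims=True)
  return x - np.log(np.exp(x).sum(axis=-1, keepdims=True))

rng = np.random.RandomState(0)
batch, hidden, classes = 4, 512, 10
x = rng.randn(batch, 28, 28).reshape(batch, -1)  # Flatten
for _ in range(2):                               # num_hidden_layers
  w = rng.randn(x.shape[-1], hidden) * 0.05
  x = np.maximum(x @ w, 0.0)                     # Dense + Relu
logits = log_softmax(x @ (rng.randn(hidden, classes) * 0.05))  # Dense + LogSoftmax
print(logits.shape)  # (4, 10)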
train
EnvProblem._verify_same_spaces
Verifies that all the envs have the same observation and action space.
tensor2tensor/envs/env_problem.py
def _verify_same_spaces(self): """Verifies that all the envs have the same observation and action space.""" # Pre-conditions: self._envs is initialized. if self._envs is None: raise ValueError("Environments not initialized.") if not isinstance(self._envs, list): tf.logging.warning("Not checking observation and action space " "compatibility across envs, since there is just one.") return # NOTE: We compare string representations of observation_space and # action_space because compositional classes like space.Tuple don't return # true on object comparison. if not all( str(env.observation_space) == str(self.observation_space) for env in self._envs): err_str = ("All environments should have the same observation space, but " "don't.") tf.logging.error(err_str) # Log all observation spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has observation space [%s]", i, env.observation_space) raise ValueError(err_str) if not all( str(env.action_space) == str(self.action_space) for env in self._envs): err_str = "All environments should have the same action space, but don't." tf.logging.error(err_str) # Log all action spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has action space [%s]", i, env.action_space) raise ValueError(err_str)
def _verify_same_spaces(self): """Verifies that all the envs have the same observation and action space.""" # Pre-conditions: self._envs is initialized. if self._envs is None: raise ValueError("Environments not initialized.") if not isinstance(self._envs, list): tf.logging.warning("Not checking observation and action space " "compatibility across envs, since there is just one.") return # NOTE: We compare string representations of observation_space and # action_space because compositional classes like space.Tuple don't return # true on object comparison. if not all( str(env.observation_space) == str(self.observation_space) for env in self._envs): err_str = ("All environments should have the same observation space, but " "don't.") tf.logging.error(err_str) # Log all observation spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has observation space [%s]", i, env.observation_space) raise ValueError(err_str) if not all( str(env.action_space) == str(self.action_space) for env in self._envs): err_str = "All environments should have the same action space, but don't." tf.logging.error(err_str) # Log all action spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has action space [%s]", i, env.action_space) raise ValueError(err_str)
[ "Verifies", "that", "all", "the", "envs", "have", "the", "same", "observation", "and", "action", "space", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L199-L235
[ "def", "_verify_same_spaces", "(", "self", ")", ":", "# Pre-conditions: self._envs is initialized.", "if", "self", ".", "_envs", "is", "None", ":", "raise", "ValueError", "(", "\"Environments not initialized.\"", ")", "if", "not", "isinstance", "(", "self", ".", "_e...
272500b6efe353aeb638d2745ed56e519462ca31
train
EnvProblem.initialize_environments
Initializes the environments and trajectories. Subclasses can override this if they don't want a default implementation which initializes `batch_size` environments, but must take care to initialize self._trajectories (this is checked in __init__ anyways). Args: batch_size: (int) Number of `self.base_env_name` envs to initialize.
tensor2tensor/envs/env_problem.py
def initialize_environments(self, batch_size=1): """Initializes the environments and trajectories. Subclasses can override this if they don't want a default implementation which initializes `batch_size` environments, but must take care to initialize self._trajectories (this is checked in __init__ anyways). Args: batch_size: (int) Number of `self.base_env_name` envs to initialize. """ assert batch_size >= 1 self._batch_size = batch_size self._envs = [gym.make(self.base_env_name) for _ in range(batch_size)] if self._env_wrapper_fn is not None: self._envs = list(map(self._env_wrapper_fn, self._envs)) # If self.observation_space and self.action_space aren't None, then this is # a re-initialization of this class; in that case make sure that they match # our previous behaviour. if self._observation_space: assert str(self._observation_space) == str( self._envs[0].observation_space) else: # This means that we are initializing this class for the first time. # # We set this equal to the first env's observation space; later on we'll # verify that all envs have the same observation space. self._observation_space = self._envs[0].observation_space # Similarly for action_space if self._action_space: assert str(self._action_space) == str(self._envs[0].action_space) else: self._action_space = self._envs[0].action_space self._verify_same_spaces() # If self.reward_range is None, we should take the reward range of the env. if self.reward_range is None: self._reward_range = self._envs[0].reward_range # This data structure stores the history of each env. # # NOTE: Even if the env is a NN and can step in all batches concurrently, it # is still valuable to store the trajectories separately. self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size)
def initialize_environments(self, batch_size=1): """Initializes the environments and trajectories. Subclasses can override this if they don't want a default implementation which initializes `batch_size` environments, but must take care to initialize self._trajectories (this is checked in __init__ anyways). Args: batch_size: (int) Number of `self.base_env_name` envs to initialize. """ assert batch_size >= 1 self._batch_size = batch_size self._envs = [gym.make(self.base_env_name) for _ in range(batch_size)] if self._env_wrapper_fn is not None: self._envs = list(map(self._env_wrapper_fn, self._envs)) # If self.observation_space and self.action_space aren't None, then this is # a re-initialization of this class; in that case make sure that they match # our previous behaviour. if self._observation_space: assert str(self._observation_space) == str( self._envs[0].observation_space) else: # This means that we are initializing this class for the first time. # # We set this equal to the first env's observation space; later on we'll # verify that all envs have the same observation space. self._observation_space = self._envs[0].observation_space # Similarly for action_space if self._action_space: assert str(self._action_space) == str(self._envs[0].action_space) else: self._action_space = self._envs[0].action_space self._verify_same_spaces() # If self.reward_range is None, we should take the reward range of the env. if self.reward_range is None: self._reward_range = self._envs[0].reward_range # This data structure stores the history of each env. # # NOTE: Even if the env is a NN and can step in all batches concurrently, it # is still valuable to store the trajectories separately. self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size)
[ "Initializes", "the", "environments", "and", "trajectories", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L248-L295
[ "def", "initialize_environments", "(", "self", ",", "batch_size", "=", "1", ")", ":", "assert", "batch_size", ">=", "1", "self", ".", "_batch_size", "=", "batch_size", "self", ".", "_envs", "=", "[", "gym", ".", "make", "(", "self", ".", "base_env_name", ...
272500b6efe353aeb638d2745ed56e519462ca31
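What the default initialization amounts to, as a standalone sketch (CartPole is only an example env id):

import gym

batch_size = 4
envs = [gym.make("CartPole-v1") for _ in range(batch_size)]

# Spaces are compared by string to sidestep object identity on composite
# spaces, mirroring _verify_same_spaces above.
assert all(str(e.observation_space) == str(envs[0].observation_space)
           for e in envs)
assert all(str(e.action_space) == str(envs[0].action_space) for e in envs)
print(envs[0].reward_range)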
train
EnvProblem.process_rewards
Clips, rounds, and changes to integer type. Args: rewards: numpy array of raw (float) rewards. Returns: processed_rewards: numpy array of np.int64
tensor2tensor/envs/env_problem.py
def process_rewards(self, rewards): """Clips, rounds, and changes to integer type. Args: rewards: numpy array of raw (float) rewards. Returns: processed_rewards: numpy array of np.int64 """ min_reward, max_reward = self.reward_range # Clips at min and max reward. rewards = np.clip(rewards, min_reward, max_reward) # Round to (nearest) int and convert to integral type. rewards = np.around(rewards, decimals=0).astype(np.int64) return rewards
def process_rewards(self, rewards): """Clips, rounds, and changes to integer type. Args: rewards: numpy array of raw (float) rewards. Returns: processed_rewards: numpy array of np.int64 """ min_reward, max_reward = self.reward_range # Clips at min and max reward. rewards = np.clip(rewards, min_reward, max_reward) # Round to (nearest) int and convert to integral type. rewards = np.around(rewards, decimals=0).astype(np.int64) return rewards
[ "Clips", "rounds", "and", "changes", "to", "integer", "type", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L352-L368
[ "def", "process_rewards", "(", "self", ",", "rewards", ")", ":", "min_reward", ",", "max_reward", "=", "self", ".", "reward_range", "# Clips at min and max reward.", "rewards", "=", "np", ".", "clip", "(", "rewards", ",", "min_reward", ",", "max_reward", ")", ...
272500b6efe353aeb638d2745ed56e519462ca31
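A minimal numpy sketch (not from the repo) of the clip-then-round behaviour of `process_rewards`, assuming a finite reward range of (-1, 1); the sample rewards are made up:

import numpy as np

min_reward, max_reward = -1, 1  # assumed reward range
raw = np.array([-2.3, -0.4, 0.49, 0.51, 3.7], dtype=np.float32)
clipped = np.clip(raw, min_reward, max_reward)               # [-1., -0.4, 0.49, 0.51, 1.]
processed = np.around(clipped, decimals=0).astype(np.int64)  # [-1, 0, 0, 1, 1]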
train
EnvProblem.num_rewards
Returns the number of distinct rewards. Returns: Returns None if the reward range is infinite or the processed rewards aren't discrete, otherwise returns the number of distinct rewards.
tensor2tensor/envs/env_problem.py
def num_rewards(self):
  """Returns the number of distinct rewards.

  Returns:
    Returns None if the reward range is infinite or the processed rewards
    aren't discrete, otherwise returns the number of distinct rewards.
  """

  # Pre-conditions: reward range is finite.
  #               : processed rewards are discrete.
  if not self.is_reward_range_finite:
    tf.logging.error("Infinite reward range, `num_rewards` returning None")
    return None
  if not self.is_processed_rewards_discrete:
    tf.logging.error(
        "Processed rewards are not discrete, `num_rewards` returning None")
    return None

  min_reward, max_reward = self.reward_range
  return max_reward - min_reward + 1
def num_rewards(self):
  """Returns the number of distinct rewards.

  Returns:
    Returns None if the reward range is infinite or the processed rewards
    aren't discrete, otherwise returns the number of distinct rewards.
  """

  # Pre-conditions: reward range is finite.
  #               : processed rewards are discrete.
  if not self.is_reward_range_finite:
    tf.logging.error("Infinite reward range, `num_rewards` returning None")
    return None
  if not self.is_processed_rewards_discrete:
    tf.logging.error(
        "Processed rewards are not discrete, `num_rewards` returning None")
    return None

  min_reward, max_reward = self.reward_range
  return max_reward - min_reward + 1
[ "Returns", "the", "number", "of", "distinct", "rewards", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L380-L399
[ "def", "num_rewards", "(", "self", ")", ":", "# Pre-conditions: reward range is finite.", "# : processed rewards are discrete.", "if", "not", "self", ".", "is_reward_range_finite", ":", "tf", ".", "logging", ".", "error", "(", "\"Infinite reward range, `num_rewa...
272500b6efe353aeb638d2745ed56e519462ca31
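With the same assumed reward range of (-1, 1), `num_rewards` simply counts the integers in the closed interval:

min_reward, max_reward = -1, 1             # assumed finite, discrete range
num_rewards = max_reward - min_reward + 1  # 3 distinct rewards: -1, 0, 1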
train
EnvProblem._reset
Resets environments at indices; shouldn't pre-process or record. Subclasses should override this to do the actual reset if something other than the default implementation is desired. Args: indices: list of indices of underlying envs to call reset on. Returns: np.ndarray of stacked observations from the reset-ed envs.
tensor2tensor/envs/env_problem.py
def _reset(self, indices):
  """Resets environments at indices; shouldn't pre-process or record.

  Subclasses should override this to do the actual reset if something other
  than the default implementation is desired.

  Args:
    indices: list of indices of underlying envs to call reset on.

  Returns:
    np.ndarray of stacked observations from the reset-ed envs.
  """
  # Pre-conditions: common_preconditions, see `assert_common_preconditions`.
  self.assert_common_preconditions()

  # This returns a numpy array with first dimension `len(indices)` and the
  # rest being the dimensionality of the observation.
  return np.stack([self._envs[index].reset() for index in indices])
def _reset(self, indices):
  """Resets environments at indices; shouldn't pre-process or record.

  Subclasses should override this to do the actual reset if something other
  than the default implementation is desired.

  Args:
    indices: list of indices of underlying envs to call reset on.

  Returns:
    np.ndarray of stacked observations from the reset-ed envs.
  """
  # Pre-conditions: common_preconditions, see `assert_common_preconditions`.
  self.assert_common_preconditions()

  # This returns a numpy array with first dimension `len(indices)` and the
  # rest being the dimensionality of the observation.
  return np.stack([self._envs[index].reset() for index in indices])
[ "Resets", "environments", "at", "indices", "shouldn", "t", "pre", "-", "process", "or", "record", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L454-L472
[ "def", "_reset", "(", "self", ",", "indices", ")", ":", "# Pre-conditions: common_preconditions, see `assert_common_preconditions`.", "self", ".", "assert_common_preconditions", "(", ")", "# This returns a numpy array with first dimension `len(indices)` and the", "# rest being the dime...
272500b6efe353aeb638d2745ed56e519462ca31
train
EnvProblem.reset
Resets environments at given indices. Subclasses should override _reset to do the actual reset if something other than the default implementation is desired. Args: indices: Indices of environments to reset. If None all envs are reset. Returns: Batch of initial observations of reset environments.
tensor2tensor/envs/env_problem.py
def reset(self, indices=None): """Resets environments at given indices. Subclasses should override _reset to do the actual reset if something other than the default implementation is desired. Args: indices: Indices of environments to reset. If None all envs are reset. Returns: Batch of initial observations of reset environments. """ if indices is None: indices = np.arange(self.trajectories.batch_size) # If this is empty (not None) then don't do anything, no env was done. if indices.size == 0: tf.logging.warning( "`reset` called with empty indices array, this is a no-op.") return None observations = self._reset(indices) processed_observations = self.process_observations(observations) # Record history. self.trajectories.reset(indices, observations) return processed_observations
def reset(self, indices=None): """Resets environments at given indices. Subclasses should override _reset to do the actual reset if something other than the default implementation is desired. Args: indices: Indices of environments to reset. If None all envs are reset. Returns: Batch of initial observations of reset environments. """ if indices is None: indices = np.arange(self.trajectories.batch_size) # If this is empty (not None) then don't do anything, no env was done. if indices.size == 0: tf.logging.warning( "`reset` called with empty indices array, this is a no-op.") return None observations = self._reset(indices) processed_observations = self.process_observations(observations) # Record history. self.trajectories.reset(indices, observations) return processed_observations
[ "Resets", "environments", "at", "given", "indices", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L474-L502
[ "def", "reset", "(", "self", ",", "indices", "=", "None", ")", ":", "if", "indices", "is", "None", ":", "indices", "=", "np", ".", "arange", "(", "self", ".", "trajectories", ".", "batch_size", ")", "# If this is empty (not None) then don't do anything, no env w...
272500b6efe353aeb638d2745ed56e519462ca31
train
EnvProblem._step
Takes a step in all environments; shouldn't pre-process or record. Subclasses should override this to do the actual step if something other than the default implementation is desired. Args: actions: (np.ndarray) with first dimension equal to the batch size. Returns: a tuple of stacked raw observations, raw rewards, dones and infos.
tensor2tensor/envs/env_problem.py
def _step(self, actions):
  """Takes a step in all environments; shouldn't pre-process or record.

  Subclasses should override this to do the actual step if something other
  than the default implementation is desired.

  Args:
    actions: (np.ndarray) with first dimension equal to the batch size.

  Returns:
    a tuple of stacked raw observations, raw rewards, dones and infos.
  """
  # Pre-conditions: common_preconditions, see `assert_common_preconditions`.
  #               : len(actions) == len(self._envs)
  self.assert_common_preconditions()
  assert len(actions) == len(self._envs)

  observations = []
  rewards = []
  dones = []
  infos = []

  # Take steps in all environments.
  for env, action in zip(self._envs, actions):
    observation, reward, done, info = env.step(action)

    observations.append(observation)
    rewards.append(reward)
    dones.append(done)
    infos.append(info)

  # Convert each list (observations, rewards, ...) into np.array and return a
  # tuple.
  return tuple(map(np.stack, [observations, rewards, dones, infos]))
def _step(self, actions):
  """Takes a step in all environments; shouldn't pre-process or record.

  Subclasses should override this to do the actual step if something other
  than the default implementation is desired.

  Args:
    actions: (np.ndarray) with first dimension equal to the batch size.

  Returns:
    a tuple of stacked raw observations, raw rewards, dones and infos.
  """
  # Pre-conditions: common_preconditions, see `assert_common_preconditions`.
  #               : len(actions) == len(self._envs)
  self.assert_common_preconditions()
  assert len(actions) == len(self._envs)

  observations = []
  rewards = []
  dones = []
  infos = []

  # Take steps in all environments.
  for env, action in zip(self._envs, actions):
    observation, reward, done, info = env.step(action)

    observations.append(observation)
    rewards.append(reward)
    dones.append(done)
    infos.append(info)

  # Convert each list (observations, rewards, ...) into np.array and return a
  # tuple.
  return tuple(map(np.stack, [observations, rewards, dones, infos]))
[ "Takes", "a", "step", "in", "all", "environments", "shouldn", "t", "pre", "-", "process", "or", "record", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L504-L538
[ "def", "_step", "(", "self", ",", "actions", ")", ":", "# Pre-conditions: common_preconditions, see `assert_common_preconditions`.", "# : len(actions) == len(self._envs)", "self", ".", "assert_common_preconditions", "(", ")", "assert", "len", "(", "actions", ")", ...
272500b6efe353aeb638d2745ed56e519462ca31
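A sketch of the loop-and-stack pattern `_step` relies on, written against the classic gym API in use here (`step` returning a 4-tuple); the env name and batch size are illustrative assumptions:

import gym
import numpy as np

envs = [gym.make("CartPole-v0") for _ in range(2)]
for env in envs:
  env.reset()
actions = np.array([0, 1])
# Step each env with its own action, then stack the per-env results.
results = [env.step(int(a)) for env, a in zip(envs, actions)]
observations, rewards, dones, infos = zip(*results)
observations = np.stack(observations)  # (2,) + observation shape
rewards = np.stack(rewards)            # (2,)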
train
EnvProblem.step
Takes a step in all environments. Subclasses should override _step to do the actual step if something other than the default implementation is desired. Args: actions: Batch of actions. Returns: (preprocessed_observations, processed_rewards, dones, infos).
tensor2tensor/envs/env_problem.py
def step(self, actions):
  """Takes a step in all environments.

  Subclasses should override _step to do the actual step if something
  other than the default implementation is desired.

  Args:
    actions: Batch of actions.

  Returns:
    (preprocessed_observations, processed_rewards, dones, infos).
  """

  observations, raw_rewards, dones, infos = self._step(actions)

  # Process rewards.
  raw_rewards = raw_rewards.astype(np.float32)
  processed_rewards = self.process_rewards(raw_rewards)

  # Process observations.
  processed_observations = self.process_observations(observations)

  # Record history.
  self.trajectories.step(processed_observations, raw_rewards,
                         processed_rewards, dones, actions)

  return processed_observations, processed_rewards, dones, infos
def step(self, actions):
  """Takes a step in all environments.

  Subclasses should override _step to do the actual step if something
  other than the default implementation is desired.

  Args:
    actions: Batch of actions.

  Returns:
    (preprocessed_observations, processed_rewards, dones, infos).
  """

  observations, raw_rewards, dones, infos = self._step(actions)

  # Process rewards.
  raw_rewards = raw_rewards.astype(np.float32)
  processed_rewards = self.process_rewards(raw_rewards)

  # Process observations.
  processed_observations = self.process_observations(observations)

  # Record history.
  self.trajectories.step(processed_observations, raw_rewards,
                         processed_rewards, dones, actions)

  return processed_observations, processed_rewards, dones, infos
[ "Takes", "a", "step", "in", "all", "environments", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L540-L566
[ "def", "step", "(", "self", ",", "actions", ")", ":", "observations", ",", "raw_rewards", ",", "dones", ",", "infos", "=", "self", ".", "_step", "(", "actions", ")", "# Process rewards.", "raw_rewards", "=", "raw_rewards", ".", "astype", "(", "np", ".", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
EnvProblem.example_reading_spec
Data fields to store on disk and their decoders.
tensor2tensor/envs/env_problem.py
def example_reading_spec(self): """Data fields to store on disk and their decoders.""" # Subclasses can override and/or extend. processed_reward_type = tf.float32 if self.is_processed_rewards_discrete: processed_reward_type = tf.int64 data_fields = { TIMESTEP_FIELD: tf.FixedLenFeature((1,), tf.int64), RAW_REWARD_FIELD: tf.FixedLenFeature((1,), tf.float32), PROCESSED_REWARD_FIELD: tf.FixedLenFeature((1,), processed_reward_type), DONE_FIELD: tf.FixedLenFeature((1,), tf.int64), # we wrote this as int. # Special treatment because we need to determine type and shape, also # enables classes to override. OBSERVATION_FIELD: self.observation_spec, ACTION_FIELD: self.action_spec, } data_items_to_decoders = { field: tf.contrib.slim.tfexample_decoder.Tensor(field) for field in data_fields } return data_fields, data_items_to_decoders
def example_reading_spec(self): """Data fields to store on disk and their decoders.""" # Subclasses can override and/or extend. processed_reward_type = tf.float32 if self.is_processed_rewards_discrete: processed_reward_type = tf.int64 data_fields = { TIMESTEP_FIELD: tf.FixedLenFeature((1,), tf.int64), RAW_REWARD_FIELD: tf.FixedLenFeature((1,), tf.float32), PROCESSED_REWARD_FIELD: tf.FixedLenFeature((1,), processed_reward_type), DONE_FIELD: tf.FixedLenFeature((1,), tf.int64), # we wrote this as int. # Special treatment because we need to determine type and shape, also # enables classes to override. OBSERVATION_FIELD: self.observation_spec, ACTION_FIELD: self.action_spec, } data_items_to_decoders = { field: tf.contrib.slim.tfexample_decoder.Tensor(field) for field in data_fields } return data_fields, data_items_to_decoders
[ "Data", "fields", "to", "store", "on", "disk", "and", "their", "decoders", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L568-L594
[ "def", "example_reading_spec", "(", "self", ")", ":", "# Subclasses can override and/or extend.", "processed_reward_type", "=", "tf", ".", "float32", "if", "self", ".", "is_processed_rewards_discrete", ":", "processed_reward_type", "=", "tf", ".", "int64", "data_fields", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
EnvProblem._generate_time_steps
A generator to yield single time-steps from a list of trajectories.
tensor2tensor/envs/env_problem.py
def _generate_time_steps(self, trajectory_list):
  """A generator to yield single time-steps from a list of trajectories."""
  for single_trajectory in trajectory_list:
    assert isinstance(single_trajectory, trajectory.Trajectory)

    # Skip writing trajectories that have only a single time-step -- this
    # could just be a repeated reset.
    if single_trajectory.num_time_steps <= 1:
      continue

    for index, time_step in enumerate(single_trajectory.time_steps):
      # The first time-step doesn't have a reward/processed_reward; setting
      # it to 0.0 / 0 should be OK.
      raw_reward = time_step.raw_reward
      if not raw_reward:
        raw_reward = 0.0
      processed_reward = time_step.processed_reward
      if not processed_reward:
        processed_reward = 0

      action = time_step.action
      if action is None:
        # The last time-step doesn't have an action, and this action
        # shouldn't be used; gym's spaces have a `sample` function, so let's
        # just sample an action and use that.
        action = self.action_space.sample()
      action = gym_spaces_utils.gym_space_encode(self.action_space, action)

      if six.PY3:
        # py3 complains that to_example cannot handle np.int64!
        action_dtype = self.action_space.dtype
        if action_dtype in [np.int64, np.int32]:
          action = list(map(int, action))
        elif action_dtype in [np.float64, np.float32]:
          action = list(map(float, action))

        # Same with processed_reward.
        processed_reward = int(processed_reward)

      assert time_step.observation is not None

      yield {
          TIMESTEP_FIELD: [index],
          ACTION_FIELD: action,
          # to_example errors on np.float32
          RAW_REWARD_FIELD: [float(raw_reward)],
          PROCESSED_REWARD_FIELD: [processed_reward],
          # to_example doesn't know bools
          DONE_FIELD: [int(time_step.done)],
          OBSERVATION_FIELD:
              gym_spaces_utils.gym_space_encode(self.observation_space,
                                                time_step.observation),
      }
def _generate_time_steps(self, trajectory_list):
  """A generator to yield single time-steps from a list of trajectories."""
  for single_trajectory in trajectory_list:
    assert isinstance(single_trajectory, trajectory.Trajectory)

    # Skip writing trajectories that have only a single time-step -- this
    # could just be a repeated reset.
    if single_trajectory.num_time_steps <= 1:
      continue

    for index, time_step in enumerate(single_trajectory.time_steps):
      # The first time-step doesn't have a reward/processed_reward; setting
      # it to 0.0 / 0 should be OK.
      raw_reward = time_step.raw_reward
      if not raw_reward:
        raw_reward = 0.0
      processed_reward = time_step.processed_reward
      if not processed_reward:
        processed_reward = 0

      action = time_step.action
      if action is None:
        # The last time-step doesn't have an action, and this action
        # shouldn't be used; gym's spaces have a `sample` function, so let's
        # just sample an action and use that.
        action = self.action_space.sample()
      action = gym_spaces_utils.gym_space_encode(self.action_space, action)

      if six.PY3:
        # py3 complains that to_example cannot handle np.int64!
        action_dtype = self.action_space.dtype
        if action_dtype in [np.int64, np.int32]:
          action = list(map(int, action))
        elif action_dtype in [np.float64, np.float32]:
          action = list(map(float, action))

        # Same with processed_reward.
        processed_reward = int(processed_reward)

      assert time_step.observation is not None

      yield {
          TIMESTEP_FIELD: [index],
          ACTION_FIELD: action,
          # to_example errors on np.float32
          RAW_REWARD_FIELD: [float(raw_reward)],
          PROCESSED_REWARD_FIELD: [processed_reward],
          # to_example doesn't know bools
          DONE_FIELD: [int(time_step.done)],
          OBSERVATION_FIELD:
              gym_spaces_utils.gym_space_encode(self.observation_space,
                                                time_step.observation),
      }
[ "A", "generator", "to", "yield", "single", "time", "-", "steps", "from", "a", "list", "of", "trajectories", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L656-L713
[ "def", "_generate_time_steps", "(", "self", ",", "trajectory_list", ")", ":", "for", "single_trajectory", "in", "trajectory_list", ":", "assert", "isinstance", "(", "single_trajectory", ",", "trajectory", ".", "Trajectory", ")", "# Skip writing trajectories that have only...
272500b6efe353aeb638d2745ed56e519462ca31
train
init_vq_bottleneck
Get lookup table for VQ bottleneck.
tensor2tensor/models/research/transformer_nat.py
def init_vq_bottleneck(bottleneck_size, hidden_size): """Get lookup table for VQ bottleneck.""" means = tf.get_variable( name="means", shape=[bottleneck_size, hidden_size], initializer=tf.uniform_unit_scaling_initializer()) ema_count = tf.get_variable( name="ema_count", shape=[bottleneck_size], initializer=tf.constant_initializer(0), trainable=False) with tf.colocate_with(means): ema_means = tf.get_variable( name="ema_means", initializer=means.initialized_value(), trainable=False) return means, ema_means, ema_count
def init_vq_bottleneck(bottleneck_size, hidden_size): """Get lookup table for VQ bottleneck.""" means = tf.get_variable( name="means", shape=[bottleneck_size, hidden_size], initializer=tf.uniform_unit_scaling_initializer()) ema_count = tf.get_variable( name="ema_count", shape=[bottleneck_size], initializer=tf.constant_initializer(0), trainable=False) with tf.colocate_with(means): ema_means = tf.get_variable( name="ema_means", initializer=means.initialized_value(), trainable=False) return means, ema_means, ema_count
[ "Get", "lookup", "table", "for", "VQ", "bottleneck", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L31-L48
[ "def", "init_vq_bottleneck", "(", "bottleneck_size", ",", "hidden_size", ")", ":", "means", "=", "tf", ".", "get_variable", "(", "name", "=", "\"means\"", ",", "shape", "=", "[", "bottleneck_size", ",", "hidden_size", "]", ",", "initializer", "=", "tf", ".",...
272500b6efe353aeb638d2745ed56e519462ca31
train
vq_nearest_neighbor
Find the nearest element in means to elements in x.
tensor2tensor/models/research/transformer_nat.py
def vq_nearest_neighbor(x, hparams): """Find the nearest element in means to elements in x.""" bottleneck_size = 2**hparams.bottleneck_bits means = hparams.means x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod if hparams.bottleneck_kind == "em": x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples) x_means_hot = tf.one_hot( x_means_idx, depth=bottleneck_size) x_means_hot = tf.reduce_mean(x_means_hot, axis=1) else: x_means_idx = tf.argmax(-dist, axis=-1) x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size) x_means = tf.matmul(x_means_hot, means) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, e_loss
def vq_nearest_neighbor(x, hparams): """Find the nearest element in means to elements in x.""" bottleneck_size = 2**hparams.bottleneck_bits means = hparams.means x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod if hparams.bottleneck_kind == "em": x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples) x_means_hot = tf.one_hot( x_means_idx, depth=bottleneck_size) x_means_hot = tf.reduce_mean(x_means_hot, axis=1) else: x_means_idx = tf.argmax(-dist, axis=-1) x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size) x_means = tf.matmul(x_means_hot, means) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, e_loss
[ "Find", "the", "nearest", "element", "in", "means", "to", "elements", "in", "x", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L51-L69
[ "def", "vq_nearest_neighbor", "(", "x", ",", "hparams", ")", ":", "bottleneck_size", "=", "2", "**", "hparams", ".", "bottleneck_bits", "means", "=", "hparams", ".", "means", "x_norm_sq", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "x", ...
272500b6efe353aeb638d2745ed56e519462ca31
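The distance computation above uses the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2<x, m>. A numpy sketch (random data, shapes assumed) confirming that argmax over -dist matches brute-force nearest neighbours:

import numpy as np

x = np.random.randn(5, 8)       # 5 vectors to quantize
means = np.random.randn(16, 8)  # 16 codebook entries

x_sq = np.sum(x ** 2, axis=-1, keepdims=True)      # (5, 1)
m_sq = np.sum(means ** 2, axis=-1, keepdims=True)  # (16, 1)
dist = x_sq + m_sq.T - 2 * x.dot(means.T)          # (5, 16) squared distances

idx = np.argmax(-dist, axis=-1)
brute = np.argmin(((x[:, None] - means[None]) ** 2).sum(-1), axis=-1)
assert np.array_equal(idx, brute)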
train
vq_discrete_bottleneck
Simple vector quantized discrete bottleneck.
tensor2tensor/models/research/transformer_nat.py
def vq_discrete_bottleneck(x, hparams): """Simple vector quantized discrete bottleneck.""" tf.logging.info("Using EMA with beta = {}".format(hparams.beta)) bottleneck_size = 2**hparams.bottleneck_bits x_shape = common_layers.shape_list(x) x = tf.reshape(x, [-1, hparams.hidden_size]) x_means_hot, e_loss = vq_nearest_neighbor( x, hparams) means, ema_means, ema_count = (hparams.means, hparams.ema_means, hparams.ema_count) # Update the ema variables updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum(x_means_hot, axis=0), hparams.decay, zero_debias=False) dw = tf.matmul(x_means_hot, x, transpose_a=True) updated_ema_means = moving_averages.assign_moving_average( ema_means, dw, hparams.decay, zero_debias=False) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + hparams.epsilon) / (n + bottleneck_size * hparams.epsilon) * n) # pylint: disable=g-no-augmented-assignment updated_ema_means = updated_ema_means / tf.expand_dims( updated_ema_count, axis=-1) # pylint: enable=g-no-augmented-assignment with tf.control_dependencies([e_loss]): update_means = tf.assign(means, updated_ema_means) with tf.control_dependencies([update_means]): loss = hparams.beta * e_loss discrete = tf.reshape(x_means_hot, x_shape[:-1] + [bottleneck_size]) return discrete, loss
def vq_discrete_bottleneck(x, hparams): """Simple vector quantized discrete bottleneck.""" tf.logging.info("Using EMA with beta = {}".format(hparams.beta)) bottleneck_size = 2**hparams.bottleneck_bits x_shape = common_layers.shape_list(x) x = tf.reshape(x, [-1, hparams.hidden_size]) x_means_hot, e_loss = vq_nearest_neighbor( x, hparams) means, ema_means, ema_count = (hparams.means, hparams.ema_means, hparams.ema_count) # Update the ema variables updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum(x_means_hot, axis=0), hparams.decay, zero_debias=False) dw = tf.matmul(x_means_hot, x, transpose_a=True) updated_ema_means = moving_averages.assign_moving_average( ema_means, dw, hparams.decay, zero_debias=False) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + hparams.epsilon) / (n + bottleneck_size * hparams.epsilon) * n) # pylint: disable=g-no-augmented-assignment updated_ema_means = updated_ema_means / tf.expand_dims( updated_ema_count, axis=-1) # pylint: enable=g-no-augmented-assignment with tf.control_dependencies([e_loss]): update_means = tf.assign(means, updated_ema_means) with tf.control_dependencies([update_means]): loss = hparams.beta * e_loss discrete = tf.reshape(x_means_hot, x_shape[:-1] + [bottleneck_size]) return discrete, loss
[ "Simple", "vector", "quantized", "discrete", "bottleneck", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L72-L107
[ "def", "vq_discrete_bottleneck", "(", "x", ",", "hparams", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Using EMA with beta = {}\"", ".", "format", "(", "hparams", ".", "beta", ")", ")", "bottleneck_size", "=", "2", "**", "hparams", ".", "bottleneck_...
272500b6efe353aeb638d2745ed56e519462ca31
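A plain-numpy sketch (random data; K and the shapes are assumed) of the EMA codebook update performed above, using the same rule as `assign_moving_average` with `zero_debias=False`, i.e. v <- decay * v + (1 - decay) * value, followed by the Laplace smoothing of the counts:

import numpy as np

decay, epsilon, K, H = 0.999, 1e-5, 16, 8
ema_count = np.zeros(K)
ema_means = np.random.randn(K, H)

x = np.random.randn(32, H)                              # flattened encoder outputs
one_hot = np.eye(K)[np.random.randint(0, K, size=32)]   # hard assignments, (32, K)

# EMA update of per-code counts and accumulated vectors.
ema_count = decay * ema_count + (1 - decay) * one_hot.sum(axis=0)
ema_means = decay * ema_means + (1 - decay) * one_hot.T.dot(x)

# Laplace-smooth the counts, then normalize the accumulated means per code.
n = ema_count.sum()
smoothed = (ema_count + epsilon) / (n + K * epsilon) * n
new_means = ema_means / smoothed[:, None]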
train
vq_discrete_unbottleneck
Simple undiscretization from vector quantized representation.
tensor2tensor/models/research/transformer_nat.py
def vq_discrete_unbottleneck(x, hparams): """Simple undiscretization from vector quantized representation.""" x_shape = common_layers.shape_list(x) bottleneck_size = 2**hparams.bottleneck_bits means = hparams.means x_flat = tf.reshape(x, [-1, bottleneck_size]) result = tf.matmul(x_flat, means) result = tf.reshape(result, x_shape[:-1] + [hparams.hidden_size]) return result
def vq_discrete_unbottleneck(x, hparams): """Simple undiscretization from vector quantized representation.""" x_shape = common_layers.shape_list(x) bottleneck_size = 2**hparams.bottleneck_bits means = hparams.means x_flat = tf.reshape(x, [-1, bottleneck_size]) result = tf.matmul(x_flat, means) result = tf.reshape(result, x_shape[:-1] + [hparams.hidden_size]) return result
[ "Simple", "undiscretization", "from", "vector", "quantized", "representation", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L110-L118
[ "def", "vq_discrete_unbottleneck", "(", "x", ",", "hparams", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "bottleneck_size", "=", "2", "**", "hparams", ".", "bottleneck_bits", "means", "=", "hparams", ".", "means", "x_flat", "...
272500b6efe353aeb638d2745ed56e519462ca31
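Un-bottlenecking is just a codebook lookup implemented as a matmul with one-hot codes; a tiny sketch with made-up sizes:

import numpy as np

K, H = 16, 8
means = np.random.randn(K, H)  # codebook
codes = np.eye(K)[[3, 7, 3]]   # three one-hot codes
dense = codes.dot(means)       # (3, H): rows 3, 7, 3 of the codebook
assert np.allclose(dense[0], means[3])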
train
residual_conv
A stack of convolution blocks with residual connections.
tensor2tensor/models/research/transformer_nat.py
def residual_conv(x, repeat, k, hparams, name, reuse=None): """A stack of convolution blocks with residual connections.""" with tf.variable_scope(name, reuse=reuse): dilations_and_kernels = [((1, 1), k) for _ in range(3)] for i in range(repeat): with tf.variable_scope("repeat_%d" % i): y = common_layers.conv_block( common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"), hparams.hidden_size, dilations_and_kernels, padding="SAME", name="residual_conv") y = tf.nn.dropout(y, 1.0 - hparams.dropout) x += y return x
def residual_conv(x, repeat, k, hparams, name, reuse=None): """A stack of convolution blocks with residual connections.""" with tf.variable_scope(name, reuse=reuse): dilations_and_kernels = [((1, 1), k) for _ in range(3)] for i in range(repeat): with tf.variable_scope("repeat_%d" % i): y = common_layers.conv_block( common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"), hparams.hidden_size, dilations_and_kernels, padding="SAME", name="residual_conv") y = tf.nn.dropout(y, 1.0 - hparams.dropout) x += y return x
[ "A", "stack", "of", "convolution", "blocks", "with", "residual", "connections", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L121-L135
[ "def", "residual_conv", "(", "x", ",", "repeat", ",", "k", ",", "hparams", ",", "name", ",", "reuse", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "reuse", ")", ":", "dilations_and_kernels", "=", "[", "(...
272500b6efe353aeb638d2745ed56e519462ca31
train
decompress_step
Decompression function.
tensor2tensor/models/research/transformer_nat.py
def decompress_step(source, hparams, first_relu, name): """Decompression function.""" with tf.variable_scope(name): shape = common_layers.shape_list(source) multiplier = 2 kernel = (1, 1) thicker = common_layers.conv_block( source, hparams.hidden_size * multiplier, [((1, 1), kernel)], first_relu=first_relu, name="decompress_conv") return tf.reshape(thicker, [shape[0], shape[1] * 2, 1, hparams.hidden_size])
def decompress_step(source, hparams, first_relu, name): """Decompression function.""" with tf.variable_scope(name): shape = common_layers.shape_list(source) multiplier = 2 kernel = (1, 1) thicker = common_layers.conv_block( source, hparams.hidden_size * multiplier, [((1, 1), kernel)], first_relu=first_relu, name="decompress_conv") return tf.reshape(thicker, [shape[0], shape[1] * 2, 1, hparams.hidden_size])
[ "Decompression", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L138-L149
[ "def", "decompress_step", "(", "source", ",", "hparams", ",", "first_relu", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "shape", "=", "common_layers", ".", "shape_list", "(", "source", ")", "multiplier", "=", "2", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
compress
Compress.
tensor2tensor/models/research/transformer_nat.py
def compress(x, hparams, name): """Compress.""" with tf.variable_scope(name): # Run compression by strided convs. cur = x k1 = (3, 1) k2 = (2, 1) cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc") for i in range(hparams.num_compress_steps): cur = common_layers.conv_block( cur, hparams.hidden_size, [((1, 1), k2)], strides=k2, name="compress_%d" % i) return cur
def compress(x, hparams, name): """Compress.""" with tf.variable_scope(name): # Run compression by strided convs. cur = x k1 = (3, 1) k2 = (2, 1) cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc") for i in range(hparams.num_compress_steps): cur = common_layers.conv_block( cur, hparams.hidden_size, [((1, 1), k2)], strides=k2, name="compress_%d" % i) return cur
[ "Compress", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L152-L166
[ "def", "compress", "(", "x", ",", "hparams", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "# Run compression by strided convs.", "cur", "=", "x", "k1", "=", "(", "3", ",", "1", ")", "k2", "=", "(", "2", ",", "1...
272500b6efe353aeb638d2745ed56e519462ca31
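Each (2, 1)-strided conv in `compress` halves the time dimension, which is why `ae_transformer_internal` below pads targets to a length divisible by 2**num_compress_steps; a quick sanity check:

num_compress_steps = 3
length = 48  # already divisible by 2**3
for _ in range(num_compress_steps):
  length //= 2
assert length == 6  # 48 -> 24 -> 12 -> 6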
train
encode
Transformer preparations and encoder.
tensor2tensor/models/research/transformer_nat.py
def encode(x, x_space, hparams, name): """Transformer preparations and encoder.""" with tf.variable_scope(name): (encoder_input, encoder_self_attention_bias, ed) = transformer.transformer_prepare_encoder(x, x_space, hparams) encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout) return transformer.transformer_encoder( encoder_input, encoder_self_attention_bias, hparams), ed
def encode(x, x_space, hparams, name): """Transformer preparations and encoder.""" with tf.variable_scope(name): (encoder_input, encoder_self_attention_bias, ed) = transformer.transformer_prepare_encoder(x, x_space, hparams) encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout) return transformer.transformer_encoder( encoder_input, encoder_self_attention_bias, hparams), ed
[ "Transformer", "preparations", "and", "encoder", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L169-L176
[ "def", "encode", "(", "x", ",", "x_space", ",", "hparams", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "(", "encoder_input", ",", "encoder_self_attention_bias", ",", "ed", ")", "=", "transformer", ".", "transformer_pr...
272500b6efe353aeb638d2745ed56e519462ca31
train
decode_transformer
Original Transformer decoder.
tensor2tensor/models/research/transformer_nat.py
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets, hparams, name): """Original Transformer decoder.""" with tf.variable_scope(name): targets = common_layers.flatten4d3d(targets) decoder_input, decoder_self_bias = ( transformer.transformer_prepare_decoder(targets, hparams)) decoder_input = tf.nn.dropout(decoder_input, 1.0 - hparams.layer_prepostprocess_dropout) decoder_output = transformer.transformer_decoder( decoder_input, encoder_output, decoder_self_bias, encoder_decoder_attention_bias, hparams) decoder_output = tf.expand_dims(decoder_output, axis=2) decoder_output_shape = common_layers.shape_list(decoder_output) decoder_output = tf.reshape( decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size]) # Expand since t2t expects 4d tensors. return decoder_output
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets, hparams, name): """Original Transformer decoder.""" with tf.variable_scope(name): targets = common_layers.flatten4d3d(targets) decoder_input, decoder_self_bias = ( transformer.transformer_prepare_decoder(targets, hparams)) decoder_input = tf.nn.dropout(decoder_input, 1.0 - hparams.layer_prepostprocess_dropout) decoder_output = transformer.transformer_decoder( decoder_input, encoder_output, decoder_self_bias, encoder_decoder_attention_bias, hparams) decoder_output = tf.expand_dims(decoder_output, axis=2) decoder_output_shape = common_layers.shape_list(decoder_output) decoder_output = tf.reshape( decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size]) # Expand since t2t expects 4d tensors. return decoder_output
[ "Original", "Transformer", "decoder", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L179-L199
[ "def", "decode_transformer", "(", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "targets", ",", "hparams", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "targets", "=", "common_layers", ".", "flatten4d3d", "("...
272500b6efe353aeb638d2745ed56e519462ca31
train
get_latent_pred_loss
Latent prediction and loss.
tensor2tensor/models/research/transformer_nat.py
def get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams): """Latent prediction and loss.""" latents_logits = tf.layers.dense( latents_pred, 2**hparams.bottleneck_bits, name="extra_logits") loss = tf.nn.softmax_cross_entropy_with_logits_v2( labels=tf.stop_gradient(latents_discrete_hot), logits=latents_logits) return loss
def get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams): """Latent prediction and loss.""" latents_logits = tf.layers.dense( latents_pred, 2**hparams.bottleneck_bits, name="extra_logits") loss = tf.nn.softmax_cross_entropy_with_logits_v2( labels=tf.stop_gradient(latents_discrete_hot), logits=latents_logits) return loss
[ "Latent", "prediction", "and", "loss", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L202-L208
[ "def", "get_latent_pred_loss", "(", "latents_pred", ",", "latents_discrete_hot", ",", "hparams", ")", ":", "latents_logits", "=", "tf", ".", "layers", ".", "dense", "(", "latents_pred", ",", "2", "**", "hparams", ".", "bottleneck_bits", ",", "name", "=", "\"ex...
272500b6efe353aeb638d2745ed56e519462ca31
train
ae_transformer_internal
Main step used for training.
tensor2tensor/models/research/transformer_nat.py
def ae_transformer_internal(inputs, targets, target_space, hparams, cache=None): """Main step used for training.""" # Encoder. inputs = common_layers.flatten4d3d(inputs) inputs, ed = encode(inputs, target_space, hparams, "input_enc") # Autoencoding. losses = {"extra": tf.constant(0.0), "latent_pred": tf.constant(0.0)} max_targets_len_from_inputs = tf.concat([inputs, inputs], axis=1) targets, _ = common_layers.pad_to_same_length( targets, max_targets_len_from_inputs, final_length_divisible_by=2**hparams.num_compress_steps) targets_c = compress(targets, hparams, "compress") if hparams.mode != tf.estimator.ModeKeys.PREDICT: # Compress and bottleneck. latents_discrete_hot, extra_loss = vq_discrete_bottleneck( x=targets_c, hparams=hparams) latents_dense = vq_discrete_unbottleneck( latents_discrete_hot, hparams=hparams) latents_dense = targets_c + tf.stop_gradient(latents_dense - targets_c) latents_discrete = tf.argmax(latents_discrete_hot, axis=-1) tf.summary.histogram("codes", tf.reshape(latents_discrete[:, 0, :], [-1])) losses["extra"] = extra_loss # Extra loss predicting latent code from input. latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra") latent_pred_loss = get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams) losses["latent_pred"] = tf.reduce_mean(latent_pred_loss) else: latent_len = common_layers.shape_list(targets_c)[1] embed = functools.partial(vq_discrete_unbottleneck, hparams=hparams) latents_dense = tf.zeros_like(targets_c[:, :latent_len, :, :]) if cache is None: cache = ae_latent_sample_beam(latents_dense, inputs, ed, embed, hparams) cache_hot = tf.one_hot(cache, depth=2**hparams.bottleneck_bits) latents_dense = embed(cache_hot) # Postprocess. d = latents_dense pos = tf.get_variable("pos", [1, 1000, 1, hparams.hidden_size]) pos = pos[:, :common_layers.shape_list(latents_dense)[1] + 1, :, :] latents_dense = tf.pad(latents_dense, [[0, 0], [1, 0], [0, 0], [0, 0]]) + pos # Decompressing the dense latents for i in range(hparams.num_compress_steps): j = hparams.num_compress_steps - i - 1 d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) d = decompress_step(d, hparams, i > 0, "decompress_%d" % j) masking = common_layers.inverse_lin_decay(hparams.mask_startup_steps) masking *= common_layers.inverse_exp_decay( hparams.mask_startup_steps // 4) # Not much at start. masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) if hparams.mode == tf.estimator.ModeKeys.PREDICT: masking = 1.0 mask = tf.less(masking, tf.random_uniform(common_layers.shape_list(targets)[:-1])) mask = tf.expand_dims(tf.to_float(mask), 3) # targets is always [batch, length, 1, depth] targets = mask * targets + (1.0 - mask) * d res = decode_transformer(inputs, ed, targets, hparams, "decoder") latent_time = tf.less(hparams.mask_startup_steps, tf.to_int32(tf.train.get_global_step())) losses["latent_pred"] *= tf.to_float(latent_time) return res, losses, cache
def ae_transformer_internal(inputs, targets, target_space, hparams, cache=None): """Main step used for training.""" # Encoder. inputs = common_layers.flatten4d3d(inputs) inputs, ed = encode(inputs, target_space, hparams, "input_enc") # Autoencoding. losses = {"extra": tf.constant(0.0), "latent_pred": tf.constant(0.0)} max_targets_len_from_inputs = tf.concat([inputs, inputs], axis=1) targets, _ = common_layers.pad_to_same_length( targets, max_targets_len_from_inputs, final_length_divisible_by=2**hparams.num_compress_steps) targets_c = compress(targets, hparams, "compress") if hparams.mode != tf.estimator.ModeKeys.PREDICT: # Compress and bottleneck. latents_discrete_hot, extra_loss = vq_discrete_bottleneck( x=targets_c, hparams=hparams) latents_dense = vq_discrete_unbottleneck( latents_discrete_hot, hparams=hparams) latents_dense = targets_c + tf.stop_gradient(latents_dense - targets_c) latents_discrete = tf.argmax(latents_discrete_hot, axis=-1) tf.summary.histogram("codes", tf.reshape(latents_discrete[:, 0, :], [-1])) losses["extra"] = extra_loss # Extra loss predicting latent code from input. latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra") latent_pred_loss = get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams) losses["latent_pred"] = tf.reduce_mean(latent_pred_loss) else: latent_len = common_layers.shape_list(targets_c)[1] embed = functools.partial(vq_discrete_unbottleneck, hparams=hparams) latents_dense = tf.zeros_like(targets_c[:, :latent_len, :, :]) if cache is None: cache = ae_latent_sample_beam(latents_dense, inputs, ed, embed, hparams) cache_hot = tf.one_hot(cache, depth=2**hparams.bottleneck_bits) latents_dense = embed(cache_hot) # Postprocess. d = latents_dense pos = tf.get_variable("pos", [1, 1000, 1, hparams.hidden_size]) pos = pos[:, :common_layers.shape_list(latents_dense)[1] + 1, :, :] latents_dense = tf.pad(latents_dense, [[0, 0], [1, 0], [0, 0], [0, 0]]) + pos # Decompressing the dense latents for i in range(hparams.num_compress_steps): j = hparams.num_compress_steps - i - 1 d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) d = decompress_step(d, hparams, i > 0, "decompress_%d" % j) masking = common_layers.inverse_lin_decay(hparams.mask_startup_steps) masking *= common_layers.inverse_exp_decay( hparams.mask_startup_steps // 4) # Not much at start. masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) if hparams.mode == tf.estimator.ModeKeys.PREDICT: masking = 1.0 mask = tf.less(masking, tf.random_uniform(common_layers.shape_list(targets)[:-1])) mask = tf.expand_dims(tf.to_float(mask), 3) # targets is always [batch, length, 1, depth] targets = mask * targets + (1.0 - mask) * d res = decode_transformer(inputs, ed, targets, hparams, "decoder") latent_time = tf.less(hparams.mask_startup_steps, tf.to_int32(tf.train.get_global_step())) losses["latent_pred"] *= tf.to_float(latent_time) return res, losses, cache
[ "Main", "step", "used", "for", "training", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L245-L316
[ "def", "ae_transformer_internal", "(", "inputs", ",", "targets", ",", "target_space", ",", "hparams", ",", "cache", "=", "None", ")", ":", "# Encoder.", "inputs", "=", "common_layers", ".", "flatten4d3d", "(", "inputs", ")", "inputs", ",", "ed", "=", "encode...
272500b6efe353aeb638d2745ed56e519462ca31
train
transformer_nat_small
Set of hyperparameters.
tensor2tensor/models/research/transformer_nat.py
def transformer_nat_small(): """Set of hyperparameters.""" hparams = transformer.transformer_small() hparams.batch_size = 2048 hparams.learning_rate = 0.2 hparams.learning_rate_warmup_steps = 4000 hparams.num_hidden_layers = 3 hparams.hidden_size = 384 hparams.filter_size = 2048 hparams.label_smoothing = 0.0 hparams.force_full_predict = True hparams.optimizer = "adam" hparams.optimizer_adam_epsilon = 1e-9 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.997 hparams.add_hparam("bottleneck_kind", "vq") hparams.add_hparam("bottleneck_bits", 12) hparams.add_hparam("num_compress_steps", 3) hparams.add_hparam("beta", 0.25) hparams.add_hparam("epsilon", 1e-5) hparams.add_hparam("decay", 0.999) hparams.add_hparam("num_samples", 10) hparams.add_hparam("mask_startup_steps", 50000) return hparams
def transformer_nat_small(): """Set of hyperparameters.""" hparams = transformer.transformer_small() hparams.batch_size = 2048 hparams.learning_rate = 0.2 hparams.learning_rate_warmup_steps = 4000 hparams.num_hidden_layers = 3 hparams.hidden_size = 384 hparams.filter_size = 2048 hparams.label_smoothing = 0.0 hparams.force_full_predict = True hparams.optimizer = "adam" hparams.optimizer_adam_epsilon = 1e-9 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.997 hparams.add_hparam("bottleneck_kind", "vq") hparams.add_hparam("bottleneck_bits", 12) hparams.add_hparam("num_compress_steps", 3) hparams.add_hparam("beta", 0.25) hparams.add_hparam("epsilon", 1e-5) hparams.add_hparam("decay", 0.999) hparams.add_hparam("num_samples", 10) hparams.add_hparam("mask_startup_steps", 50000) return hparams
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L384-L407
[ "def", "transformer_nat_small", "(", ")", ":", "hparams", "=", "transformer", ".", "transformer_small", "(", ")", "hparams", ".", "batch_size", "=", "2048", "hparams", ".", "learning_rate", "=", "0.2", "hparams", ".", "learning_rate_warmup_steps", "=", "4000", "...
272500b6efe353aeb638d2745ed56e519462ca31
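With `bottleneck_bits = 12` as set above, the VQ codebook size derived in `vq_discrete_bottleneck` is:

bottleneck_bits = 12
bottleneck_size = 2 ** bottleneck_bits  # 4096 discrete latent codes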
train
transformer_nat_base
Set of hyperparameters.
tensor2tensor/models/research/transformer_nat.py
def transformer_nat_base(): """Set of hyperparameters.""" hparams = transformer_nat_small() hparams.batch_size = 2048 hparams.hidden_size = 512 hparams.filter_size = 4096 hparams.num_hidden_layers = 6 return hparams
def transformer_nat_base(): """Set of hyperparameters.""" hparams = transformer_nat_small() hparams.batch_size = 2048 hparams.hidden_size = 512 hparams.filter_size = 4096 hparams.num_hidden_layers = 6 return hparams
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L411-L418
[ "def", "transformer_nat_base", "(", ")", ":", "hparams", "=", "transformer_nat_small", "(", ")", "hparams", ".", "batch_size", "=", "2048", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "4096", "hparams", ".", "num_hidden_layers"...
272500b6efe353aeb638d2745ed56e519462ca31
train
transformer_nat_big
Set of hyperparameters.
tensor2tensor/models/research/transformer_nat.py
def transformer_nat_big(): """Set of hyperparameters.""" hparams = transformer_nat_small() hparams.batch_size = 2048 hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.num_hidden_layers = 6 hparams.num_heads = 16 hparams.layer_prepostprocess_dropout = 0.3 return hparams
def transformer_nat_big(): """Set of hyperparameters.""" hparams = transformer_nat_small() hparams.batch_size = 2048 hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.num_hidden_layers = 6 hparams.num_heads = 16 hparams.layer_prepostprocess_dropout = 0.3 return hparams
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L422-L431
[ "def", "transformer_nat_big", "(", ")", ":", "hparams", "=", "transformer_nat_small", "(", ")", "hparams", ".", "batch_size", "=", "2048", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "filter_size", "=", "4096", "hparams", ".", "num_hidden_layers"...
272500b6efe353aeb638d2745ed56e519462ca31
train
policy_net
A policy net function.
tensor2tensor/trax/rlax/ppo.py
def policy_net(rng_key,
               batch_observations_shape,
               num_actions,
               bottom_layers=None):
  """A policy net function."""
  # Use the bottom_layers as the bottom part of the network and just add the
  # required layers on top of it.
  if bottom_layers is None:
    bottom_layers = []

  # NOTE: We use LogSoftmax instead of Softmax (for numerical stability).
  bottom_layers.extend([layers.Dense(num_actions), layers.LogSoftmax()])
  net = layers.Serial(*bottom_layers)
  return net.initialize(batch_observations_shape, rng_key), net
def policy_net(rng_key,
               batch_observations_shape,
               num_actions,
               bottom_layers=None):
  """A policy net function."""
  # Use the bottom_layers as the bottom part of the network and just add the
  # required layers on top of it.
  if bottom_layers is None:
    bottom_layers = []

  # NOTE: We use LogSoftmax instead of Softmax (for numerical stability).
  bottom_layers.extend([layers.Dense(num_actions), layers.LogSoftmax()])
  net = layers.Serial(*bottom_layers)
  return net.initialize(batch_observations_shape, rng_key), net
[ "A", "policy", "net", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L78-L92
[ "def", "policy_net", "(", "rng_key", ",", "batch_observations_shape", ",", "num_actions", ",", "bottom_layers", "=", "None", ")", ":", "# Use the bottom_layers as the bottom part of the network and just add the", "# required layers on top of it.", "if", "bottom_layers", "is", "...
272500b6efe353aeb638d2745ed56e519462ca31
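A sketch of why the log-domain head is preferred: a numerically stable log-softmax (the usual max-subtraction trick; this is an illustration, not trax's implementation) keeps large logits finite where a naive softmax would overflow:

import numpy as np

def log_softmax(z):
  # Subtract the max before exponentiating to avoid overflow.
  z = z - z.max(axis=-1, keepdims=True)
  return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))

logits = np.array([1000.0, 1001.0])       # np.exp(1000) alone would overflow
print(log_softmax(logits))                # ~[-1.3133, -0.3133]
print(np.exp(log_softmax(logits)).sum())  # probabilities still sum to 1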
train
value_net
A value net function.
tensor2tensor/trax/rlax/ppo.py
def value_net(rng_key, batch_observations_shape, num_actions, bottom_layers=None): """A value net function.""" del num_actions if bottom_layers is None: bottom_layers = [] bottom_layers.extend([ layers.Dense(1), ]) net = layers.Serial(*bottom_layers) return net.initialize(batch_observations_shape, rng_key), net
def value_net(rng_key, batch_observations_shape, num_actions, bottom_layers=None): """A value net function.""" del num_actions if bottom_layers is None: bottom_layers = [] bottom_layers.extend([ layers.Dense(1), ]) net = layers.Serial(*bottom_layers) return net.initialize(batch_observations_shape, rng_key), net
[ "A", "value", "net", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L95-L108
[ "def", "value_net", "(", "rng_key", ",", "batch_observations_shape", ",", "num_actions", ",", "bottom_layers", "=", "None", ")", ":", "del", "num_actions", "if", "bottom_layers", "is", "None", ":", "bottom_layers", "=", "[", "]", "bottom_layers", ".", "extend", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
policy_and_value_net
A policy and value net function.
tensor2tensor/trax/rlax/ppo.py
def policy_and_value_net(rng_key,
                         batch_observations_shape,
                         num_actions,
                         bottom_layers=None):
  """A policy and value net function."""
  # Layers.
  cur_layers = []
  if bottom_layers is not None:
    cur_layers.extend(bottom_layers)

  # On top of the current representation, one head computes action
  # probabilities and the other computes the value function.
  # NOTE: We use LogSoftmax instead of Softmax for numerical stability.
  cur_layers.extend([layers.Branch(), layers.Parallel(
      layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
      layers.Dense(1)
  )])

  net = layers.Serial(*cur_layers)
  return net.initialize(batch_observations_shape, rng_key), net
def policy_and_value_net(rng_key,
                         batch_observations_shape,
                         num_actions,
                         bottom_layers=None):
  """A policy and value net function."""
  # Layers.
  cur_layers = []
  if bottom_layers is not None:
    cur_layers.extend(bottom_layers)

  # On top of the current representation, one head computes action
  # probabilities and the other computes the value function.
  # NOTE: We use LogSoftmax instead of Softmax for numerical stability.
  cur_layers.extend([layers.Branch(), layers.Parallel(
      layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
      layers.Dense(1)
  )])

  net = layers.Serial(*cur_layers)
  return net.initialize(batch_observations_shape, rng_key), net
[ "A", "policy", "and", "value", "net", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L111-L130
[ "def", "policy_and_value_net", "(", "rng_key", ",", "batch_observations_shape", ",", "num_actions", ",", "bottom_layers", "=", "None", ")", ":", "# Layers.", "cur_layers", "=", "[", "]", "if", "bottom_layers", "is", "not", "None", ":", "cur_layers", ".", "extend...
272500b6efe353aeb638d2745ed56e519462ca31
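A minimal numpy sketch (random weights, assumed shapes) of the branch-and-parallel idea above: one shared representation feeds both a log-softmax policy head and a scalar value head:

import numpy as np

rng = np.random.RandomState(0)
rep = rng.randn(4, 10, 32)  # (batch, time, features), shared bottom output
num_actions = 6
W_pi = rng.randn(32, num_actions) * 0.1  # policy head weights
W_v = rng.randn(32, 1) * 0.1             # value head weights

def log_softmax(z):
  z = z - z.max(axis=-1, keepdims=True)
  return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))

log_probs = log_softmax(rep.dot(W_pi))  # (4, 10, 6) policy head
values = rep.dot(W_v)                   # (4, 10, 1) value head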
train
log_params
Dumps the params with `logging.error`.
tensor2tensor/trax/rlax/ppo.py
def log_params(params, name="params"): """Dumps the params with `logging.error`.""" for i, param in enumerate(params): if not param: # Empty tuple. continue if not isinstance(param, (list, tuple)): logging.error( "%s[%d] : (%s) = [%s]", name, i, param.shape, onp.array(param)) else: for j, p in enumerate(param): logging.error( "\t%s[%d, %d] = [%s]", name, i, j, onp.array(p))
def log_params(params, name="params"): """Dumps the params with `logging.error`.""" for i, param in enumerate(params): if not param: # Empty tuple. continue if not isinstance(param, (list, tuple)): logging.error( "%s[%d] : (%s) = [%s]", name, i, param.shape, onp.array(param)) else: for j, p in enumerate(param): logging.error( "\t%s[%d, %d] = [%s]", name, i, j, onp.array(p))
[ "Dumps", "the", "params", "with", "logging", ".", "error", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L140-L152
[ "def", "log_params", "(", "params", ",", "name", "=", "\"params\"", ")", ":", "for", "i", ",", "param", "in", "enumerate", "(", "params", ")", ":", "if", "not", "param", ":", "# Empty tuple.", "continue", "if", "not", "isinstance", "(", "param", ",", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
collect_trajectories
Collect trajectories with the given policy net and behaviour. Args: env: A gym env interface, for now this is not-batched. policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable. num_trajectories: int, number of trajectories. policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e. how to use the policy_fun to return an action. max_timestep: int or None, the index of the maximum time-step at which we return the trajectory, None for ending a trajectory only when env returns done. epsilon: float, the epsilon for `epsilon-greedy` policy. Returns: trajectory: list of (observation, action, reward) tuples, where each element `i` is a tuple of numpy arrays with shapes as follows: observation[i] = (B, T_i + 1) action[i] = (B, T_i) reward[i] = (B, T_i)
tensor2tensor/trax/rlax/ppo.py
def collect_trajectories(env,
                         policy_fun,
                         num_trajectories=1,
                         policy="greedy",
                         max_timestep=None,
                         epsilon=0.1):
  """Collect trajectories with the given policy net and behaviour.

  Args:
    env: A gym env interface; for now this is not batched.
    policy_fun: observations(B, T+1) -> log-probs(B, T+1, A) callable.
    num_trajectories: int, number of trajectories.
    policy: string, "greedy", "epsilon-greedy", or "categorical-sampling",
      i.e. how to use the policy_fun to return an action.
    max_timestep: int or None, the index of the maximum time-step at which we
      return the trajectory, None for ending a trajectory only when env
      returns done.
    epsilon: float, the epsilon for `epsilon-greedy` policy.

  Returns:
    trajectory: list of (observation, action, reward) tuples, where each
      element `i` is a tuple of numpy arrays with shapes as follows
      (the singleton batch dimension is squeezed out before returning):
      observation[i] = (T_i + 1,) + OBS
      action[i] = (T_i,)
      reward[i] = (T_i,)
  """

  trajectories = []

  for t in range(num_trajectories):
    t_start = time.time()
    rewards = []
    actions = []
    done = False

    observation = env.reset()

    # This is currently shaped (1, 1) + OBS, but new observations will keep
    # getting added to it, making it eventually (1, T+1) + OBS.
    observation_history = observation[np.newaxis, np.newaxis, :]

    # Run either till we're done OR, if max_timestep is defined, only till
    # that timestep.
    ts = 0
    while ((not done) and
           (not max_timestep or observation_history.shape[1] < max_timestep)):
      ts_start = time.time()
      # Run the policy to pick an action; predictions are shaped (1, t, A)
      # because observation_history is shaped (1, t) + OBS.
      predictions = policy_fun(observation_history)

      # We need the predictions for the last time-step, so squeeze the batch
      # dimension and take the last time-step.
      predictions = np.squeeze(predictions, axis=0)[-1]

      # Policy can be run in one of the following ways:
      #  - Greedy
      #  - Epsilon-Greedy
      #  - Categorical-Sampling
      action = None
      if policy == "greedy":
        action = np.argmax(predictions)
      elif policy == "epsilon-greedy":
        # A schedule for epsilon is 1/k where k is the episode number sampled.
        if onp.random.random() < epsilon:
          # Choose an action at random.
          action = onp.random.randint(0, high=len(predictions))
        else:
          # Return the best action.
          action = np.argmax(predictions)
      elif policy == "categorical-sampling":
        # NOTE: The predictions aren't probabilities but log-probabilities
        # instead, since they were computed with LogSoftmax.
        # So just np.exp them to make them probabilities.
        predictions = np.exp(predictions)
        action = onp.argwhere(onp.random.multinomial(1, predictions) == 1)
      else:
        raise ValueError("Unknown policy: %s" % policy)

      # NOTE: Assumption, single batch.
      try:
        action = int(action)
      except TypeError as err:
        # Let's dump some information before we die off.
        logging.error("Cannot convert action into an integer: [%s]", err)
        logging.error("action.shape: [%s]", action.shape)
        logging.error("action: [%s]", action)
        logging.error("predictions.shape: [%s]", predictions.shape)
        logging.error("predictions: [%s]", predictions)
        logging.error("observation_history: [%s]", observation_history)
        raise err

      observation, reward, done, _ = env.step(action)

      # observation is of shape OBS, so add extra dims and concatenate on the
      # time dimension.
      observation_history = np.concatenate(
          [observation_history, observation[np.newaxis, np.newaxis, :]],
          axis=1)

      rewards.append(reward)
      actions.append(action)

      ts += 1
      logging.vlog(
          2, " Collected time-step[ %5d] of trajectory[ %5d] in [%0.2f] msec.",
          ts, t, get_time(ts_start))
    logging.vlog(
        2, " Collected trajectory[ %5d] in [%0.2f] msec.", t,
        get_time(t_start))

    # This means we are done, or we've been terminated early.
    assert done or (
        max_timestep and max_timestep >= observation_history.shape[1])

    # observation_history is (1, T+1) + OBS; let's squeeze out the batch dim.
    observation_history = np.squeeze(observation_history, axis=0)

    trajectories.append(
        (observation_history, np.stack(actions), np.stack(rewards)))

  return trajectories
def collect_trajectories(env,
                         policy_fun,
                         num_trajectories=1,
                         policy="greedy",
                         max_timestep=None,
                         epsilon=0.1):
  """Collect trajectories with the given policy net and behaviour.

  Args:
    env: A gym env interface; for now this is not batched.
    policy_fun: observations(B, T+1) -> log-probs(B, T+1, A) callable.
    num_trajectories: int, number of trajectories.
    policy: string, "greedy", "epsilon-greedy", or "categorical-sampling",
      i.e. how to use the policy_fun to return an action.
    max_timestep: int or None, the index of the maximum time-step at which we
      return the trajectory, None for ending a trajectory only when env
      returns done.
    epsilon: float, the epsilon for `epsilon-greedy` policy.

  Returns:
    trajectory: list of (observation, action, reward) tuples, where each
      element `i` is a tuple of numpy arrays with shapes as follows
      (the singleton batch dimension is squeezed out before returning):
      observation[i] = (T_i + 1,) + OBS
      action[i] = (T_i,)
      reward[i] = (T_i,)
  """

  trajectories = []

  for t in range(num_trajectories):
    t_start = time.time()
    rewards = []
    actions = []
    done = False

    observation = env.reset()

    # This is currently shaped (1, 1) + OBS, but new observations will keep
    # getting added to it, making it eventually (1, T+1) + OBS.
    observation_history = observation[np.newaxis, np.newaxis, :]

    # Run either till we're done OR, if max_timestep is defined, only till
    # that timestep.
    ts = 0
    while ((not done) and
           (not max_timestep or observation_history.shape[1] < max_timestep)):
      ts_start = time.time()
      # Run the policy to pick an action; predictions are shaped (1, t, A)
      # because observation_history is shaped (1, t) + OBS.
      predictions = policy_fun(observation_history)

      # We need the predictions for the last time-step, so squeeze the batch
      # dimension and take the last time-step.
      predictions = np.squeeze(predictions, axis=0)[-1]

      # Policy can be run in one of the following ways:
      #  - Greedy
      #  - Epsilon-Greedy
      #  - Categorical-Sampling
      action = None
      if policy == "greedy":
        action = np.argmax(predictions)
      elif policy == "epsilon-greedy":
        # A schedule for epsilon is 1/k where k is the episode number sampled.
        if onp.random.random() < epsilon:
          # Choose an action at random.
          action = onp.random.randint(0, high=len(predictions))
        else:
          # Return the best action.
          action = np.argmax(predictions)
      elif policy == "categorical-sampling":
        # NOTE: The predictions aren't probabilities but log-probabilities
        # instead, since they were computed with LogSoftmax.
        # So just np.exp them to make them probabilities.
        predictions = np.exp(predictions)
        action = onp.argwhere(onp.random.multinomial(1, predictions) == 1)
      else:
        raise ValueError("Unknown policy: %s" % policy)

      # NOTE: Assumption, single batch.
      try:
        action = int(action)
      except TypeError as err:
        # Let's dump some information before we die off.
        logging.error("Cannot convert action into an integer: [%s]", err)
        logging.error("action.shape: [%s]", action.shape)
        logging.error("action: [%s]", action)
        logging.error("predictions.shape: [%s]", predictions.shape)
        logging.error("predictions: [%s]", predictions)
        logging.error("observation_history: [%s]", observation_history)
        raise err

      observation, reward, done, _ = env.step(action)

      # observation is of shape OBS, so add extra dims and concatenate on the
      # time dimension.
      observation_history = np.concatenate(
          [observation_history, observation[np.newaxis, np.newaxis, :]],
          axis=1)

      rewards.append(reward)
      actions.append(action)

      ts += 1
      logging.vlog(
          2, " Collected time-step[ %5d] of trajectory[ %5d] in [%0.2f] msec.",
          ts, t, get_time(ts_start))
    logging.vlog(
        2, " Collected trajectory[ %5d] in [%0.2f] msec.", t,
        get_time(t_start))

    # This means we are done, or we've been terminated early.
    assert done or (
        max_timestep and max_timestep >= observation_history.shape[1])

    # observation_history is (1, T+1) + OBS; let's squeeze out the batch dim.
    observation_history = np.squeeze(observation_history, axis=0)

    trajectories.append(
        (observation_history, np.stack(actions), np.stack(rewards)))

  return trajectories
[ "Collect", "trajectories", "with", "the", "given", "policy", "net", "and", "behaviour", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L159-L275
[ "def", "collect_trajectories", "(", "env", ",", "policy_fun", ",", "num_trajectories", "=", "1", ",", "policy", "=", "\"greedy\"", ",", "max_timestep", "=", "None", ",", "epsilon", "=", "0.1", ")", ":", "trajectories", "=", "[", "]", "for", "t", "in", "r...
272500b6efe353aeb638d2745ed56e519462ca31
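A minimal usage sketch for the record above, assuming a discrete-action gym environment (`CartPole-v1` is an arbitrary illustrative choice) and a placeholder uniform log-probability policy standing in for the trained policy network that ppo.py would normally supply; shapes follow the docstring.

# Hypothetical usage sketch: the gym env and the uniform `policy_fun` below
# are illustrative stand-ins, not the trainer's actual policy network.
import gym
import numpy as onp

from tensor2tensor.trax.rlax import ppo

env = gym.make("CartPole-v1")
num_actions = env.action_space.n


def policy_fun(observation_history):
  # observation_history: (B, T+1) + OBS; return log-probs of shape
  # (B, T+1, A). A uniform policy is log(1/A) everywhere.
  batch, timesteps = observation_history.shape[:2]
  return onp.full((batch, timesteps, num_actions), -onp.log(num_actions))


trajectories = ppo.collect_trajectories(
    env, policy_fun, num_trajectories=2,
    policy="categorical-sampling", max_timestep=200)

for obs, acts, rews in trajectories:
  # Per the docstring: (T+1,) + OBS observations, (T,) actions and rewards.
  print(obs.shape, acts.shape, rews.shape)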