| column | dtype | values / lengths |
| --- | --- | --- |
| id | int32 | 0 - 252k |
| repo | stringlengths | 7 - 55 |
| path | stringlengths | 4 - 127 |
| func_name | stringlengths | 1 - 88 |
| original_string | stringlengths | 75 - 19.8k |
| language | stringclasses | 1 value |
| code | stringlengths | 75 - 19.8k |
| code_tokens | list | |
| docstring | stringlengths | 3 - 17.3k |
| docstring_tokens | list | |
| sha | stringlengths | 40 - 40 |
| url | stringlengths | 87 - 242 |
22,600
tensorflow/tensor2tensor
tensor2tensor/data_generators/common_voice.py
_is_relative
def _is_relative(path, filename):
  """Checks if the filename is relative, not absolute."""
  return os.path.abspath(os.path.join(path, filename)).startswith(path)
python
[ "def", "_is_relative", "(", "path", ",", "filename", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", ")", ".", "startswith", "(", "path", ")" ]
Checks if the filename is relative, not absolute.
[ "Checks", "if", "the", "filename", "is", "relative", "not", "absolute", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/common_voice.py#L74-L76
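For orientation, a minimal sketch of how this check behaves; the base directory and filenames below are hypothetical, not part of the dataset:

```python
import os

def _is_relative(path, filename):
  """Checks if the filename is relative, not absolute."""
  return os.path.abspath(os.path.join(path, filename)).startswith(path)

# A ".." component escapes the base directory, so the joined absolute
# path no longer starts with it and the check rejects the filename.
base = os.path.abspath("/data/common_voice")
print(_is_relative(base, "clips/sample.mp3"))  # True
print(_is_relative(base, "../../etc/passwd"))  # False
```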
22,601
tensorflow/tensor2tensor
tensor2tensor/rl/ppo.py
define_ppo_step
def define_ppo_step(data_points, hparams, action_space, lr):
  """Define ppo step."""
  observation, action, discounted_reward, norm_advantage, old_pdf = data_points

  obs_shape = common_layers.shape_list(observation)
  observation = tf.reshape(
      observation, [obs_shape[0] * obs_shape[1]] + obs_shape[2:]
  )
  (logits, new_value) = get_policy(observation, hparams, action_space)
  logits = tf.reshape(logits, obs_shape[:2] + [action_space.n])
  new_value = tf.reshape(new_value, obs_shape[:2])
  new_policy_dist = tfp.distributions.Categorical(logits=logits)

  new_pdf = new_policy_dist.prob(action)

  ratio = new_pdf / old_pdf
  clipped_ratio = tf.clip_by_value(ratio, 1 - hparams.clipping_coef,
                                   1 + hparams.clipping_coef)

  surrogate_objective = tf.minimum(clipped_ratio * norm_advantage,
                                   ratio * norm_advantage)
  policy_loss = -tf.reduce_mean(surrogate_objective)

  value_error = new_value - discounted_reward
  value_loss = hparams.value_loss_coef * tf.reduce_mean(value_error ** 2)

  entropy = new_policy_dist.entropy()
  entropy_loss = -hparams.entropy_loss_coef * tf.reduce_mean(entropy)

  losses = [policy_loss, value_loss, entropy_loss]
  loss = sum(losses)

  variables = tf.global_variables(hparams.policy_network + "/.*")
  train_op = optimize.optimize(loss, lr, hparams, variables=variables)

  with tf.control_dependencies([train_op]):
    return [tf.identity(x) for x in losses]
python
[ "def", "define_ppo_step", "(", "data_points", ",", "hparams", ",", "action_space", ",", "lr", ")", ":", "observation", ",", "action", ",", "discounted_reward", ",", "norm_advantage", ",", "old_pdf", "=", "data_points", "obs_shape", "=", "common_layers", ".", "shape_list", "(", "observation", ")", "observation", "=", "tf", ".", "reshape", "(", "observation", ",", "[", "obs_shape", "[", "0", "]", "*", "obs_shape", "[", "1", "]", "]", "+", "obs_shape", "[", "2", ":", "]", ")", "(", "logits", ",", "new_value", ")", "=", "get_policy", "(", "observation", ",", "hparams", ",", "action_space", ")", "logits", "=", "tf", ".", "reshape", "(", "logits", ",", "obs_shape", "[", ":", "2", "]", "+", "[", "action_space", ".", "n", "]", ")", "new_value", "=", "tf", ".", "reshape", "(", "new_value", ",", "obs_shape", "[", ":", "2", "]", ")", "new_policy_dist", "=", "tfp", ".", "distributions", ".", "Categorical", "(", "logits", "=", "logits", ")", "new_pdf", "=", "new_policy_dist", ".", "prob", "(", "action", ")", "ratio", "=", "new_pdf", "/", "old_pdf", "clipped_ratio", "=", "tf", ".", "clip_by_value", "(", "ratio", ",", "1", "-", "hparams", ".", "clipping_coef", ",", "1", "+", "hparams", ".", "clipping_coef", ")", "surrogate_objective", "=", "tf", ".", "minimum", "(", "clipped_ratio", "*", "norm_advantage", ",", "ratio", "*", "norm_advantage", ")", "policy_loss", "=", "-", "tf", ".", "reduce_mean", "(", "surrogate_objective", ")", "value_error", "=", "new_value", "-", "discounted_reward", "value_loss", "=", "hparams", ".", "value_loss_coef", "*", "tf", ".", "reduce_mean", "(", "value_error", "**", "2", ")", "entropy", "=", "new_policy_dist", ".", "entropy", "(", ")", "entropy_loss", "=", "-", "hparams", ".", "entropy_loss_coef", "*", "tf", ".", "reduce_mean", "(", "entropy", ")", "losses", "=", "[", "policy_loss", ",", "value_loss", ",", "entropy_loss", "]", "loss", "=", "sum", "(", "losses", ")", "variables", "=", "tf", ".", "global_variables", "(", "hparams", ".", "policy_network", "+", "\"/.*\"", ")", "train_op", "=", "optimize", ".", "optimize", "(", "loss", ",", "lr", ",", "hparams", ",", "variables", "=", "variables", ")", "with", "tf", ".", "control_dependencies", "(", "[", "train_op", "]", ")", ":", "return", "[", "tf", ".", "identity", "(", "x", ")", "for", "x", "in", "losses", "]" ]
Define ppo step.
[ "Define", "ppo", "step", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo.py#L33-L68
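The core of define_ppo_step is the PPO clipped-surrogate objective; as a reference point, the same math in plain NumPy (an illustrative sketch, not code from the repository):

```python
import numpy as np

def clipped_surrogate_loss(new_pdf, old_pdf, advantage, clipping_coef=0.2):
  # Probability ratio between the updated policy and the behavior policy.
  ratio = new_pdf / old_pdf
  clipped_ratio = np.clip(ratio, 1 - clipping_coef, 1 + clipping_coef)
  # Elementwise minimum makes the objective pessimistic; the loss negates it.
  return -np.mean(np.minimum(clipped_ratio * advantage, ratio * advantage))

print(clipped_surrogate_loss(np.array([0.5, 0.9]),
                             np.array([0.4, 0.8]),
                             np.array([1.0, -0.5])))  # -0.31875
```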
22,602
tensorflow/tensor2tensor
tensor2tensor/rl/ppo.py
define_ppo_epoch
def define_ppo_epoch(memory, hparams, action_space, batch_size):
  """PPO epoch."""
  observation, reward, done, action, old_pdf, value = memory

  # This is to avoid propagating gradients through simulated environment.
  observation = tf.stop_gradient(observation)
  action = tf.stop_gradient(action)
  reward = tf.stop_gradient(reward)
  if hasattr(hparams, "rewards_preprocessing_fun"):
    reward = hparams.rewards_preprocessing_fun(reward)
  done = tf.stop_gradient(done)
  value = tf.stop_gradient(value)
  old_pdf = tf.stop_gradient(old_pdf)

  advantage = calculate_generalized_advantage_estimator(
      reward, value, done, hparams.gae_gamma, hparams.gae_lambda)

  discounted_reward = tf.stop_gradient(advantage + value[:-1])

  advantage_mean, advantage_variance = tf.nn.moments(advantage, axes=[0, 1],
                                                     keep_dims=True)
  advantage_normalized = tf.stop_gradient(
      (advantage - advantage_mean) / (tf.sqrt(advantage_variance) + 1e-8))

  add_lists_elementwise = lambda l1, l2: [x + y for x, y in zip(l1, l2)]

  number_of_batches = ((hparams.epoch_length - 1) * hparams.optimization_epochs
                       // hparams.optimization_batch_size)
  epoch_length = hparams.epoch_length
  if hparams.effective_num_agents is not None:
    number_of_batches *= batch_size
    number_of_batches //= hparams.effective_num_agents
    epoch_length //= hparams.effective_num_agents

  assert number_of_batches > 0, "Set the parameters so that number_of_batches>0"
  lr = learning_rate.learning_rate_schedule(hparams)

  shuffled_indices = [tf.random.shuffle(tf.range(epoch_length - 1))
                      for _ in range(hparams.optimization_epochs)]
  shuffled_indices = tf.concat(shuffled_indices, axis=0)
  shuffled_indices = shuffled_indices[:number_of_batches *
                                      hparams.optimization_batch_size]
  indices_of_batches = tf.reshape(shuffled_indices,
                                  shape=(-1, hparams.optimization_batch_size))
  input_tensors = [observation, action, discounted_reward,
                   advantage_normalized, old_pdf]

  ppo_step_rets = tf.scan(
      lambda a, i: add_lists_elementwise(  # pylint: disable=g-long-lambda
          a, define_ppo_step([tf.gather(t, indices_of_batches[i, :])
                              for t in input_tensors],
                             hparams, action_space, lr)),
      tf.range(number_of_batches),
      [0., 0., 0.],
      parallel_iterations=1)

  ppo_summaries = [tf.reduce_mean(ret) / number_of_batches
                   for ret in ppo_step_rets]
  ppo_summaries.append(lr)
  summaries_names = [
      "policy_loss", "value_loss", "entropy_loss", "learning_rate"
  ]

  summaries = [tf.summary.scalar(summary_name, summary)
               for summary_name, summary in zip(summaries_names, ppo_summaries)]
  losses_summary = tf.summary.merge(summaries)

  for summary_name, summary in zip(summaries_names, ppo_summaries):
    losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ")

  return losses_summary
python
[ "def", "define_ppo_epoch", "(", "memory", ",", "hparams", ",", "action_space", ",", "batch_size", ")", ":", "observation", ",", "reward", ",", "done", ",", "action", ",", "old_pdf", ",", "value", "=", "memory", "# This is to avoid propagating gradients through simulated environment.", "observation", "=", "tf", ".", "stop_gradient", "(", "observation", ")", "action", "=", "tf", ".", "stop_gradient", "(", "action", ")", "reward", "=", "tf", ".", "stop_gradient", "(", "reward", ")", "if", "hasattr", "(", "hparams", ",", "\"rewards_preprocessing_fun\"", ")", ":", "reward", "=", "hparams", ".", "rewards_preprocessing_fun", "(", "reward", ")", "done", "=", "tf", ".", "stop_gradient", "(", "done", ")", "value", "=", "tf", ".", "stop_gradient", "(", "value", ")", "old_pdf", "=", "tf", ".", "stop_gradient", "(", "old_pdf", ")", "advantage", "=", "calculate_generalized_advantage_estimator", "(", "reward", ",", "value", ",", "done", ",", "hparams", ".", "gae_gamma", ",", "hparams", ".", "gae_lambda", ")", "discounted_reward", "=", "tf", ".", "stop_gradient", "(", "advantage", "+", "value", "[", ":", "-", "1", "]", ")", "advantage_mean", ",", "advantage_variance", "=", "tf", ".", "nn", ".", "moments", "(", "advantage", ",", "axes", "=", "[", "0", ",", "1", "]", ",", "keep_dims", "=", "True", ")", "advantage_normalized", "=", "tf", ".", "stop_gradient", "(", "(", "advantage", "-", "advantage_mean", ")", "/", "(", "tf", ".", "sqrt", "(", "advantage_variance", ")", "+", "1e-8", ")", ")", "add_lists_elementwise", "=", "lambda", "l1", ",", "l2", ":", "[", "x", "+", "y", "for", "x", ",", "y", "in", "zip", "(", "l1", ",", "l2", ")", "]", "number_of_batches", "=", "(", "(", "hparams", ".", "epoch_length", "-", "1", ")", "*", "hparams", ".", "optimization_epochs", "//", "hparams", ".", "optimization_batch_size", ")", "epoch_length", "=", "hparams", ".", "epoch_length", "if", "hparams", ".", "effective_num_agents", "is", "not", "None", ":", "number_of_batches", "*=", "batch_size", "number_of_batches", "//=", "hparams", ".", "effective_num_agents", "epoch_length", "//=", "hparams", ".", "effective_num_agents", "assert", "number_of_batches", ">", "0", ",", "\"Set the paremeters so that number_of_batches>0\"", "lr", "=", "learning_rate", ".", "learning_rate_schedule", "(", "hparams", ")", "shuffled_indices", "=", "[", "tf", ".", "random", ".", "shuffle", "(", "tf", ".", "range", "(", "epoch_length", "-", "1", ")", ")", "for", "_", "in", "range", "(", "hparams", ".", "optimization_epochs", ")", "]", "shuffled_indices", "=", "tf", ".", "concat", "(", "shuffled_indices", ",", "axis", "=", "0", ")", "shuffled_indices", "=", "shuffled_indices", "[", ":", "number_of_batches", "*", "hparams", ".", "optimization_batch_size", "]", "indices_of_batches", "=", "tf", ".", "reshape", "(", "shuffled_indices", ",", "shape", "=", "(", "-", "1", ",", "hparams", ".", "optimization_batch_size", ")", ")", "input_tensors", "=", "[", "observation", ",", "action", ",", "discounted_reward", ",", "advantage_normalized", ",", "old_pdf", "]", "ppo_step_rets", "=", "tf", ".", "scan", "(", "lambda", "a", ",", "i", ":", "add_lists_elementwise", "(", "# pylint: disable=g-long-lambda", "a", ",", "define_ppo_step", "(", "[", "tf", ".", "gather", "(", "t", ",", "indices_of_batches", "[", "i", ",", ":", "]", ")", "for", "t", "in", "input_tensors", "]", ",", "hparams", ",", "action_space", ",", "lr", ")", ")", ",", "tf", ".", "range", "(", "number_of_batches", ")", ",", "[", "0.", ",", "0.", ",", "0.", "]", ",", "parallel_iterations", "=", "1", 
")", "ppo_summaries", "=", "[", "tf", ".", "reduce_mean", "(", "ret", ")", "/", "number_of_batches", "for", "ret", "in", "ppo_step_rets", "]", "ppo_summaries", ".", "append", "(", "lr", ")", "summaries_names", "=", "[", "\"policy_loss\"", ",", "\"value_loss\"", ",", "\"entropy_loss\"", ",", "\"learning_rate\"", "]", "summaries", "=", "[", "tf", ".", "summary", ".", "scalar", "(", "summary_name", ",", "summary", ")", "for", "summary_name", ",", "summary", "in", "zip", "(", "summaries_names", ",", "ppo_summaries", ")", "]", "losses_summary", "=", "tf", ".", "summary", ".", "merge", "(", "summaries", ")", "for", "summary_name", ",", "summary", "in", "zip", "(", "summaries_names", ",", "ppo_summaries", ")", ":", "losses_summary", "=", "tf", ".", "Print", "(", "losses_summary", ",", "[", "summary", "]", ",", "summary_name", "+", "\": \"", ")", "return", "losses_summary" ]
PPO epoch.
[ "PPO", "epoch", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo.py#L71-L142
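The number_of_batches arithmetic in define_ppo_epoch is easy to sanity-check by hand; (epoch_length - 1) appears because GAE yields one fewer transition than the rollout length. The hparam values below are made up for illustration:

```python
# Hypothetical values: epoch_length=51, optimization_epochs=4,
# optimization_batch_size=25 -> 50 usable transitions, revisited 4 times.
epoch_length, optimization_epochs, optimization_batch_size = 51, 4, 25
number_of_batches = (epoch_length - 1) * optimization_epochs // optimization_batch_size
print(number_of_batches)  # 8 minibatches of 25 indices each
assert number_of_batches > 0, "Set the parameters so that number_of_batches>0"
```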
22,603
tensorflow/tensor2tensor
tensor2tensor/rl/ppo.py
calculate_generalized_advantage_estimator
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args

  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])

  return tf.check_numerics(return_, "return")
python
[ "def", "calculate_generalized_advantage_estimator", "(", "reward", ",", "value", ",", "done", ",", "gae_gamma", ",", "gae_lambda", ")", ":", "# pylint: disable=g-doc-args", "# pylint: enable=g-doc-args", "next_value", "=", "value", "[", "1", ":", ",", ":", "]", "next_not_done", "=", "1", "-", "tf", ".", "cast", "(", "done", "[", "1", ":", ",", ":", "]", ",", "tf", ".", "float32", ")", "delta", "=", "(", "reward", "[", ":", "-", "1", ",", ":", "]", "+", "gae_gamma", "*", "next_value", "*", "next_not_done", "-", "value", "[", ":", "-", "1", ",", ":", "]", ")", "return_", "=", "tf", ".", "reverse", "(", "tf", ".", "scan", "(", "lambda", "agg", ",", "cur", ":", "cur", "[", "0", "]", "+", "cur", "[", "1", "]", "*", "gae_gamma", "*", "gae_lambda", "*", "agg", ",", "[", "tf", ".", "reverse", "(", "delta", ",", "[", "0", "]", ")", ",", "tf", ".", "reverse", "(", "next_not_done", ",", "[", "0", "]", ")", "]", ",", "tf", ".", "zeros_like", "(", "delta", "[", "0", ",", ":", "]", ")", ",", "parallel_iterations", "=", "1", ")", ",", "[", "0", "]", ")", "return", "tf", ".", "check_numerics", "(", "return_", ",", "\"return\"", ")" ]
Generalized advantage estimator. Returns: GAE estimator. It will be one element shorter than the input; this is because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
[ "Generalized", "advantage", "estimator", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo.py#L145-L166
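The reversed tf.scan above implements the usual GAE recursion A_t = delta_t + gamma * lambda * (1 - done_{t+1}) * A_{t+1}; a NumPy sketch of the same computation on time-major [time, batch] arrays (illustrative, not repository code):

```python
import numpy as np

def gae(reward, value, done, gae_gamma, gae_lambda):
  # Output is one element shorter than the input, as in the TF version.
  next_not_done = 1.0 - done[1:].astype(np.float32)
  delta = reward[:-1] + gae_gamma * value[1:] * next_not_done - value[:-1]
  advantage = np.zeros_like(delta)
  acc = np.zeros_like(delta[0])
  for t in reversed(range(delta.shape[0])):  # backward accumulation
    acc = delta[t] + gae_gamma * gae_lambda * next_not_done[t] * acc
    advantage[t] = acc
  return advantage

reward = np.ones((4, 1)); value = np.zeros((4, 1)); done = np.zeros((4, 1))
print(gae(reward, value, done, 0.99, 0.95).ravel())  # ~[2.825 1.9405 1.]
```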
22,604
tensorflow/tensor2tensor
tensor2tensor/envs/gym_spaces_utils.py
gym_space_spec
def gym_space_spec(gym_space):
  """Returns a reading spec of a gym space.

  NOTE: Only implemented currently for Box and Discrete.

  Args:
    gym_space: instance of gym.spaces whose spec we want.

  Returns:
    Reading spec for that space.

  Raises:
    NotImplementedError: For spaces whose reading spec we haven't implemented.
  """
  # First try to determine the type.
  try:
    tf_dtype = tf.as_dtype(gym_space.dtype)
  except TypeError as e:
    tf.logging.error("Cannot convert space's type [%s] to tf.dtype",
                     gym_space.dtype)
    raise e

  # Now hand it over to the specialized functions.
  if isinstance(gym_space, Box):
    return box_space_spec(gym_space, tf_dtype)
  elif isinstance(gym_space, Discrete):
    return discrete_space_spec(gym_space, tf_dtype)
  else:
    raise NotImplementedError
python
[ "def", "gym_space_spec", "(", "gym_space", ")", ":", "# First try to determine the type.", "try", ":", "tf_dtype", "=", "tf", ".", "as_dtype", "(", "gym_space", ".", "dtype", ")", "except", "TypeError", "as", "e", ":", "tf", ".", "logging", ".", "error", "(", "\"Cannot convert space's type [%s] to tf.dtype\"", ",", "gym_space", ".", "dtype", ")", "raise", "e", "# Now hand it over to the specialized functions.", "if", "isinstance", "(", "gym_space", ",", "Box", ")", ":", "return", "box_space_spec", "(", "gym_space", ",", "tf_dtype", ")", "elif", "isinstance", "(", "gym_space", ",", "Discrete", ")", ":", "return", "discrete_space_spec", "(", "gym_space", ",", "tf_dtype", ")", "else", ":", "raise", "NotImplementedError" ]
Returns a reading spec of a gym space. NOTE: Only implemented currently for Box and Discrete. Args: gym_space: instance of gym.spaces whose spec we want. Returns: Reading spec for that space. Raises: NotImplementedError: For spaces whose reading spec we haven't implemented.
[ "Returns", "a", "reading", "spec", "of", "a", "gym", "space", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/gym_spaces_utils.py#L41-L69
22,605
tensorflow/tensor2tensor
tensor2tensor/envs/gym_spaces_utils.py
cardinality
def cardinality(gym_space):
  """Number of elements that can be represented by the space.

  Makes the most sense for Discrete or Box type with integral dtype, ex: number
  of actions in an action space.

  Args:
    gym_space: The gym space.

  Returns:
    np.int64 number of observations that can be represented by this space, or
    returns None when this doesn't make sense, i.e. float boxes etc.

  Raises:
    NotImplementedError when a space's cardinality makes sense but we haven't
    implemented it.
  """

  if (gym_space.dtype == np.float32) or (gym_space.dtype == np.float64):
    tf.logging.error("Returning None for a float gym space's cardinality: ",
                     gym_space)
    return None

  if isinstance(gym_space, Discrete):
    return gym_space.n

  if isinstance(gym_space, Box):
    # Construct a box with all possible values in this box and take a product.
    return np.prod(gym_space.high - gym_space.low + 1)

  raise NotImplementedError
python
[ "def", "cardinality", "(", "gym_space", ")", ":", "if", "(", "gym_space", ".", "dtype", "==", "np", ".", "float32", ")", "or", "(", "gym_space", ".", "dtype", "==", "np", ".", "float64", ")", ":", "tf", ".", "logging", ".", "error", "(", "\"Returning None for a float gym space's cardinality: \"", ",", "gym_space", ")", "return", "None", "if", "isinstance", "(", "gym_space", ",", "Discrete", ")", ":", "return", "gym_space", ".", "n", "if", "isinstance", "(", "gym_space", ",", "Box", ")", ":", "# Construct a box with all possible values in this box and take a product.", "return", "np", ".", "prod", "(", "gym_space", ".", "high", "-", "gym_space", ".", "low", "+", "1", ")", "raise", "NotImplementedError" ]
Number of elements that can be represented by the space. Makes the most sense for Discrete or Box type with integral dtype, ex: number of actions in an action space. Args: gym_space: The gym space. Returns: np.int64 number of observations that can be represented by this space, or returns None when this doesn't make sense, i.e. float boxes etc. Raises: NotImplementedError when a space's cardinality makes sense but we haven't implemented it.
[ "Number", "of", "elements", "that", "can", "be", "represented", "by", "the", "space", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/gym_spaces_utils.py#L83-L113
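A quick check of the Box branch of cardinality, using hypothetical integer bounds rather than a real gym space:

```python
import numpy as np

# An integer Box with low=0 and high=255 in each of two dimensions can
# represent 256 * 256 distinct points.
low = np.zeros(2, dtype=np.int64)
high = np.full(2, 255, dtype=np.int64)
print(np.prod(high - low + 1))  # 65536
```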
22,606
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
image_rmse
def image_rmse(predictions, labels, weights_fn=common_layers.weights_all):
  """RMSE but will argmax if last dim is not 1."""
  if common_layers.shape_list(predictions)[-1] == 1:
    predictions = tf.squeeze(predictions, axis=[-1])
  else:
    predictions = tf.argmax(predictions, axis=-1)
  return padded_rmse(predictions, labels, weights_fn)
python
[ "def", "image_rmse", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_all", ")", ":", "if", "common_layers", ".", "shape_list", "(", "predictions", ")", "[", "-", "1", "]", "==", "1", ":", "predictions", "=", "tf", ".", "squeeze", "(", "predictions", ",", "axis", "=", "[", "-", "1", "]", ")", "else", ":", "predictions", "=", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", "return", "padded_rmse", "(", "predictions", ",", "labels", ",", "weights_fn", ")" ]
RMSE but will argmax if last dim is not 1.
[ "RMSE", "but", "will", "argmax", "if", "last", "dim", "is", "not", "1", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L69-L75
22,607
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
padded_variance_explained
def padded_variance_explained(predictions,
                              labels,
                              weights_fn=common_layers.weights_all):
  """Explained variance, also known as R^2."""
  predictions, labels = common_layers.pad_with_zeros(predictions, labels)
  targets = labels
  weights = weights_fn(targets)

  y_bar = tf.reduce_mean(weights * targets)
  tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2))
  res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2))
  r2 = 1. - res_ss / tot_ss
  return r2, tf.reduce_sum(weights)
python
[ "def", "padded_variance_explained", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_all", ")", ":", "predictions", ",", "labels", "=", "common_layers", ".", "pad_with_zeros", "(", "predictions", ",", "labels", ")", "targets", "=", "labels", "weights", "=", "weights_fn", "(", "targets", ")", "y_bar", "=", "tf", ".", "reduce_mean", "(", "weights", "*", "targets", ")", "tot_ss", "=", "tf", ".", "reduce_sum", "(", "weights", "*", "tf", ".", "pow", "(", "targets", "-", "y_bar", ",", "2", ")", ")", "res_ss", "=", "tf", ".", "reduce_sum", "(", "weights", "*", "tf", ".", "pow", "(", "targets", "-", "predictions", ",", "2", ")", ")", "r2", "=", "1.", "-", "res_ss", "/", "tot_ss", "return", "r2", ",", "tf", ".", "reduce_sum", "(", "weights", ")" ]
Explained variance, also known as R^2.
[ "Explained", "variance", "also", "known", "as", "R^2", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L109-L121
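The R^2 computation is straightforward to verify with NumPy on hypothetical values (weights_all simply keeps every position):

```python
import numpy as np

targets = np.array([1.0, 2.0, 3.0, 4.0])
predictions = np.array([1.1, 1.9, 3.2, 3.8])
weights = np.ones_like(targets)

y_bar = np.mean(weights * targets)
tot_ss = np.sum(weights * (targets - y_bar) ** 2)        # total sum of squares
res_ss = np.sum(weights * (targets - predictions) ** 2)  # residual sum
print(1.0 - res_ss / tot_ss)  # 0.98, close to a perfect fit of 1.0
```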
22,608
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
sequence_edit_distance
def sequence_edit_distance(predictions,
                           labels,
                           weights_fn=common_layers.weights_nonzero):
  """Average edit distance, ignoring padding 0s.

  The score returned is the edit distance divided by the total length of
  reference truth and the weight returned is the total length of the truth.

  Args:
    predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and
        type tf.float32 representing the logits, 0-padded.
    labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32
        representing the labels of same length as logits and 0-padded.
    weights_fn: ignored. The weights returned are the total length of the
        ground truth labels, excluding 0-paddings.

  Returns:
    (edit distance / reference length, reference length)

  Raises:
    ValueError: if weights_fn is not common_layers.weights_nonzero.
  """
  if weights_fn is not common_layers.weights_nonzero:
    raise ValueError("Only weights_nonzero can be used for this metric.")

  with tf.variable_scope("edit_distance", values=[predictions, labels]):
    # Transform logits into sequence classes by taking max at every step.
    predictions = tf.to_int32(
        tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3)))
    nonzero_idx = tf.where(tf.not_equal(predictions, 0))
    sparse_outputs = tf.SparseTensor(nonzero_idx,
                                     tf.gather_nd(predictions, nonzero_idx),
                                     tf.shape(predictions, out_type=tf.int64))
    labels = tf.squeeze(labels, axis=(2, 3))
    nonzero_idx = tf.where(tf.not_equal(labels, 0))
    label_sparse_outputs = tf.SparseTensor(nonzero_idx,
                                           tf.gather_nd(labels, nonzero_idx),
                                           tf.shape(labels, out_type=tf.int64))
    distance = tf.reduce_sum(
        tf.edit_distance(sparse_outputs, label_sparse_outputs,
                         normalize=False))
    reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0])
    return distance / reference_length, reference_length
python
[ "def", "sequence_edit_distance", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "if", "weights_fn", "is", "not", "common_layers", ".", "weights_nonzero", ":", "raise", "ValueError", "(", "\"Only weights_nonzero can be used for this metric.\"", ")", "with", "tf", ".", "variable_scope", "(", "\"edit_distance\"", ",", "values", "=", "[", "predictions", ",", "labels", "]", ")", ":", "# Transform logits into sequence classes by taking max at every step.", "predictions", "=", "tf", ".", "to_int32", "(", "tf", ".", "squeeze", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ",", "axis", "=", "(", "2", ",", "3", ")", ")", ")", "nonzero_idx", "=", "tf", ".", "where", "(", "tf", ".", "not_equal", "(", "predictions", ",", "0", ")", ")", "sparse_outputs", "=", "tf", ".", "SparseTensor", "(", "nonzero_idx", ",", "tf", ".", "gather_nd", "(", "predictions", ",", "nonzero_idx", ")", ",", "tf", ".", "shape", "(", "predictions", ",", "out_type", "=", "tf", ".", "int64", ")", ")", "labels", "=", "tf", ".", "squeeze", "(", "labels", ",", "axis", "=", "(", "2", ",", "3", ")", ")", "nonzero_idx", "=", "tf", ".", "where", "(", "tf", ".", "not_equal", "(", "labels", ",", "0", ")", ")", "label_sparse_outputs", "=", "tf", ".", "SparseTensor", "(", "nonzero_idx", ",", "tf", ".", "gather_nd", "(", "labels", ",", "nonzero_idx", ")", ",", "tf", ".", "shape", "(", "labels", ",", "out_type", "=", "tf", ".", "int64", ")", ")", "distance", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "edit_distance", "(", "sparse_outputs", ",", "label_sparse_outputs", ",", "normalize", "=", "False", ")", ")", "reference_length", "=", "tf", ".", "to_float", "(", "common_layers", ".", "shape_list", "(", "nonzero_idx", ")", "[", "0", "]", ")", "return", "distance", "/", "reference_length", ",", "reference_length" ]
Average edit distance, ignoring padding 0s. The score returned is the edit distance divided by the total length of reference truth and the weight returned is the total length of the truth. Args: predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and type tf.float32 representing the logits, 0-padded. labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 representing the labels of same length as logits and 0-padded. weights_fn: ignored. The weights returned are the total length of the ground truth labels, excluding 0-paddings. Returns: (edit distance / reference length, reference length) Raises: ValueError: if weights_fn is not common_layers.weights_nonzero.
[ "Average", "edit", "distance", "ignoring", "padding", "0s", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L200-L241
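The metric reduces to total Levenshtein distance over total reference length; a pure-Python check on hypothetical 0-padded token ids (illustrative, independent of TensorFlow):

```python
def levenshtein(a, b):
  # Rolling-array dynamic programming for edit distance.
  dp = list(range(len(b) + 1))
  for i, x in enumerate(a, 1):
    prev, dp[0] = dp[0], i
    for j, y in enumerate(b, 1):
      prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (x != y))
  return dp[-1]

strip = lambda seq: [t for t in seq if t != 0]  # drop padding 0s
pred = [[4, 5, 0, 0], [7, 0, 0, 0]]   # hypothetical decoded ids
ref = [[4, 6, 0, 0], [7, 8, 0, 0]]
dist = sum(levenshtein(strip(p), strip(r)) for p, r in zip(pred, ref))
ref_len = sum(len(strip(r)) for r in ref)
print(dist / ref_len, ref_len)  # 0.5 4 -> two edits over four reference tokens
```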
22,609
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
padded_neg_log_perplexity
def padded_neg_log_perplexity(predictions,
                              labels,
                              weights_fn=common_layers.weights_nonzero):
  """Average log-perplexity excluding padding 0s. No smoothing."""
  num, den = common_layers.padded_cross_entropy(
      predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False)
  return (-num, den)
python
[ "def", "padded_neg_log_perplexity", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "num", ",", "den", "=", "common_layers", ".", "padded_cross_entropy", "(", "predictions", ",", "labels", ",", "0.0", ",", "weights_fn", "=", "weights_fn", ",", "reduce_sum", "=", "False", ")", "return", "(", "-", "num", ",", "den", ")" ]
Average log-perplexity excluding padding 0s. No smoothing.
[ "Average", "log", "-", "perplexity", "exluding", "padding", "0s", ".", "No", "smoothing", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L244-L250
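For intuition, log-perplexity here is the weighted per-token cross-entropy, so with hypothetical values (one padded position excluded by its zero weight):

```python
import numpy as np

cross_entropy = np.array([2.3, 1.6, 0.9, 0.0])  # per-token values
weights = np.array([1.0, 1.0, 1.0, 0.0])        # last position is padding
neg_log_perplexity = -np.sum(cross_entropy * weights) / np.sum(weights)
print(neg_log_perplexity)           # -1.6
print(np.exp(-neg_log_perplexity))  # perplexity, about 4.95
```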
22,610
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
padded_neg_log_perplexity_with_masking
def padded_neg_log_perplexity_with_masking(
    predictions,
    labels,
    features,
    weights_fn=None):
  """Average log-perplexity with custom targets_mask."""
  del weights_fn
  if "targets_mask" not in features:
    raise ValueError("masked_neg_log_perplexity requires targets_mask feature")

  # Features are 4 dimensional, so we need to reshape the targets_mask to match
  # the shape of the labels. A lot of models rely on these features being 4D,
  # so it's best to update the shape of the mask.
  extended_targets_mask_shape = common_layers.shape_list(
      features["targets_mask"])
  extended_targets_mask_shape.extend([1, 1])
  features["targets_mask"] = tf.reshape(features["targets_mask"],
                                        shape=extended_targets_mask_shape)

  mask_fn = lambda labels: features["targets_mask"]
  return padded_neg_log_perplexity(predictions, labels, mask_fn)
python
[ "def", "padded_neg_log_perplexity_with_masking", "(", "predictions", ",", "labels", ",", "features", ",", "weights_fn", "=", "None", ")", ":", "del", "weights_fn", "if", "\"targets_mask\"", "not", "in", "features", ":", "raise", "ValueError", "(", "\"masked_neg_log_perplexity requires targets_mask feature\"", ")", "# Features are 4 dimensional, so we need to reshape the targets_mask to match", "# the shape of the labels. A lot of models rely on these features being 4D,", "# so it's best to update the shape of the mask.", "extended_targets_mask_shape", "=", "common_layers", ".", "shape_list", "(", "features", "[", "\"targets_mask\"", "]", ")", "extended_targets_mask_shape", ".", "extend", "(", "[", "1", ",", "1", "]", ")", "features", "[", "\"targets_mask\"", "]", "=", "tf", ".", "reshape", "(", "features", "[", "\"targets_mask\"", "]", ",", "shape", "=", "extended_targets_mask_shape", ")", "mask_fn", "=", "lambda", "labels", ":", "features", "[", "\"targets_mask\"", "]", "return", "padded_neg_log_perplexity", "(", "predictions", ",", "labels", ",", "mask_fn", ")" ]
Average log-perplexity with custom targets_mask.
[ "Average", "log", "-", "perplexity", "with", "custom", "targets_mask", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L253-L273
22,611
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
multilabel_accuracy_matchk
def multilabel_accuracy_matchk(predictions,
                               labels,
                               k,
                               weights_fn=common_layers.weights_nonzero):
  """Used to evaluate the VQA accuracy.

  Let n be the times that predictions appear in labels, then final score
  is min(n/k, 1).
  Refer to https://arxiv.org/pdf/1505.00468.pdf.

  Args:
    predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size].
    labels: A tensor with shape [batch_size, length, 1, 1].
    k: A tensor constant.
    weights_fn: weight function.
  Returns:
    scores: min(n/k, 1).
    weights: returns all ones.
  """
  predictions = tf.to_int32(tf.argmax(predictions, axis=-1))
  scores = tf.to_float(tf.equal(predictions, labels))
  # those label == 0 do not count
  weights = weights_fn(labels)
  scores *= weights
  scores = tf.reduce_sum(scores, axis=[1, 2, 3])
  scores = tf.minimum(scores / tf.to_float(k), 1)
  # every sample count
  weights = tf.ones(tf.shape(scores), dtype=tf.float32)

  return scores, weights
python
[ "def", "multilabel_accuracy_matchk", "(", "predictions", ",", "labels", ",", "k", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "predictions", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "scores", "=", "tf", ".", "to_float", "(", "tf", ".", "equal", "(", "predictions", ",", "labels", ")", ")", "# those label == 0 do not count", "weights", "=", "weights_fn", "(", "labels", ")", "scores", "*=", "weights", "scores", "=", "tf", ".", "reduce_sum", "(", "scores", ",", "axis", "=", "[", "1", ",", "2", ",", "3", "]", ")", "scores", "=", "tf", ".", "minimum", "(", "scores", "/", "tf", ".", "to_float", "(", "k", ")", ",", "1", ")", "# every sample count", "weights", "=", "tf", ".", "ones", "(", "tf", ".", "shape", "(", "scores", ")", ",", "dtype", "=", "tf", ".", "float32", ")", "return", "scores", ",", "weights" ]
Used to evaluate the VQA accuracy. Let n be the times that predictions appear in labels, then final score is min(n/k, 1). Refer to https://arxiv.org/pdf/1505.00468.pdf. Args: predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size]. labels: A tensor with shape [batch_size, length, 1, 1]. k: A tensor constant. weights_fn: weight function. Returns: scores: min(n/k, 1). weights: returns all ones.
[ "Used", "to", "evaluate", "the", "VQA", "accuracy", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L313-L343
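A hand calculation of the min(n/k, 1) score, with hypothetical annotator answers in place of real VQA labels:

```python
import numpy as np

prediction = 7                           # predicted answer id
annotator_answers = np.array([7, 7, 4])  # k=3 reference answers
n = np.sum(annotator_answers == prediction)
print(min(n / 3.0, 1.0))  # 0.667: two of the three annotators agree
```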
22,612
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
set_precision
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
python
[ "def", "set_precision", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"set_precision\"", ",", "values", "=", "[", "predictions", ",", "labels", "]", ")", ":", "labels", "=", "tf", ".", "squeeze", "(", "labels", ",", "[", "2", ",", "3", "]", ")", "weights", "=", "weights_fn", "(", "labels", ")", "labels", "=", "tf", ".", "one_hot", "(", "labels", ",", "predictions", ".", "shape", "[", "-", "1", "]", ")", "labels", "=", "tf", ".", "reduce_max", "(", "labels", ",", "axis", "=", "1", ")", "labels", "=", "tf", ".", "cast", "(", "labels", ",", "tf", ".", "bool", ")", "return", "tf", ".", "to_float", "(", "tf", ".", "equal", "(", "labels", ",", "predictions", ")", ")", ",", "weights" ]
Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels].
[ "Precision", "of", "set", "predictions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L351-L371
22,613
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
image_summary
def image_summary(predictions, targets, hparams):
  """Reshapes predictions and passes it to tensorboard.

  Args:
    predictions: The predicted image (logits).
    targets: The ground truth.
    hparams: model hparams.

  Returns:
    summary_proto: containing the summary images.
    weights: A Tensor of zeros of the same shape as predictions.
  """
  del hparams
  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
  gold = tf.cast(targets, tf.uint8)
  summary1 = tf.summary.image("prediction", results, max_outputs=2)
  summary2 = tf.summary.image("data", gold, max_outputs=2)
  summary = tf.summary.merge([summary1, summary2])
  return summary, tf.zeros_like(predictions)
python
[ "def", "image_summary", "(", "predictions", ",", "targets", ",", "hparams", ")", ":", "del", "hparams", "results", "=", "tf", ".", "cast", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ",", "tf", ".", "uint8", ")", "gold", "=", "tf", ".", "cast", "(", "targets", ",", "tf", ".", "uint8", ")", "summary1", "=", "tf", ".", "summary", ".", "image", "(", "\"prediction\"", ",", "results", ",", "max_outputs", "=", "2", ")", "summary2", "=", "tf", ".", "summary", ".", "image", "(", "\"data\"", ",", "gold", ",", "max_outputs", "=", "2", ")", "summary", "=", "tf", ".", "summary", ".", "merge", "(", "[", "summary1", ",", "summary2", "]", ")", "return", "summary", ",", "tf", ".", "zeros_like", "(", "predictions", ")" ]
Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions.
[ "Reshapes", "predictions", "and", "passes", "it", "to", "tensorboard", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L396-L414
22,614
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
softmax_cross_entropy_one_hot
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
  """Calculate softmax cross entropy given one-hot labels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    cross-entropy (scalar), weights
  """
  with tf.variable_scope("softmax_cross_entropy_one_hot",
                         values=[logits, labels]):
    del weights_fn
    cross_entropy = tf.losses.softmax_cross_entropy(
        onehot_labels=labels, logits=logits)
    return cross_entropy, tf.constant(1.0)
python
[ "def", "softmax_cross_entropy_one_hot", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"softmax_cross_entropy_one_hot\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "del", "weights_fn", "cross_entropy", "=", "tf", ".", "losses", ".", "softmax_cross_entropy", "(", "onehot_labels", "=", "labels", ",", "logits", "=", "logits", ")", "return", "cross_entropy", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights
[ "Calculate", "softmax", "cross", "entropy", "given", "one", "-", "hot", "labels", "and", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L417-L432
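The same quantity on hypothetical logits in NumPy, for reference (tf.losses.softmax_cross_entropy averages this over the batch):

```python
import numpy as np

logits = np.array([2.0, 1.0, 0.1])
labels = np.array([1.0, 0.0, 0.0])  # one-hot, true class is index 0
log_softmax = logits - np.log(np.sum(np.exp(logits)))
print(-np.sum(labels * log_softmax))  # about 0.417
```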
22,615
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
sigmoid_accuracy_one_hot
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
  """Calculate accuracy for a set, given one-hot labels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    accuracy (scalar), weights
  """
  with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
    del weights_fn
    predictions = tf.nn.sigmoid(logits)
    labels = tf.argmax(labels, -1)
    predictions = tf.argmax(predictions, -1)
    _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
    return accuracy, tf.constant(1.0)
python
[ "def", "sigmoid_accuracy_one_hot", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"sigmoid_accuracy_one_hot\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "del", "weights_fn", "predictions", "=", "tf", ".", "nn", ".", "sigmoid", "(", "logits", ")", "labels", "=", "tf", ".", "argmax", "(", "labels", ",", "-", "1", ")", "predictions", "=", "tf", ".", "argmax", "(", "predictions", ",", "-", "1", ")", "_", ",", "accuracy", "=", "tf", ".", "metrics", ".", "accuracy", "(", "labels", "=", "labels", ",", "predictions", "=", "predictions", ")", "return", "accuracy", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights
[ "Calculate", "accuracy", "for", "a", "set", "given", "one", "-", "hot", "labels", "and", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L435-L451
22,616
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
sigmoid_recall_one_hot
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
  """Calculate recall for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    recall (scalar), weights
  """
  with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, recall = tf.metrics.recall(labels=labels, predictions=predictions)
    return recall, tf.constant(1.0)
python
[ "def", "sigmoid_recall_one_hot", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"sigmoid_recall_one_hot\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "del", "weights_fn", "num_classes", "=", "logits", ".", "shape", "[", "-", "1", "]", "predictions", "=", "tf", ".", "nn", ".", "sigmoid", "(", "logits", ")", "predictions", "=", "tf", ".", "argmax", "(", "predictions", ",", "-", "1", ")", "predictions", "=", "tf", ".", "one_hot", "(", "predictions", ",", "num_classes", ")", "_", ",", "recall", "=", "tf", ".", "metrics", ".", "recall", "(", "labels", "=", "labels", ",", "predictions", "=", "predictions", ")", "return", "recall", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate recall for a set, given one-hot labels and logits. Predictions are converted to one-hot, as predictions[example][arg-max(example)] = 1 Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: recall (scalar), weights
[ "Calculate", "recall", "for", "a", "set", "given", "one", "-", "hot", "labels", "and", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L477-L497
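The one-hot conversion step in sigmoid_recall_one_hot is worth seeing in isolation: since sigmoid is monotonic, argmax(sigmoid(x)) equals argmax(x), so exactly one class per example is predicted positive and recall is computed against that one-hot tensor. A standalone sketch of the conversion with made-up values (TF 1.x):

import tensorflow as tf

logits = tf.constant([[0.2, 1.5, -0.3]])  # [batch=1, num_classes=3]
one_hot_pred = tf.one_hot(tf.argmax(tf.nn.sigmoid(logits), -1), 3)
with tf.Session() as sess:
  print(sess.run(one_hot_pred))  # [[0., 1., 0.]]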
22,617
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
sigmoid_cross_entropy_one_hot
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights """ with tf.variable_scope("sigmoid_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.sigmoid_cross_entropy( multi_class_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
python
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights """ with tf.variable_scope("sigmoid_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.sigmoid_cross_entropy( multi_class_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
[ "def", "sigmoid_cross_entropy_one_hot", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"sigmoid_cross_entropy_one_hot\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "del", "weights_fn", "cross_entropy", "=", "tf", ".", "losses", ".", "sigmoid_cross_entropy", "(", "multi_class_labels", "=", "labels", ",", "logits", "=", "logits", ")", "return", "cross_entropy", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights
[ "Calculate", "sigmoid", "cross", "entropy", "for", "one", "-", "hot", "labels", "and", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L500-L515
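For intuition, the underlying elementwise loss is softplus-based: for logit x and label z it is max(x, 0) - x*z + log(1 + exp(-|x|)), averaged over entries. A tiny numeric sketch with made-up values (TF 1.x):

import tensorflow as tf

logits = tf.constant([[2.0, -1.0]])
labels = tf.constant([[1.0, 0.0]])
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)
with tf.Session() as sess:
  # (softplus(-2) + softplus(-1)) / 2 = (0.1269 + 0.3133) / 2
  print(sess.run(loss))  # ~0.22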
22,618
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
roc_auc
def roc_auc(logits, labels, weights_fn=None): """Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights """ del weights_fn with tf.variable_scope("roc_auc", values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) _, auc = tf.metrics.auc(labels, predictions, curve="ROC") return auc, tf.constant(1.0)
python
def roc_auc(logits, labels, weights_fn=None): """Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights """ del weights_fn with tf.variable_scope("roc_auc", values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) _, auc = tf.metrics.auc(labels, predictions, curve="ROC") return auc, tf.constant(1.0)
[ "def", "roc_auc", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "del", "weights_fn", "with", "tf", ".", "variable_scope", "(", "\"roc_auc\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "predictions", "=", "tf", ".", "argmax", "(", "logits", ",", "axis", "=", "-", "1", ")", "_", ",", "auc", "=", "tf", ".", "metrics", ".", "auc", "(", "labels", ",", "predictions", ",", "curve", "=", "\"ROC\"", ")", "return", "auc", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights
[ "Calculate", "ROC", "AUC", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L518-L534
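Note that roc_auc feeds hard argmax decisions into tf.metrics.auc, so the ROC curve collapses to a single operating point; when continuous scores are available, passing the positive-class probability keeps the full curve. A hedged sketch of the latter (made-up values, TF 1.x):

import tensorflow as tf

labels = tf.constant([0, 0, 1, 1])
scores = tf.constant([0.1, 0.4, 0.35, 0.8])  # positive-class probabilities
auc_value, update_op = tf.metrics.auc(labels, scores, curve="ROC")
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(auc_value))  # ~0.75: three of four pos/neg pairs are ranked correctly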
22,619
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
create_evaluation_metrics
def create_evaluation_metrics(problems, model_hparams): """Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum). """ def reduce_dimensions(predictions, labels): """Reduce dimensions for high-dimensional predictions and labels.""" # We will treat first dimensions as batch. One example are video frames. if len(predictions.get_shape()) > 5: predictions_shape = common_layers.shape_list(predictions) predictions = tf.reshape( predictions, [predictions_shape[0], predictions_shape[1], -1, predictions_shape[-1]]) labels_shape = common_layers.shape_list(labels) labels = tf.reshape( labels, [labels_shape[0], labels_shape[1], -1]) return predictions, labels def make_problem_specific_metric_fn(metric_fn, weights_fn): """Create a metric fn.""" def problem_metric_fn(predictions, features, labels): """Metric fn.""" # Send along the entire features dict if the metric fn has the kwarg # "features". kwargs = {} args, _, keywords, _ = inspect.getargspec(metric_fn) if ("features" in args) or keywords: kwargs["features"] = features predictions, labels = reduce_dimensions(predictions, labels) scores, weights = metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs) return tf.metrics.mean(scores, weights) return problem_metric_fn def make_image_wrapped_metric_fn(metric_fn): """Metric fn without tf.metrics.mean.""" def image_wrapped_metric_fn(predictions, features, labels, weights_fn=common_layers.weights_all): del weights_fn del features predictions, labels = reduce_dimensions(predictions, labels) return metric_fn(predictions, labels, model_hparams) return image_wrapped_metric_fn def weights_fn_for_mp(problem_task_id): return lambda x: common_layers.weights_multi_problem(x, problem_task_id) eval_metrics = {} for problem_instance in problems: problem_name = problem_instance.name if problem_instance.was_reversed: problem_name += "_rev" metrics = problem_instance.eval_metric_fns(model_hparams) if hasattr(model_hparams.problem, "task_list"): metrics = model_hparams.problem.eval_metric_fns(model_hparams) tm = problem_instance.get_hparams(model_hparams).modality["targets"] if not isinstance(tm, dict): tm = {"targets": tm} for target_name, modality in six.iteritems(tm): weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(modality)) if hasattr(model_hparams.problem, "task_list"): ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop weights_fn = weights_fn_for_mp(ptid) for metric, metric_fn in six.iteritems(metrics): overload_eval_metric_name = getattr( model_hparams, "overload_eval_metric_name", None) if len(problems) == 1 and overload_eval_metric_name: metric_name = "metrics-%s/%s/%s" % ( overload_eval_metric_name, target_name, metric) else: metric_name = "metrics-%s/%s/%s" % (problem_name, target_name, metric) if metric == Metrics.IMAGE_SUMMARY: eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn) else: eval_metrics[metric_name] = make_problem_specific_metric_fn( metric_fn, weights_fn) return eval_metrics
python
def create_evaluation_metrics(problems, model_hparams): """Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum). """ def reduce_dimensions(predictions, labels): """Reduce dimensions for high-dimensional predictions and labels.""" # We will treat first dimensions as batch. One example are video frames. if len(predictions.get_shape()) > 5: predictions_shape = common_layers.shape_list(predictions) predictions = tf.reshape( predictions, [predictions_shape[0], predictions_shape[1], -1, predictions_shape[-1]]) labels_shape = common_layers.shape_list(labels) labels = tf.reshape( labels, [labels_shape[0], labels_shape[1], -1]) return predictions, labels def make_problem_specific_metric_fn(metric_fn, weights_fn): """Create a metric fn.""" def problem_metric_fn(predictions, features, labels): """Metric fn.""" # Send along the entire features dict if the metric fn has the kwarg # "features". kwargs = {} args, _, keywords, _ = inspect.getargspec(metric_fn) if ("features" in args) or keywords: kwargs["features"] = features predictions, labels = reduce_dimensions(predictions, labels) scores, weights = metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs) return tf.metrics.mean(scores, weights) return problem_metric_fn def make_image_wrapped_metric_fn(metric_fn): """Metric fn without tf.metrics.mean.""" def image_wrapped_metric_fn(predictions, features, labels, weights_fn=common_layers.weights_all): del weights_fn del features predictions, labels = reduce_dimensions(predictions, labels) return metric_fn(predictions, labels, model_hparams) return image_wrapped_metric_fn def weights_fn_for_mp(problem_task_id): return lambda x: common_layers.weights_multi_problem(x, problem_task_id) eval_metrics = {} for problem_instance in problems: problem_name = problem_instance.name if problem_instance.was_reversed: problem_name += "_rev" metrics = problem_instance.eval_metric_fns(model_hparams) if hasattr(model_hparams.problem, "task_list"): metrics = model_hparams.problem.eval_metric_fns(model_hparams) tm = problem_instance.get_hparams(model_hparams).modality["targets"] if not isinstance(tm, dict): tm = {"targets": tm} for target_name, modality in six.iteritems(tm): weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(modality)) if hasattr(model_hparams.problem, "task_list"): ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop weights_fn = weights_fn_for_mp(ptid) for metric, metric_fn in six.iteritems(metrics): overload_eval_metric_name = getattr( model_hparams, "overload_eval_metric_name", None) if len(problems) == 1 and overload_eval_metric_name: metric_name = "metrics-%s/%s/%s" % ( overload_eval_metric_name, target_name, metric) else: metric_name = "metrics-%s/%s/%s" % (problem_name, target_name, metric) if metric == Metrics.IMAGE_SUMMARY: eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn) else: eval_metrics[metric_name] = make_problem_specific_metric_fn( metric_fn, weights_fn) return eval_metrics
[ "def", "create_evaluation_metrics", "(", "problems", ",", "model_hparams", ")", ":", "def", "reduce_dimensions", "(", "predictions", ",", "labels", ")", ":", "\"\"\"Reduce dimensions for high-dimensional predictions and labels.\"\"\"", "# We will treat first dimensions as batch. One example are video frames.", "if", "len", "(", "predictions", ".", "get_shape", "(", ")", ")", ">", "5", ":", "predictions_shape", "=", "common_layers", ".", "shape_list", "(", "predictions", ")", "predictions", "=", "tf", ".", "reshape", "(", "predictions", ",", "[", "predictions_shape", "[", "0", "]", ",", "predictions_shape", "[", "1", "]", ",", "-", "1", ",", "predictions_shape", "[", "-", "1", "]", "]", ")", "labels_shape", "=", "common_layers", ".", "shape_list", "(", "labels", ")", "labels", "=", "tf", ".", "reshape", "(", "labels", ",", "[", "labels_shape", "[", "0", "]", ",", "labels_shape", "[", "1", "]", ",", "-", "1", "]", ")", "return", "predictions", ",", "labels", "def", "make_problem_specific_metric_fn", "(", "metric_fn", ",", "weights_fn", ")", ":", "\"\"\"Create a metric fn.\"\"\"", "def", "problem_metric_fn", "(", "predictions", ",", "features", ",", "labels", ")", ":", "\"\"\"Metric fn.\"\"\"", "# Send along the entire features dict if the metric fn has the kwarg", "# \"features\".", "kwargs", "=", "{", "}", "args", ",", "_", ",", "keywords", ",", "_", "=", "inspect", ".", "getargspec", "(", "metric_fn", ")", "if", "(", "\"features\"", "in", "args", ")", "or", "keywords", ":", "kwargs", "[", "\"features\"", "]", "=", "features", "predictions", ",", "labels", "=", "reduce_dimensions", "(", "predictions", ",", "labels", ")", "scores", ",", "weights", "=", "metric_fn", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "weights_fn", ",", "*", "*", "kwargs", ")", "return", "tf", ".", "metrics", ".", "mean", "(", "scores", ",", "weights", ")", "return", "problem_metric_fn", "def", "make_image_wrapped_metric_fn", "(", "metric_fn", ")", ":", "\"\"\"Metric fn without tf.metrics.mean.\"\"\"", "def", "image_wrapped_metric_fn", "(", "predictions", ",", "features", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_all", ")", ":", "del", "weights_fn", "del", "features", "predictions", ",", "labels", "=", "reduce_dimensions", "(", "predictions", ",", "labels", ")", "return", "metric_fn", "(", "predictions", ",", "labels", ",", "model_hparams", ")", "return", "image_wrapped_metric_fn", "def", "weights_fn_for_mp", "(", "problem_task_id", ")", ":", "return", "lambda", "x", ":", "common_layers", ".", "weights_multi_problem", "(", "x", ",", "problem_task_id", ")", "eval_metrics", "=", "{", "}", "for", "problem_instance", "in", "problems", ":", "problem_name", "=", "problem_instance", ".", "name", "if", "problem_instance", ".", "was_reversed", ":", "problem_name", "+=", "\"_rev\"", "metrics", "=", "problem_instance", ".", "eval_metric_fns", "(", "model_hparams", ")", "if", "hasattr", "(", "model_hparams", ".", "problem", ",", "\"task_list\"", ")", ":", "metrics", "=", "model_hparams", ".", "problem", ".", "eval_metric_fns", "(", "model_hparams", ")", "tm", "=", "problem_instance", ".", "get_hparams", "(", "model_hparams", ")", ".", "modality", "[", "\"targets\"", "]", "if", "not", "isinstance", "(", "tm", ",", "dict", ")", ":", "tm", "=", "{", "\"targets\"", ":", "tm", "}", "for", "target_name", ",", "modality", "in", "six", ".", "iteritems", "(", "tm", ")", ":", "weights_fn", "=", "model_hparams", ".", "weights_fn", ".", "get", "(", "\"targets\"", ",", "modalities", ".", 
"get_weights_fn", "(", "modality", ")", ")", "if", "hasattr", "(", "model_hparams", ".", "problem", ",", "\"task_list\"", ")", ":", "ptid", "=", "problem_instance", ".", "task_id", "# pylint: disable=cell-var-from-loop", "weights_fn", "=", "weights_fn_for_mp", "(", "ptid", ")", "for", "metric", ",", "metric_fn", "in", "six", ".", "iteritems", "(", "metrics", ")", ":", "overload_eval_metric_name", "=", "getattr", "(", "model_hparams", ",", "\"overload_eval_metric_name\"", ",", "None", ")", "if", "len", "(", "problems", ")", "==", "1", "and", "overload_eval_metric_name", ":", "metric_name", "=", "\"metrics-%s/%s/%s\"", "%", "(", "overload_eval_metric_name", ",", "target_name", ",", "metric", ")", "else", ":", "metric_name", "=", "\"metrics-%s/%s/%s\"", "%", "(", "problem_name", ",", "target_name", ",", "metric", ")", "if", "metric", "==", "Metrics", ".", "IMAGE_SUMMARY", ":", "eval_metrics", "[", "metric_name", "]", "=", "make_image_wrapped_metric_fn", "(", "metric_fn", ")", "else", ":", "eval_metrics", "[", "metric_name", "]", "=", "make_problem_specific_metric_fn", "(", "metric_fn", ",", "weights_fn", ")", "return", "eval_metrics" ]
Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum).
[ "Creates", "the", "evaluation", "metrics", "for", "the", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L537-L638
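Each value in the returned dict has signature (predictions, features, labels) -> (value, update_op), which lines up with tf.estimator eval_metric_ops. A hedged wiring sketch; my_problem, predictions, features, and loss are placeholders for whatever the caller's model_fn already has:

# Hypothetical EVAL branch of a model_fn:
eval_metrics = create_evaluation_metrics([my_problem], model_hparams)
eval_metric_ops = {
    name: fn(predictions, features, features["targets"])
    for name, fn in eval_metrics.items()
}
# return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops)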
22,620
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
create_eager_metrics_for_problem
def create_eager_metrics_for_problem(problem, model_hparams): """See create_eager_metrics.""" metric_fns = problem.eval_metric_fns(model_hparams) problem_hparams = problem.get_hparams(model_hparams) target_modality = problem_hparams.modality["targets"] weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(target_modality)) return create_eager_metrics_internal(metric_fns, weights_fn=weights_fn)
python
def create_eager_metrics_for_problem(problem, model_hparams): """See create_eager_metrics.""" metric_fns = problem.eval_metric_fns(model_hparams) problem_hparams = problem.get_hparams(model_hparams) target_modality = problem_hparams.modality["targets"] weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(target_modality)) return create_eager_metrics_internal(metric_fns, weights_fn=weights_fn)
[ "def", "create_eager_metrics_for_problem", "(", "problem", ",", "model_hparams", ")", ":", "metric_fns", "=", "problem", ".", "eval_metric_fns", "(", "model_hparams", ")", "problem_hparams", "=", "problem", ".", "get_hparams", "(", "model_hparams", ")", "target_modality", "=", "problem_hparams", ".", "modality", "[", "\"targets\"", "]", "weights_fn", "=", "model_hparams", ".", "weights_fn", ".", "get", "(", "\"targets\"", ",", "modalities", ".", "get_weights_fn", "(", "target_modality", ")", ")", "return", "create_eager_metrics_internal", "(", "metric_fns", ",", "weights_fn", "=", "weights_fn", ")" ]
See create_eager_metrics.
[ "See", "create_eager_metrics", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L641-L649
22,621
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
word_error_rate
def word_error_rate(raw_predictions, labels, lookup=None, weights_fn=common_layers.weights_nonzero): """Calculate word error rate. Args: raw_predictions: The raw predictions. labels: The actual labels. lookup: A tf.constant mapping indices to output tokens. weights_fn: Weighting function. Returns: The word error rate. """ def from_tokens(raw, lookup_): gathered = tf.gather(lookup_, tf.cast(raw, tf.int32)) joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"") cleaned = tf.regex_replace(joined, b"_", b" ") tokens = tf.string_split(cleaned, " ") return tokens def from_characters(raw, lookup_): """Convert ascii+2 encoded codes to string-tokens.""" corrected = tf.bitcast( tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8) gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0] joined = tf.reduce_join(gathered, axis=1) cleaned = tf.regex_replace(joined, b"\0", b"") tokens = tf.string_split(cleaned, " ") return tokens if lookup is None: lookup = tf.constant([chr(i) for i in range(256)]) convert_fn = from_characters else: convert_fn = from_tokens if weights_fn is not common_layers.weights_nonzero: raise ValueError("Only weights_nonzero can be used for this metric.") with tf.variable_scope("word_error_rate", values=[raw_predictions, labels]): raw_predictions = tf.squeeze( tf.argmax(raw_predictions, axis=-1), axis=(2, 3)) labels = tf.squeeze(labels, axis=(2, 3)) reference = convert_fn(labels, lookup) predictions = convert_fn(raw_predictions, lookup) distance = tf.reduce_sum( tf.edit_distance(predictions, reference, normalize=False)) reference_length = tf.cast( tf.size(reference.values, out_type=tf.int32), dtype=tf.float32) return distance / reference_length, reference_length
python
def word_error_rate(raw_predictions, labels, lookup=None, weights_fn=common_layers.weights_nonzero): """Calculate word error rate. Args: raw_predictions: The raw predictions. labels: The actual labels. lookup: A tf.constant mapping indices to output tokens. weights_fn: Weighting function. Returns: The word error rate. """ def from_tokens(raw, lookup_): gathered = tf.gather(lookup_, tf.cast(raw, tf.int32)) joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"") cleaned = tf.regex_replace(joined, b"_", b" ") tokens = tf.string_split(cleaned, " ") return tokens def from_characters(raw, lookup_): """Convert ascii+2 encoded codes to string-tokens.""" corrected = tf.bitcast( tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8) gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0] joined = tf.reduce_join(gathered, axis=1) cleaned = tf.regex_replace(joined, b"\0", b"") tokens = tf.string_split(cleaned, " ") return tokens if lookup is None: lookup = tf.constant([chr(i) for i in range(256)]) convert_fn = from_characters else: convert_fn = from_tokens if weights_fn is not common_layers.weights_nonzero: raise ValueError("Only weights_nonzero can be used for this metric.") with tf.variable_scope("word_error_rate", values=[raw_predictions, labels]): raw_predictions = tf.squeeze( tf.argmax(raw_predictions, axis=-1), axis=(2, 3)) labels = tf.squeeze(labels, axis=(2, 3)) reference = convert_fn(labels, lookup) predictions = convert_fn(raw_predictions, lookup) distance = tf.reduce_sum( tf.edit_distance(predictions, reference, normalize=False)) reference_length = tf.cast( tf.size(reference.values, out_type=tf.int32), dtype=tf.float32) return distance / reference_length, reference_length
[ "def", "word_error_rate", "(", "raw_predictions", ",", "labels", ",", "lookup", "=", "None", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "def", "from_tokens", "(", "raw", ",", "lookup_", ")", ":", "gathered", "=", "tf", ".", "gather", "(", "lookup_", ",", "tf", ".", "cast", "(", "raw", ",", "tf", ".", "int32", ")", ")", "joined", "=", "tf", ".", "regex_replace", "(", "tf", ".", "reduce_join", "(", "gathered", ",", "axis", "=", "1", ")", ",", "b\"<EOS>.*\"", ",", "b\"\"", ")", "cleaned", "=", "tf", ".", "regex_replace", "(", "joined", ",", "b\"_\"", ",", "b\" \"", ")", "tokens", "=", "tf", ".", "string_split", "(", "cleaned", ",", "\" \"", ")", "return", "tokens", "def", "from_characters", "(", "raw", ",", "lookup_", ")", ":", "\"\"\"Convert ascii+2 encoded codes to string-tokens.\"\"\"", "corrected", "=", "tf", ".", "bitcast", "(", "tf", ".", "clip_by_value", "(", "tf", ".", "subtract", "(", "raw", ",", "2", ")", ",", "0", ",", "255", ")", ",", "tf", ".", "uint8", ")", "gathered", "=", "tf", ".", "gather", "(", "lookup_", ",", "tf", ".", "cast", "(", "corrected", ",", "tf", ".", "int32", ")", ")", "[", ":", ",", ":", ",", "0", "]", "joined", "=", "tf", ".", "reduce_join", "(", "gathered", ",", "axis", "=", "1", ")", "cleaned", "=", "tf", ".", "regex_replace", "(", "joined", ",", "b\"\\0\"", ",", "b\"\"", ")", "tokens", "=", "tf", ".", "string_split", "(", "cleaned", ",", "\" \"", ")", "return", "tokens", "if", "lookup", "is", "None", ":", "lookup", "=", "tf", ".", "constant", "(", "[", "chr", "(", "i", ")", "for", "i", "in", "range", "(", "256", ")", "]", ")", "convert_fn", "=", "from_characters", "else", ":", "convert_fn", "=", "from_tokens", "if", "weights_fn", "is", "not", "common_layers", ".", "weights_nonzero", ":", "raise", "ValueError", "(", "\"Only weights_nonzero can be used for this metric.\"", ")", "with", "tf", ".", "variable_scope", "(", "\"word_error_rate\"", ",", "values", "=", "[", "raw_predictions", ",", "labels", "]", ")", ":", "raw_predictions", "=", "tf", ".", "squeeze", "(", "tf", ".", "argmax", "(", "raw_predictions", ",", "axis", "=", "-", "1", ")", ",", "axis", "=", "(", "2", ",", "3", ")", ")", "labels", "=", "tf", ".", "squeeze", "(", "labels", ",", "axis", "=", "(", "2", ",", "3", ")", ")", "reference", "=", "convert_fn", "(", "labels", ",", "lookup", ")", "predictions", "=", "convert_fn", "(", "raw_predictions", ",", "lookup", ")", "distance", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "edit_distance", "(", "predictions", ",", "reference", ",", "normalize", "=", "False", ")", ")", "reference_length", "=", "tf", ".", "cast", "(", "tf", ".", "size", "(", "reference", ".", "values", ",", "out_type", "=", "tf", ".", "int32", ")", ",", "dtype", "=", "tf", ".", "float32", ")", "return", "distance", "/", "reference_length", ",", "reference_length" ]
Calculate word error rate. Args: raw_predictions: The raw predictions. labels: The actual labels. lookup: A tf.constant mapping indices to output tokens. weights_fn: Weighting function. Returns: The word error rate.
[ "Calculate", "word", "error", "rate", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L704-L761
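The quantity computed is total word-level edit distance divided by total reference length. A tiny pure-Python worked example of that arithmetic (no TF, just the definition):

def edit_distance(a, b):
  # Classic one-row dynamic-programming Levenshtein distance over word lists.
  d = list(range(len(b) + 1))
  for i, x in enumerate(a, 1):
    prev, d[0] = d[0], i
    for j, y in enumerate(b, 1):
      prev, d[j] = d[j], min(d[j] + 1, d[j - 1] + 1, prev + (x != y))
  return d[-1]

ref = "the cat sat".split()
hyp = "the cat sat down".split()  # one insertion
print(edit_distance(hyp, ref) / len(ref))  # 1 / 3 ~ 0.333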
22,622
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
pearson_correlation_coefficient
def pearson_correlation_coefficient(predictions, labels, weights_fn=None): """Calculate pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The pearson correlation coefficient. """ del weights_fn _, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions, labels) return pearson, tf.constant(1.0)
python
def pearson_correlation_coefficient(predictions, labels, weights_fn=None): """Calculate pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The pearson correlation coefficient. """ del weights_fn _, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions, labels) return pearson, tf.constant(1.0)
[ "def", "pearson_correlation_coefficient", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "del", "weights_fn", "_", ",", "pearson", "=", "tf", ".", "contrib", ".", "metrics", ".", "streaming_pearson_correlation", "(", "predictions", ",", "labels", ")", "return", "pearson", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The pearson correlation coefficient.
[ "Calculate", "pearson", "correlation", "coefficient", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L764-L778
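A usage sketch with made-up values; note that tf.contrib was removed in TF 2.x, so this assumes a 1.x runtime:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
y = tf.constant([2.0, 4.0, 6.0, 8.0])  # perfectly linear in x
pearson, update_op = tf.contrib.metrics.streaming_pearson_correlation(x, y)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(pearson))  # ~1.0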
22,623
tensorflow/tensor2tensor
tensor2tensor/models/research/attention_lm.py
attention_lm_decoder
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name="decoder"): """A stack of attention_lm layers. Args: decoder_input: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor """ x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = common_layers.conv_hidden_relu( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
python
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name="decoder"): """A stack of attention_lm layers. Args: decoder_input: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor """ x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = common_layers.conv_hidden_relu( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
[ "def", "attention_lm_decoder", "(", "decoder_input", ",", "decoder_self_attention_bias", ",", "hparams", ",", "name", "=", "\"decoder\"", ")", ":", "x", "=", "decoder_input", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "for", "layer", "in", "range", "(", "hparams", ".", "num_hidden_layers", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"layer_%d\"", "%", "layer", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"self_attention\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "None", ",", "decoder_self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "with", "tf", ".", "variable_scope", "(", "\"ffn\"", ")", ":", "y", "=", "common_layers", ".", "conv_hidden_relu", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ".", "filter_size", ",", "hparams", ".", "hidden_size", ",", "dropout", "=", "hparams", ".", "relu_dropout", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "return", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")" ]
A stack of attention_lm layers. Args: decoder_input: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor
[ "A", "stack", "of", "attention_lm", "layers", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm.py#L92-L127
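Stripped of hyperparameter plumbing, each decoder layer is the same preprocess/sublayer/postprocess sandwich. A schematic sketch, not the repo's code; layer_norm and dropout stand in for whatever hparams.layer_preprocess_sequence and layer_postprocess_sequence configure:

def decoder_layer_sketch(x, self_attention, ffn, layer_norm, dropout):
  # Self-attention sublayer: pre-normalize, transform, residual-add.
  y = self_attention(layer_norm(x))
  x = x + dropout(y)
  # Position-wise feed-forward sublayer, same sandwich.
  y = ffn(layer_norm(x))
  x = x + dropout(y)
  return x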
22,624
tensorflow/tensor2tensor
tensor2tensor/models/research/attention_lm.py
attention_lm_small
def attention_lm_small(): """Cheap model. on lm1b_32k: 45M params 2 steps/sec on [GeForce GTX TITAN X] Returns: an hparams object. """ hparams = attention_lm_base() hparams.num_hidden_layers = 4 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.layer_prepostprocess_dropout = 0.5 return hparams
python
def attention_lm_small(): """Cheap model. on lm1b_32k: 45M params 2 steps/sec on [GeForce GTX TITAN X] Returns: an hparams object. """ hparams = attention_lm_base() hparams.num_hidden_layers = 4 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.layer_prepostprocess_dropout = 0.5 return hparams
[ "def", "attention_lm_small", "(", ")", ":", "hparams", "=", "attention_lm_base", "(", ")", "hparams", ".", "num_hidden_layers", "=", "4", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.5", "return", "hparams" ]
Cheap model. on lm1b_32k: 45M params 2 steps/sec on [GeForce GTX TITAN X] Returns: an hparams object.
[ "Cheap", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm.py#L167-L182
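Hparams sets like this are consumed by starting from the returned object and overriding fields as needed; a small sketch using the module's import path (the hidden_size override is just an illustration):

from tensor2tensor.models.research import attention_lm

hparams = attention_lm.attention_lm_small()
print(hparams.num_hidden_layers)  # 4
hparams.hidden_size = 256  # e.g. halve the width for an even cheaper run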
22,625
tensorflow/tensor2tensor
tensor2tensor/utils/bleu_hook.py
bleu_score
def bleu_score(predictions, labels, **unused_kwargs): """BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: float, approx bleu score """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return bleu, tf.constant(1.0)
python
def bleu_score(predictions, labels, **unused_kwargs): """BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: float, approx bleu score """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return bleu, tf.constant(1.0)
[ "def", "bleu_score", "(", "predictions", ",", "labels", ",", "*", "*", "unused_kwargs", ")", ":", "outputs", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "# Convert the outputs and labels to a [batch_size, input_length] tensor.", "outputs", "=", "tf", ".", "squeeze", "(", "outputs", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", "labels", "=", "tf", ".", "squeeze", "(", "labels", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", "bleu", "=", "tf", ".", "py_func", "(", "compute_bleu", ",", "(", "labels", ",", "outputs", ")", ",", "tf", ".", "float32", ")", "return", "bleu", ",", "tf", ".", "constant", "(", "1.0", ")" ]
BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: float, approx bleu score
[ "BLEU", "score", "computation", "between", "labels", "and", "predictions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/bleu_hook.py#L132-L152
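Since the metric compares raw id sequences rather than detokenized text, calling the wrapped compute_bleu helper directly shows what it measures; the toy id lists below are made up:

from tensor2tensor.utils import bleu_hook

references = [[1, 2, 3, 4], [5, 6, 7]]
hypotheses = [[1, 2, 3, 4], [5, 6, 8]]  # second sequence diverges in its last id
print(bleu_hook.compute_bleu(references, hypotheses))  # < 1.0 because of the mismatch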
22,626
tensorflow/tensor2tensor
tensor2tensor/utils/bleu_hook.py
bleu_tokenize
def bleu_tokenize(string): r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens """ string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) string = uregex.punct_nondigit_re.sub(r" \1 \2", string) string = uregex.symbol_re.sub(r" \1 ", string) return string.split()
python
def bleu_tokenize(string): r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens """ string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) string = uregex.punct_nondigit_re.sub(r" \1 \2", string) string = uregex.symbol_re.sub(r" \1 ", string) return string.split()
[ "def", "bleu_tokenize", "(", "string", ")", ":", "string", "=", "uregex", ".", "nondigit_punct_re", ".", "sub", "(", "r\"\\1 \\2 \"", ",", "string", ")", "string", "=", "uregex", ".", "punct_nondigit_re", ".", "sub", "(", "r\" \\1 \\2\"", ",", "string", ")", "string", "=", "uregex", ".", "symbol_re", ".", "sub", "(", "r\" \\1 \"", ",", "string", ")", "return", "string", ".", "split", "(", ")" ]
r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens
[ "r", "Tokenize", "a", "string", "following", "the", "official", "BLEU", "implementation", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/bleu_hook.py#L172-L199
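On a small input the splitting rules are easy to see: punctuation between digits stays attached, punctuation next to non-digits is split off. The expected output is shown as a comment, assuming the uregex patterns behave as the docstring describes:

from tensor2tensor.utils import bleu_hook

print(bleu_hook.bleu_tokenize("Hello, world! It costs 1,000.50 dollars."))
# roughly: ['Hello', ',', 'world', '!', 'It', 'costs', '1,000.50', 'dollars', '.']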
22,627
tensorflow/tensor2tensor
tensor2tensor/utils/bleu_hook.py
_try_twice_tf_glob
def _try_twice_tf_glob(pattern): """Glob twice, first time possibly catching `NotFoundError`. tf.gfile.Glob may crash with ``` tensorflow.python.framework.errors_impl.NotFoundError: xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; No such file or directory ``` Standard glob.glob does not have this bug, but does not handle multiple filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly catching the `NotFoundError`. Args: pattern: str, glob pattern. Returns: list<str> matching filepaths. """ try: return tf.gfile.Glob(pattern) except tf.errors.NotFoundError: return tf.gfile.Glob(pattern)
python
def _try_twice_tf_glob(pattern): """Glob twice, first time possibly catching `NotFoundError`. tf.gfile.Glob may crash with ``` tensorflow.python.framework.errors_impl.NotFoundError: xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; No such file or directory ``` Standard glob.glob does not have this bug, but does not handle multiple filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly catching the `NotFoundError`. Args: pattern: str, glob pattern. Returns: list<str> matching filepaths. """ try: return tf.gfile.Glob(pattern) except tf.errors.NotFoundError: return tf.gfile.Glob(pattern)
[ "def", "_try_twice_tf_glob", "(", "pattern", ")", ":", "try", ":", "return", "tf", ".", "gfile", ".", "Glob", "(", "pattern", ")", "except", "tf", ".", "errors", ".", "NotFoundError", ":", "return", "tf", ".", "gfile", ".", "Glob", "(", "pattern", ")" ]
Glob twice, first time possibly catching `NotFoundError`. tf.gfile.Glob may crash with ``` tensorflow.python.framework.errors_impl.NotFoundError: xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; No such file or directory ``` Standard glob.glob does not have this bug, but does not handle multiple filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly catching the `NotFoundError`. Args: pattern: str, glob pattern. Returns: list<str> matching filepaths.
[ "Glob", "twice", "first", "time", "possibly", "catching", "NotFoundError", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/bleu_hook.py#L221-L245
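The retry-once idea generalizes beyond globbing; a hedged sketch of the same pattern as a reusable helper (the helper name is mine):

def try_twice(fn, exc):
  # Call fn; on the first occurrence of exc, retry exactly once.
  try:
    return fn()
  except exc:
    return fn()

# _try_twice_tf_glob(pattern) is then equivalent to
# try_twice(lambda: tf.gfile.Glob(pattern), tf.errors.NotFoundError)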
22,628
tensorflow/tensor2tensor
tensor2tensor/utils/bleu_hook.py
_read_stepfiles_list
def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0): """Return list of StepFiles sorted by step from files at path_prefix.""" stepfiles = [] for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix): basename = filename[:-len(path_suffix)] if path_suffix else filename try: steps = int(basename.rsplit("-")[-1]) except ValueError: # The -[0-9]* part is not an integer. continue if steps < min_steps: continue if not os.path.exists(filename): tf.logging.info(filename + " was deleted, so skipping it") continue stepfiles.append(StepFile(basename, os.path.getmtime(filename), os.path.getctime(filename), steps)) return sorted(stepfiles, key=lambda x: -x.steps)
python
def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0): """Return list of StepFiles sorted by step from files at path_prefix.""" stepfiles = [] for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix): basename = filename[:-len(path_suffix)] if path_suffix else filename try: steps = int(basename.rsplit("-")[-1]) except ValueError: # The -[0-9]* part is not an integer. continue if steps < min_steps: continue if not os.path.exists(filename): tf.logging.info(filename + " was deleted, so skipping it") continue stepfiles.append(StepFile(basename, os.path.getmtime(filename), os.path.getctime(filename), steps)) return sorted(stepfiles, key=lambda x: -x.steps)
[ "def", "_read_stepfiles_list", "(", "path_prefix", ",", "path_suffix", "=", "\".index\"", ",", "min_steps", "=", "0", ")", ":", "stepfiles", "=", "[", "]", "for", "filename", "in", "_try_twice_tf_glob", "(", "path_prefix", "+", "\"*-[0-9]*\"", "+", "path_suffix", ")", ":", "basename", "=", "filename", "[", ":", "-", "len", "(", "path_suffix", ")", "]", "if", "path_suffix", "else", "filename", "try", ":", "steps", "=", "int", "(", "basename", ".", "rsplit", "(", "\"-\"", ")", "[", "-", "1", "]", ")", "except", "ValueError", ":", "# The -[0-9]* part is not an integer.", "continue", "if", "steps", "<", "min_steps", ":", "continue", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "tf", ".", "logging", ".", "info", "(", "filename", "+", "\" was deleted, so skipping it\"", ")", "continue", "stepfiles", ".", "append", "(", "StepFile", "(", "basename", ",", "os", ".", "path", ".", "getmtime", "(", "filename", ")", ",", "os", ".", "path", ".", "getctime", "(", "filename", ")", ",", "steps", ")", ")", "return", "sorted", "(", "stepfiles", ",", "key", "=", "lambda", "x", ":", "-", "x", ".", "steps", ")" ]
Return list of StepFiles sorted by step from files at path_prefix.
[ "Return", "list", "of", "StepFiles", "sorted", "by", "step", "from", "files", "at", "path_prefix", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/bleu_hook.py#L248-L264
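The step count is parsed from the trailing -<digits> of the basename; for a typical checkpoint index file:

# "model.ckpt-1130761.index" -> basename "model.ckpt-1130761"
basename = "model.ckpt-1130761"
print(int(basename.rsplit("-")[-1]))  # 1130761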
22,629
tensorflow/tensor2tensor
tensor2tensor/utils/bleu_hook.py
stepfiles_iterator
def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0, path_suffix=".index", sleep_sec=10): """Continuously yield new files with steps in filename as they appear. This is useful for checkpoint files or other files whose names differ just in an integer marking the number of steps and match the wildcard path_prefix + "*-[0-9]*" + path_suffix. Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always starts from the oldest files (and it cannot miss any file). Note that the oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is up to the user to check that the files returned by this generator actually exist. Args: path_prefix: The directory + possible common filename prefix to the files. wait_minutes: The maximum amount of minutes to wait between files. min_steps: Skip files with lower global step. path_suffix: Common filename suffix (after steps), including possible extension dot. sleep_sec: How often to check for new files. Yields: named tuples (filename, mtime, ctime, steps) of the files as they arrive. """ # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let # path_prefix="D/". if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix): path_prefix += os.sep stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) tf.logging.info("Found %d files with steps: %s", len(stepfiles), ", ".join(str(x.steps) for x in reversed(stepfiles))) exit_time = time.time() + wait_minutes * 60 while True: if not stepfiles and wait_minutes: tf.logging.info( "Waiting till %s if a new file matching %s*-[0-9]*%s appears", time.asctime(time.localtime(exit_time)), path_prefix, path_suffix) while True: stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) if stepfiles or time.time() > exit_time: break time.sleep(sleep_sec) if not stepfiles: return stepfile = stepfiles.pop() exit_time, min_steps = (stepfile.ctime + wait_minutes * 60, stepfile.steps + 1) yield stepfile
python
def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0, path_suffix=".index", sleep_sec=10): """Continuously yield new files with steps in filename as they appear. This is useful for checkpoint files or other files whose names differ just in an integer marking the number of steps and match the wildcard path_prefix + "*-[0-9]*" + path_suffix. Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always starts from the oldest files (and it cannot miss any file). Note that the oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is up to the user to check that the files returned by this generator actually exist. Args: path_prefix: The directory + possible common filename prefix to the files. wait_minutes: The maximum amount of minutes to wait between files. min_steps: Skip files with lower global step. path_suffix: Common filename suffix (after steps), including possible extension dot. sleep_sec: How often to check for new files. Yields: named tuples (filename, mtime, ctime, steps) of the files as they arrive. """ # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let # path_prefix="D/". if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix): path_prefix += os.sep stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) tf.logging.info("Found %d files with steps: %s", len(stepfiles), ", ".join(str(x.steps) for x in reversed(stepfiles))) exit_time = time.time() + wait_minutes * 60 while True: if not stepfiles and wait_minutes: tf.logging.info( "Waiting till %s if a new file matching %s*-[0-9]*%s appears", time.asctime(time.localtime(exit_time)), path_prefix, path_suffix) while True: stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) if stepfiles or time.time() > exit_time: break time.sleep(sleep_sec) if not stepfiles: return stepfile = stepfiles.pop() exit_time, min_steps = (stepfile.ctime + wait_minutes * 60, stepfile.steps + 1) yield stepfile
[ "def", "stepfiles_iterator", "(", "path_prefix", ",", "wait_minutes", "=", "0", ",", "min_steps", "=", "0", ",", "path_suffix", "=", "\".index\"", ",", "sleep_sec", "=", "10", ")", ":", "# Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let", "# path_prefix=\"D/\".", "if", "not", "path_prefix", ".", "endswith", "(", "os", ".", "sep", ")", "and", "os", ".", "path", ".", "isdir", "(", "path_prefix", ")", ":", "path_prefix", "+=", "os", ".", "sep", "stepfiles", "=", "_read_stepfiles_list", "(", "path_prefix", ",", "path_suffix", ",", "min_steps", ")", "tf", ".", "logging", ".", "info", "(", "\"Found %d files with steps: %s\"", ",", "len", "(", "stepfiles", ")", ",", "\", \"", ".", "join", "(", "str", "(", "x", ".", "steps", ")", "for", "x", "in", "reversed", "(", "stepfiles", ")", ")", ")", "exit_time", "=", "time", ".", "time", "(", ")", "+", "wait_minutes", "*", "60", "while", "True", ":", "if", "not", "stepfiles", "and", "wait_minutes", ":", "tf", ".", "logging", ".", "info", "(", "\"Waiting till %s if a new file matching %s*-[0-9]*%s appears\"", ",", "time", ".", "asctime", "(", "time", ".", "localtime", "(", "exit_time", ")", ")", ",", "path_prefix", ",", "path_suffix", ")", "while", "True", ":", "stepfiles", "=", "_read_stepfiles_list", "(", "path_prefix", ",", "path_suffix", ",", "min_steps", ")", "if", "stepfiles", "or", "time", ".", "time", "(", ")", ">", "exit_time", ":", "break", "time", ".", "sleep", "(", "sleep_sec", ")", "if", "not", "stepfiles", ":", "return", "stepfile", "=", "stepfiles", ".", "pop", "(", ")", "exit_time", ",", "min_steps", "=", "(", "stepfile", ".", "ctime", "+", "wait_minutes", "*", "60", ",", "stepfile", ".", "steps", "+", "1", ")", "yield", "stepfile" ]
Continuously yield new files with steps in filename as they appear. This is useful for checkpoint files or other files whose names differ just in an integer marking the number of steps and match the wildcard path_prefix + "*-[0-9]*" + path_suffix. Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always starts from the oldest files (and it cannot miss any file). Note that the oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is up to the user to check that the files returned by this generator actually exist. Args: path_prefix: The directory + possible common filename prefix to the files. wait_minutes: The maximum amount of minutes to wait between files. min_steps: Skip files with lower global step. path_suffix: Common filename suffix (after steps), including possible extension dot. sleep_sec: How often to check for new files. Yields: named tuples (filename, mtime, ctime, steps) of the files as they arrive.
[ "Continuously", "yield", "new", "files", "with", "steps", "in", "filename", "as", "they", "appear", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/bleu_hook.py#L267-L317
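A hedged usage sketch: processing every checkpoint in a training directory as it appears, waiting up to an hour for new ones (run_eval is a placeholder for the caller's routine):

for stepfile in stepfiles_iterator("/tmp/train_dir/model.ckpt", wait_minutes=60):
  # stepfile.filename is the checkpoint basename, stepfile.steps the global step.
  print("found checkpoint", stepfile.filename, "at step", stepfile.steps)
  # run_eval(stepfile.filename)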
22,630
tensorflow/tensor2tensor
tensor2tensor/data_generators/vqa.py
_get_vqa_v2_annotations
def _get_vqa_v2_annotations(directory, annotation_url, annotation_filename="vqa_v2.tar.gz"): """Extract the VQA V2 annotation files to directory unless it's there.""" annotation_file = generator_utils.maybe_download_from_drive( directory, annotation_filename, annotation_url) with tarfile.open(annotation_file, "r:gz") as annotation_tar: annotation_tar.extractall(directory)
python
def _get_vqa_v2_annotations(directory, annotation_url, annotation_filename="vqa_v2.tar.gz"): """Extract the VQA V2 annotation files to directory unless it's there.""" annotation_file = generator_utils.maybe_download_from_drive( directory, annotation_filename, annotation_url) with tarfile.open(annotation_file, "r:gz") as annotation_tar: annotation_tar.extractall(directory)
[ "def", "_get_vqa_v2_annotations", "(", "directory", ",", "annotation_url", ",", "annotation_filename", "=", "\"vqa_v2.tar.gz\"", ")", ":", "annotation_file", "=", "generator_utils", ".", "maybe_download_from_drive", "(", "directory", ",", "annotation_filename", ",", "annotation_url", ")", "with", "tarfile", ".", "open", "(", "annotation_file", ",", "\"r:gz\"", ")", "as", "annotation_tar", ":", "annotation_tar", ".", "extractall", "(", "directory", ")" ]
Extract the VQA V2 annotation files to directory unless it's there.
[ "Extract", "the", "VQA", "V2", "annotation", "files", "to", "directory", "unless", "it", "s", "there", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa.py#L44-L51
22,631
tensorflow/tensor2tensor
tensor2tensor/data_generators/vqa.py
_get_vqa_v2_image_raw_dataset
def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls): """Extract the VQA V2 image data set to directory unless it's there.""" for url in image_urls: filename = os.path.basename(url) download_url = os.path.join(image_root_url, url) path = generator_utils.maybe_download(directory, filename, download_url) unzip_dir = os.path.join(directory, filename.strip(".zip")) if not tf.gfile.Exists(unzip_dir): zipfile.ZipFile(path, "r").extractall(directory)
python
def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls): """Extract the VQA V2 image data set to directory unless it's there.""" for url in image_urls: filename = os.path.basename(url) download_url = os.path.join(image_root_url, url) path = generator_utils.maybe_download(directory, filename, download_url) unzip_dir = os.path.join(directory, filename.strip(".zip")) if not tf.gfile.Exists(unzip_dir): zipfile.ZipFile(path, "r").extractall(directory)
[ "def", "_get_vqa_v2_image_raw_dataset", "(", "directory", ",", "image_root_url", ",", "image_urls", ")", ":", "for", "url", "in", "image_urls", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "download_url", "=", "os", ".", "path", ".", "join", "(", "image_root_url", ",", "url", ")", "path", "=", "generator_utils", ".", "maybe_download", "(", "directory", ",", "filename", ",", "download_url", ")", "unzip_dir", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ".", "strip", "(", "\".zip\"", ")", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "unzip_dir", ")", ":", "zipfile", ".", "ZipFile", "(", "path", ",", "\"r\"", ")", ".", "extractall", "(", "directory", ")" ]
Extract the VQA V2 image data set to directory unless it's there.
[ "Extract", "the", "VQA", "V2", "image", "data", "set", "to", "directory", "unless", "it", "s", "there", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa.py#L54-L62
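One subtlety in the code above: filename.strip(".zip") strips any leading/trailing characters from the set {., z, i, p}, not the literal suffix. It happens to work for names like train2014.zip, but a suffix-safe spelling avoids surprises; a sketch, not the repo's code:

filename = "train2014.zip"
unzip_name = filename[:-len(".zip")] if filename.endswith(".zip") else filename
print(unzip_name)  # train2014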
22,632
tensorflow/tensor2tensor
tensor2tensor/data_generators/vqa.py
_get_vqa_v2_image_feature_dataset
def _get_vqa_v2_image_feature_dataset( directory, feature_url, feature_filename="mscoco_feat.tar.gz"): """Extract the VQA V2 feature data set to directory unless it's there.""" feature_file = generator_utils.maybe_download_from_drive( directory, feature_filename, feature_url) with tarfile.open(feature_file, "r:gz") as feature_tar: feature_tar.extractall(directory)
python
def _get_vqa_v2_image_feature_dataset( directory, feature_url, feature_filename="mscoco_feat.tar.gz"): """Extract the VQA V2 feature data set to directory unless it's there.""" feature_file = generator_utils.maybe_download_from_drive( directory, feature_filename, feature_url) with tarfile.open(feature_file, "r:gz") as feature_tar: feature_tar.extractall(directory)
[ "def", "_get_vqa_v2_image_feature_dataset", "(", "directory", ",", "feature_url", ",", "feature_filename", "=", "\"mscoco_feat.tar.gz\"", ")", ":", "feature_file", "=", "generator_utils", ".", "maybe_download_from_drive", "(", "directory", ",", "feature_filename", ",", "feature_url", ")", "with", "tarfile", ".", "open", "(", "feature_file", ",", "\"r:gz\"", ")", "as", "feature_tar", ":", "feature_tar", ".", "extractall", "(", "directory", ")" ]
Extract the VQA V2 feature data set to directory unless it's there.
[ "Extract", "the", "VQA", "V2", "feature", "data", "set", "to", "directory", "unless", "it", "s", "there", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa.py#L65-L71
22,633
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
_parse_fail
def _parse_fail(name, var_type, value, values): """Helper function for raising a value error for bad assignment.""" raise ValueError( 'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' % (name, var_type.__name__, value, values))
python
def _parse_fail(name, var_type, value, values): """Helper function for raising a value error for bad assignment.""" raise ValueError( 'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' % (name, var_type.__name__, value, values))
[ "def", "_parse_fail", "(", "name", ",", "var_type", ",", "value", ",", "values", ")", ":", "raise", "ValueError", "(", "'Could not parse hparam \\'%s\\' of type \\'%s\\' with value \\'%s\\' in %s'", "%", "(", "name", ",", "var_type", ".", "__name__", ",", "value", ",", "values", ")", ")" ]
Helper function for raising a value error for bad assignment.
[ "Helper", "function", "for", "raising", "a", "value", "error", "for", "bad", "assignment", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L42-L46
22,634
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
_process_scalar_value
def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used. """ try: parsed_value = parse_fn(m_dict['val']) except ValueError: _parse_fail(name, var_type, m_dict['val'], values) # If no index is provided if not m_dict['index']: if name in results_dictionary: _reuse_fail(name, values) results_dictionary[name] = parsed_value else: if name in results_dictionary: # The name has already been used as a scalar, then it # will be in this dictionary and map to a non-dictionary. if not isinstance(results_dictionary.get(name), dict): _reuse_fail(name, values) else: results_dictionary[name] = {} index = int(m_dict['index']) # Make sure the index position hasn't already been assigned a value. if index in results_dictionary[name]: _reuse_fail('{}[{}]'.format(name, index), values) results_dictionary[name][index] = parsed_value
python
def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used. """ try: parsed_value = parse_fn(m_dict['val']) except ValueError: _parse_fail(name, var_type, m_dict['val'], values) # If no index is provided if not m_dict['index']: if name in results_dictionary: _reuse_fail(name, values) results_dictionary[name] = parsed_value else: if name in results_dictionary: # The name has already been used as a scalar, then it # will be in this dictionary and map to a non-dictionary. if not isinstance(results_dictionary.get(name), dict): _reuse_fail(name, values) else: results_dictionary[name] = {} index = int(m_dict['index']) # Make sure the index position hasn't already been assigned a value. if index in results_dictionary[name]: _reuse_fail('{}[{}]'.format(name, index), values) results_dictionary[name][index] = parsed_value
[ "def", "_process_scalar_value", "(", "name", ",", "parse_fn", ",", "var_type", ",", "m_dict", ",", "values", ",", "results_dictionary", ")", ":", "try", ":", "parsed_value", "=", "parse_fn", "(", "m_dict", "[", "'val'", "]", ")", "except", "ValueError", ":", "_parse_fail", "(", "name", ",", "var_type", ",", "m_dict", "[", "'val'", "]", ",", "values", ")", "# If no index is provided", "if", "not", "m_dict", "[", "'index'", "]", ":", "if", "name", "in", "results_dictionary", ":", "_reuse_fail", "(", "name", ",", "values", ")", "results_dictionary", "[", "name", "]", "=", "parsed_value", "else", ":", "if", "name", "in", "results_dictionary", ":", "# The name has already been used as a scalar, then it", "# will be in this dictionary and map to a non-dictionary.", "if", "not", "isinstance", "(", "results_dictionary", ".", "get", "(", "name", ")", ",", "dict", ")", ":", "_reuse_fail", "(", "name", ",", "values", ")", "else", ":", "results_dictionary", "[", "name", "]", "=", "{", "}", "index", "=", "int", "(", "m_dict", "[", "'index'", "]", ")", "# Make sure the index position hasn't already been assigned a value.", "if", "index", "in", "results_dictionary", "[", "name", "]", ":", "_reuse_fail", "(", "'{}[{}]'", ".", "format", "(", "name", ",", "index", ")", ",", "values", ")", "results_dictionary", "[", "name", "]", "[", "index", "]", "=", "parsed_value" ]
Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used.
[ "Update", "results_dictionary", "with", "a", "scalar", "value", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L55-L101
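To make the m_dict contract of _process_scalar_value concrete, here is a small hedged usage sketch, assuming tensor2tensor is installed; the hand-built dictionaries mimic what PARAM_RE.match(...).groupdict() would produce for "x=5" and "arr[1]=3".

from tensor2tensor.utils.hparam import _process_scalar_value

results = {}
# Plain scalar assignment "x=5": no index, so the value is stored directly.
_process_scalar_value('x', int, int, {'val': '5', 'index': None},
                      'x=5,arr[1]=3', results)
# Indexed assignment "arr[1]=3": stored as an {index: value} dictionary.
_process_scalar_value('arr', int, int, {'val': '3', 'index': '1'},
                      'x=5,arr[1]=3', results)
assert results == {'x': 5, 'arr': {1: 3}}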
22,635
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
_process_list_value
def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary from a list of values. Used to update results_dictionary to be returned by parse_values when encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) Mutates results_dictionary. Args: name: Name of variable in assignment ("arr"). parse_fn: Function for parsing individual values. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has an index or the values cannot be parsed. """ if m_dict['index'] is not None: raise ValueError('Assignment of a list to a list index.') elements = filter(None, re.split('[ ,]', m_dict['vals'])) # Make sure the name hasn't already been assigned a value if name in results_dictionary: raise _reuse_fail(name, values) try: results_dictionary[name] = [parse_fn(e) for e in elements] except ValueError: _parse_fail(name, var_type, m_dict['vals'], values)
python
def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary from a list of values. Used to update results_dictionary to be returned by parse_values when encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) Mutates results_dictionary. Args: name: Name of variable in assignment ("arr"). parse_fn: Function for parsing individual values. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has an index or the values cannot be parsed. """ if m_dict['index'] is not None: raise ValueError('Assignment of a list to a list index.') elements = filter(None, re.split('[ ,]', m_dict['vals'])) # Make sure the name hasn't already been assigned a value if name in results_dictionary: raise _reuse_fail(name, values) try: results_dictionary[name] = [parse_fn(e) for e in elements] except ValueError: _parse_fail(name, var_type, m_dict['vals'], values)
[ "def", "_process_list_value", "(", "name", ",", "parse_fn", ",", "var_type", ",", "m_dict", ",", "values", ",", "results_dictionary", ")", ":", "if", "m_dict", "[", "'index'", "]", "is", "not", "None", ":", "raise", "ValueError", "(", "'Assignment of a list to a list index.'", ")", "elements", "=", "filter", "(", "None", ",", "re", ".", "split", "(", "'[ ,]'", ",", "m_dict", "[", "'vals'", "]", ")", ")", "# Make sure the name hasn't already been assigned a value", "if", "name", "in", "results_dictionary", ":", "raise", "_reuse_fail", "(", "name", ",", "values", ")", "try", ":", "results_dictionary", "[", "name", "]", "=", "[", "parse_fn", "(", "e", ")", "for", "e", "in", "elements", "]", "except", "ValueError", ":", "_parse_fail", "(", "name", ",", "var_type", ",", "m_dict", "[", "'vals'", "]", ",", "values", ")" ]
Update results_dictionary from a list of values. Used to update results_dictionary to be returned by parse_values when encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) Mutates results_dictionary. Args: name: Name of variable in assignment ("arr"). parse_fn: Function for parsing individual values. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has an index or the values cannot be parsed.
[ "Update", "results_dictionary", "from", "a", "list", "of", "values", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L104-L135
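For the list branch, the same kind of hedged sketch applies; note that elements are split on both spaces and commas with empty strings filtered out, so mixed separators inside the brackets parse cleanly. (The bare raise in front of _reuse_fail above is redundant: assuming _reuse_fail raises ValueError itself, like _parse_fail, the exception propagates before the outer raise ever runs.)

from tensor2tensor.utils.hparam import _process_list_value

results = {}
# Mimics PARAM_RE's groupdict for "arr=[1, 2,3]".
_process_list_value('arr', int, int, {'index': None, 'vals': '1, 2,3'},
                    'arr=[1, 2,3]', results)
assert results == {'arr': [1, 2, 3]}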
22,636
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
_cast_to_type_if_compatible
def _cast_to_type_if_compatible(name, param_type, value): """Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type. """ fail_msg = ( "Could not cast hparam '%s' of type '%s' from value %r" % (name, param_type, value)) # Some callers use None, for which we can't do any casting/checking. :( if issubclass(param_type, type(None)): return value # Avoid converting a non-string type to a string. if (issubclass(param_type, (six.string_types, six.binary_type)) and not isinstance(value, (six.string_types, six.binary_type))): raise ValueError(fail_msg) # Avoid converting a number or string type to a boolean or vice versa. if issubclass(param_type, bool) != isinstance(value, bool): raise ValueError(fail_msg) # Avoid converting float to an integer (the reverse is fine). if (issubclass(param_type, numbers.Integral) and not isinstance(value, numbers.Integral)): raise ValueError(fail_msg) # Avoid converting a non-numeric type to a numeric type. if (issubclass(param_type, numbers.Number) and not isinstance(value, numbers.Number)): raise ValueError(fail_msg) return param_type(value)
python
def _cast_to_type_if_compatible(name, param_type, value): """Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type. """ fail_msg = ( "Could not cast hparam '%s' of type '%s' from value %r" % (name, param_type, value)) # Some callers use None, for which we can't do any casting/checking. :( if issubclass(param_type, type(None)): return value # Avoid converting a non-string type to a string. if (issubclass(param_type, (six.string_types, six.binary_type)) and not isinstance(value, (six.string_types, six.binary_type))): raise ValueError(fail_msg) # Avoid converting a number or string type to a boolean or vice versa. if issubclass(param_type, bool) != isinstance(value, bool): raise ValueError(fail_msg) # Avoid converting float to an integer (the reverse is fine). if (issubclass(param_type, numbers.Integral) and not isinstance(value, numbers.Integral)): raise ValueError(fail_msg) # Avoid converting a non-numeric type to a numeric type. if (issubclass(param_type, numbers.Number) and not isinstance(value, numbers.Number)): raise ValueError(fail_msg) return param_type(value)
[ "def", "_cast_to_type_if_compatible", "(", "name", ",", "param_type", ",", "value", ")", ":", "fail_msg", "=", "(", "\"Could not cast hparam '%s' of type '%s' from value %r\"", "%", "(", "name", ",", "param_type", ",", "value", ")", ")", "# Some callers use None, for which we can't do any casting/checking. :(", "if", "issubclass", "(", "param_type", ",", "type", "(", "None", ")", ")", ":", "return", "value", "# Avoid converting a non-string type to a string.", "if", "(", "issubclass", "(", "param_type", ",", "(", "six", ".", "string_types", ",", "six", ".", "binary_type", ")", ")", "and", "not", "isinstance", "(", "value", ",", "(", "six", ".", "string_types", ",", "six", ".", "binary_type", ")", ")", ")", ":", "raise", "ValueError", "(", "fail_msg", ")", "# Avoid converting a number or string type to a boolean or vice versa.", "if", "issubclass", "(", "param_type", ",", "bool", ")", "!=", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "ValueError", "(", "fail_msg", ")", "# Avoid converting float to an integer (the reverse is fine).", "if", "(", "issubclass", "(", "param_type", ",", "numbers", ".", "Integral", ")", "and", "not", "isinstance", "(", "value", ",", "numbers", ".", "Integral", ")", ")", ":", "raise", "ValueError", "(", "fail_msg", ")", "# Avoid converting a non-numeric type to a numeric type.", "if", "(", "issubclass", "(", "param_type", ",", "numbers", ".", "Number", ")", "and", "not", "isinstance", "(", "value", ",", "numbers", ".", "Number", ")", ")", ":", "raise", "ValueError", "(", "fail_msg", ")", "return", "param_type", "(", "value", ")" ]
Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type.
[ "Cast", "hparam", "to", "the", "provided", "type", "if", "compatible", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L138-L183
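The casting rules are easiest to see by example: widening an int to float is allowed, while narrowing, string/number mixing, and bool/non-bool mixing are all rejected. A hedged sketch, assuming tensor2tensor is installed:

from tensor2tensor.utils.hparam import _cast_to_type_if_compatible

assert _cast_to_type_if_compatible('lr', float, 1) == 1.0  # int -> float: ok
assert _cast_to_type_if_compatible('steps', int, 7) == 7
for param_type, value in [(int, 1.5), (str, 3), (bool, 1), (float, 'x')]:
    try:
        _cast_to_type_if_compatible('p', param_type, value)
    except ValueError:
        pass  # each of these combinations is rejected
    else:
        raise AssertionError('expected ValueError for %r' % ((param_type, value),))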
22,637
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
parse_values
def parse_values(values, type_map, ignore_unknown=False): """Parses hyperparameter values from a string into a python map. `values` is a string containing comma-separated `name=value` pairs. For each pair, the value of the hyperparameter named `name` is set to `value`. If a hyperparameter name appears multiple times in `values`, a ValueError is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). If a hyperparameter name appears in both an index assignment and a scalar assignment, a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). The hyperparameter name may contain '.' symbols, which will result in an attribute name that is only accessible through the getattr and setattr functions. (And must first be explicitly added through add_hparam.) WARNING: Use of '.' in your variable names is allowed, but is not well supported and not recommended. The `value` in `name=value` must follow the syntax according to the type of the parameter: * Scalar integer: A Python-parsable integer value. E.g.: 1, 100, -12. * Scalar float: A Python-parsable floating point value. E.g.: 1.0, -.54e89. * Boolean: Either true or false. * Scalar string: A non-empty sequence of characters, excluding comma, spaces, and square brackets. E.g.: foo, bar_1. * List: A comma separated list of scalar values of the parameter type enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. When index assignment is used, the corresponding type_map key should be the list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not "arr[1]"). Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. type_map: A dictionary mapping hyperparameter names to types. Note every parameter name in values must be a key in type_map. The values must conform to the types indicated, where a value V is said to conform to a type T if either V has type T, or V is a list of elements of type T. Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}) Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same name is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]') """ results_dictionary = {} pos = 0 while pos < len(values): m = PARAM_RE.match(values, pos) if not m: raise ValueError('Malformed hyperparameter value: %s' % values[pos:]) # Check that there is a comma between parameters and move past it. pos = m.end() # Parse the values. m_dict = m.groupdict() name = m_dict['name'] if name not in type_map: if ignore_unknown: continue raise ValueError('Unknown hyperparameter type for %s' % name) type_ = type_map[name] # Set up correct parsing function (depending on whether type_ is a bool) if type_ == bool: def parse_bool(value): if value in ['true', 'True']: return True elif value in ['false', 'False']: return False else: try: return bool(int(value)) except ValueError: _parse_fail(name, type_, value, values) parse = parse_bool else: parse = type_ # If a single value is provided if m_dict['val'] is not None: _process_scalar_value(name, parse, type_, m_dict, values, results_dictionary) # If the assigned value is a list: elif m_dict['vals'] is not None: _process_list_value(name, parse, type_, m_dict, values, results_dictionary) else: # Not assigned a list or value _parse_fail(name, type_, '', values) return results_dictionary
python
def parse_values(values, type_map, ignore_unknown=False): """Parses hyperparameter values from a string into a python map. `values` is a string containing comma-separated `name=value` pairs. For each pair, the value of the hyperparameter named `name` is set to `value`. If a hyperparameter name appears multiple times in `values`, a ValueError is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). If a hyperparameter name appears in both an index assignment and a scalar assignment, a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). The hyperparameter name may contain '.' symbols, which will result in an attribute name that is only accessible through the getattr and setattr functions. (And must first be explicitly added through add_hparam.) WARNING: Use of '.' in your variable names is allowed, but is not well supported and not recommended. The `value` in `name=value` must follow the syntax according to the type of the parameter: * Scalar integer: A Python-parsable integer value. E.g.: 1, 100, -12. * Scalar float: A Python-parsable floating point value. E.g.: 1.0, -.54e89. * Boolean: Either true or false. * Scalar string: A non-empty sequence of characters, excluding comma, spaces, and square brackets. E.g.: foo, bar_1. * List: A comma separated list of scalar values of the parameter type enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. When index assignment is used, the corresponding type_map key should be the list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not "arr[1]"). Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. type_map: A dictionary mapping hyperparameter names to types. Note every parameter name in values must be a key in type_map. The values must conform to the types indicated, where a value V is said to conform to a type T if either V has type T, or V is a list of elements of type T. Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}) Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same name is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]') """ results_dictionary = {} pos = 0 while pos < len(values): m = PARAM_RE.match(values, pos) if not m: raise ValueError('Malformed hyperparameter value: %s' % values[pos:]) # Check that there is a comma between parameters and move past it. pos = m.end() # Parse the values. m_dict = m.groupdict() name = m_dict['name'] if name not in type_map: if ignore_unknown: continue raise ValueError('Unknown hyperparameter type for %s' % name) type_ = type_map[name] # Set up correct parsing function (depending on whether type_ is a bool) if type_ == bool: def parse_bool(value): if value in ['true', 'True']: return True elif value in ['false', 'False']: return False else: try: return bool(int(value)) except ValueError: _parse_fail(name, type_, value, values) parse = parse_bool else: parse = type_ # If a single value is provided if m_dict['val'] is not None: _process_scalar_value(name, parse, type_, m_dict, values, results_dictionary) # If the assigned value is a list: elif m_dict['vals'] is not None: _process_list_value(name, parse, type_, m_dict, values, results_dictionary) else: # Not assigned a list or value _parse_fail(name, type_, '', values) return results_dictionary
[ "def", "parse_values", "(", "values", ",", "type_map", ",", "ignore_unknown", "=", "False", ")", ":", "results_dictionary", "=", "{", "}", "pos", "=", "0", "while", "pos", "<", "len", "(", "values", ")", ":", "m", "=", "PARAM_RE", ".", "match", "(", "values", ",", "pos", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "'Malformed hyperparameter value: %s'", "%", "values", "[", "pos", ":", "]", ")", "# Check that there is a comma between parameters and move past it.", "pos", "=", "m", ".", "end", "(", ")", "# Parse the values.", "m_dict", "=", "m", ".", "groupdict", "(", ")", "name", "=", "m_dict", "[", "'name'", "]", "if", "name", "not", "in", "type_map", ":", "if", "ignore_unknown", ":", "continue", "raise", "ValueError", "(", "'Unknown hyperparameter type for %s'", "%", "name", ")", "type_", "=", "type_map", "[", "name", "]", "# Set up correct parsing function (depending on whether type_ is a bool)", "if", "type_", "==", "bool", ":", "def", "parse_bool", "(", "value", ")", ":", "if", "value", "in", "[", "'true'", ",", "'True'", "]", ":", "return", "True", "elif", "value", "in", "[", "'false'", ",", "'False'", "]", ":", "return", "False", "else", ":", "try", ":", "return", "bool", "(", "int", "(", "value", ")", ")", "except", "ValueError", ":", "_parse_fail", "(", "name", ",", "type_", ",", "value", ",", "values", ")", "parse", "=", "parse_bool", "else", ":", "parse", "=", "type_", "# If a singe value is provided", "if", "m_dict", "[", "'val'", "]", "is", "not", "None", ":", "_process_scalar_value", "(", "name", ",", "parse", ",", "type_", ",", "m_dict", ",", "values", ",", "results_dictionary", ")", "# If the assigned value is a list:", "elif", "m_dict", "[", "'vals'", "]", "is", "not", "None", ":", "_process_list_value", "(", "name", ",", "parse", ",", "type_", ",", "m_dict", ",", "values", ",", "results_dictionary", ")", "else", ":", "# Not assigned a list or value", "_parse_fail", "(", "name", ",", "type_", ",", "''", ",", "values", ")", "return", "results_dictionary" ]
Parses hyperparameter values from a string into a python map. `values` is a string containing comma-separated `name=value` pairs. For each pair, the value of the hyperparameter named `name` is set to `value`. If a hyperparameter name appears multiple times in `values`, a ValueError is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). If a hyperparameter name appears in both an index assignment and a scalar assignment, a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). The hyperparameter name may contain '.' symbols, which will result in an attribute name that is only accessible through the getattr and setattr functions. (And must first be explicitly added through add_hparam.) WARNING: Use of '.' in your variable names is allowed, but is not well supported and not recommended. The `value` in `name=value` must follow the syntax according to the type of the parameter: * Scalar integer: A Python-parsable integer value. E.g.: 1, 100, -12. * Scalar float: A Python-parsable floating point value. E.g.: 1.0, -.54e89. * Boolean: Either true or false. * Scalar string: A non-empty sequence of characters, excluding comma, spaces, and square brackets. E.g.: foo, bar_1. * List: A comma separated list of scalar values of the parameter type enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. When index assignment is used, the corresponding type_map key should be the list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not "arr[1]"). Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. type_map: A dictionary mapping hyperparameter names to types. Note every parameter name in values must be a key in type_map. The values must conform to the types indicated, where a value V is said to conform to a type T if either V has type T, or V is a list of elements of type T. Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}) Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same name is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]')
[ "Parses", "hyperparameter", "values", "from", "a", "string", "into", "a", "python", "map", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L186-L298
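An end-to-end usage sketch for parse_values, exercising the three result shapes the docstring describes (scalar, list, and index-dictionary) plus the bool parser and ignore_unknown, assuming tensor2tensor is installed:

from tensor2tensor.utils.hparam import parse_values

type_map = {'x': int, 'L': float, 'arr': int, 'flag': bool}
parsed = parse_values('x=5,L=[1,2],arr[1]=3,flag=true', type_map)
assert parsed == {'x': 5, 'L': [1.0, 2.0], 'arr': {1: 3}, 'flag': True}
# Unknown names raise ValueError unless ignore_unknown=True is passed:
assert parse_values('x=1,unknown=2', type_map, ignore_unknown=True) == {'x': 1}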
22,638
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.set_hparam
def set_hparam(self, name, value): """Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch. """ param_type, is_list = self._hparam_types[name] if isinstance(value, list): if not is_list: raise ValueError( 'Must not pass a list for single-valued parameter: %s' % name) setattr(self, name, [ _cast_to_type_if_compatible(name, param_type, v) for v in value]) else: if is_list: raise ValueError( 'Must pass a list for multi-valued parameter: %s.' % name) setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
python
def set_hparam(self, name, value): """Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch. """ param_type, is_list = self._hparam_types[name] if isinstance(value, list): if not is_list: raise ValueError( 'Must not pass a list for single-valued parameter: %s' % name) setattr(self, name, [ _cast_to_type_if_compatible(name, param_type, v) for v in value]) else: if is_list: raise ValueError( 'Must pass a list for multi-valued parameter: %s.' % name) setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
[ "def", "set_hparam", "(", "self", ",", "name", ",", "value", ")", ":", "param_type", ",", "is_list", "=", "self", ".", "_hparam_types", "[", "name", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "if", "not", "is_list", ":", "raise", "ValueError", "(", "'Must not pass a list for single-valued parameter: %s'", "%", "name", ")", "setattr", "(", "self", ",", "name", ",", "[", "_cast_to_type_if_compatible", "(", "name", ",", "param_type", ",", "v", ")", "for", "v", "in", "value", "]", ")", "else", ":", "if", "is_list", ":", "raise", "ValueError", "(", "'Must pass a list for multi-valued parameter: %s.'", "%", "name", ")", "setattr", "(", "self", ",", "name", ",", "_cast_to_type_if_compatible", "(", "name", ",", "param_type", ",", "value", ")", ")" ]
Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch.
[ "Set", "the", "value", "of", "an", "existing", "hyperparameter", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L443-L468
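A short usage sketch for set_hparam, assuming tensor2tensor is installed: the type and list-ness recorded when the hparam was first added are enforced on every later update.

from tensor2tensor.utils.hparam import HParams

hp = HParams(learning_rate=0.1, hidden_sizes=[128, 128])
hp.set_hparam('learning_rate', 0.01)   # scalar -> scalar: ok
hp.set_hparam('hidden_sizes', [256])   # list -> list: ok
try:
    hp.set_hparam('learning_rate', [0.01])  # list for a scalar hparam
except ValueError:
    pass  # "Must not pass a list for single-valued parameter"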
22,639
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.del_hparam
def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
python
def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
[ "def", "del_hparam", "(", "self", ",", "name", ")", ":", "if", "hasattr", "(", "self", ",", "name", ")", ":", "delattr", "(", "self", ",", "name", ")", "del", "self", ".", "_hparam_types", "[", "name", "]" ]
Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter.
[ "Removes", "the", "hyperparameter", "with", "key", "name", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L470-L480
22,640
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.parse
def parse(self, values): """Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist. """ type_map = {} for name, t in self._hparam_types.items(): param_type, _ = t type_map[name] = param_type values_map = parse_values(values, type_map) return self.override_from_dict(values_map)
python
def parse(self, values): """Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist. """ type_map = {} for name, t in self._hparam_types.items(): param_type, _ = t type_map[name] = param_type values_map = parse_values(values, type_map) return self.override_from_dict(values_map)
[ "def", "parse", "(", "self", ",", "values", ")", ":", "type_map", "=", "{", "}", "for", "name", ",", "t", "in", "self", ".", "_hparam_types", ".", "items", "(", ")", ":", "param_type", ",", "_", "=", "t", "type_map", "[", "name", "]", "=", "param_type", "values_map", "=", "parse_values", "(", "values", ",", "type_map", ")", "return", "self", ".", "override_from_dict", "(", "values_map", ")" ]
Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist.
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "string", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L482-L504
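parse is the string-facing wrapper: it builds the name -> type map from the registered hparams, runs parse_values, and applies the result through override_from_dict. A hedged sketch, assuming tensor2tensor is installed:

from tensor2tensor.utils.hparam import HParams

hp = HParams(batch_size=32, dropout=0.1, layers=[2, 2])
hp.parse('batch_size=64,dropout=0.3,layers=[4,4]')
assert hp.batch_size == 64 and hp.dropout == 0.3 and hp.layers == [4, 4]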
22,641
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.override_from_dict
def override_from_dict(self, values_dict): """Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed. """ for name, value in values_dict.items(): self.set_hparam(name, value) return self
python
def override_from_dict(self, values_dict): """Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed. """ for name, value in values_dict.items(): self.set_hparam(name, value) return self
[ "def", "override_from_dict", "(", "self", ",", "values_dict", ")", ":", "for", "name", ",", "value", "in", "values_dict", ".", "items", "(", ")", ":", "self", ".", "set_hparam", "(", "name", ",", "value", ")", "return", "self" ]
Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed.
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "dictionary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L506-L521
22,642
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.to_json
def to_json(self, indent=None, separators=None, sort_keys=False): """Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string. """ def remove_callables(x): """Omit callable elements from input with arbitrary nesting.""" if isinstance(x, dict): return {k: remove_callables(v) for k, v in six.iteritems(x) if not callable(v)} elif isinstance(x, list): return [remove_callables(i) for i in x if not callable(i)] return x return json.dumps( remove_callables(self.values()), indent=indent, separators=separators, sort_keys=sort_keys)
python
def to_json(self, indent=None, separators=None, sort_keys=False): """Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string. """ def remove_callables(x): """Omit callable elements from input with arbitrary nesting.""" if isinstance(x, dict): return {k: remove_callables(v) for k, v in six.iteritems(x) if not callable(v)} elif isinstance(x, list): return [remove_callables(i) for i in x if not callable(i)] return x return json.dumps( remove_callables(self.values()), indent=indent, separators=separators, sort_keys=sort_keys)
[ "def", "to_json", "(", "self", ",", "indent", "=", "None", ",", "separators", "=", "None", ",", "sort_keys", "=", "False", ")", ":", "def", "remove_callables", "(", "x", ")", ":", "\"\"\"Omit callable elements from input with arbitrary nesting.\"\"\"", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "{", "k", ":", "remove_callables", "(", "v", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "x", ")", "if", "not", "callable", "(", "v", ")", "}", "elif", "isinstance", "(", "x", ",", "list", ")", ":", "return", "[", "remove_callables", "(", "i", ")", "for", "i", "in", "x", "if", "not", "callable", "(", "i", ")", "]", "return", "x", "return", "json", ".", "dumps", "(", "remove_callables", "(", "self", ".", "values", "(", ")", ")", ",", "indent", "=", "indent", ",", "separators", "=", "separators", ",", "sort_keys", "=", "sort_keys", ")" ]
Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string.
[ "Serializes", "the", "hyperparameters", "into", "JSON", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L529-L556
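One behavior of to_json worth calling out: callable values are silently dropped by remove_callables rather than raising a serialization error. A hedged sketch, assuming tensor2tensor is installed:

import json
from tensor2tensor.utils.hparam import HParams

hp = HParams(lr=0.1, name='run1')
hp.add_hparam('init_fn', lambda: 0)  # callable: omitted from the JSON output
out = json.loads(hp.to_json(sort_keys=True))
assert out == {'lr': 0.1, 'name': 'run1'}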
22,643
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.parse_json
def parse_json(self, values_json): """Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed. """ values_map = json.loads(values_json) return self.override_from_dict(values_map)
python
def parse_json(self, values_json): """Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed. """ values_map = json.loads(values_json) return self.override_from_dict(values_map)
[ "def", "parse_json", "(", "self", ",", "values_json", ")", ":", "values_map", "=", "json", ".", "loads", "(", "values_json", ")", "return", "self", ".", "override_from_dict", "(", "values_map", ")" ]
Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed.
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "json", "object", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L558-L572
22,644
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.values
def values(self): """Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values. """ return {n: getattr(self, n) for n in self._hparam_types.keys()}
python
def values(self): """Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values. """ return {n: getattr(self, n) for n in self._hparam_types.keys()}
[ "def", "values", "(", "self", ")", ":", "return", "{", "n", ":", "getattr", "(", "self", ",", "n", ")", "for", "n", "in", "self", ".", "_hparam_types", ".", "keys", "(", ")", "}" ]
Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values.
[ "Return", "the", "hyperparameter", "values", "as", "a", "Python", "dictionary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L574-L581
22,645
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams.get
def get(self, key, default=None): """Returns the value of `key` if it exists, else `default`.""" if key in self._hparam_types: # Ensure that default is compatible with the parameter type. if default is not None: param_type, is_param_list = self._hparam_types[key] type_str = 'list<%s>' % param_type if is_param_list else str(param_type) fail_msg = ("Hparam '%s' of type '%s' is incompatible with " 'default=%s' % (key, type_str, default)) is_default_list = isinstance(default, list) if is_param_list != is_default_list: raise ValueError(fail_msg) try: if is_default_list: for value in default: _cast_to_type_if_compatible(key, param_type, value) else: _cast_to_type_if_compatible(key, param_type, default) except ValueError as e: raise ValueError('%s. %s' % (fail_msg, e)) return getattr(self, key) return default
python
def get(self, key, default=None): """Returns the value of `key` if it exists, else `default`.""" if key in self._hparam_types: # Ensure that default is compatible with the parameter type. if default is not None: param_type, is_param_list = self._hparam_types[key] type_str = 'list<%s>' % param_type if is_param_list else str(param_type) fail_msg = ("Hparam '%s' of type '%s' is incompatible with " 'default=%s' % (key, type_str, default)) is_default_list = isinstance(default, list) if is_param_list != is_default_list: raise ValueError(fail_msg) try: if is_default_list: for value in default: _cast_to_type_if_compatible(key, param_type, value) else: _cast_to_type_if_compatible(key, param_type, default) except ValueError as e: raise ValueError('%s. %s' % (fail_msg, e)) return getattr(self, key) return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", "in", "self", ".", "_hparam_types", ":", "# Ensure that default is compatible with the parameter type.", "if", "default", "is", "not", "None", ":", "param_type", ",", "is_param_list", "=", "self", ".", "_hparam_types", "[", "key", "]", "type_str", "=", "'list<%s>'", "%", "param_type", "if", "is_param_list", "else", "str", "(", "param_type", ")", "fail_msg", "=", "(", "\"Hparam '%s' of type '%s' is incompatible with \"", "'default=%s'", "%", "(", "key", ",", "type_str", ",", "default", ")", ")", "is_default_list", "=", "isinstance", "(", "default", ",", "list", ")", "if", "is_param_list", "!=", "is_default_list", ":", "raise", "ValueError", "(", "fail_msg", ")", "try", ":", "if", "is_default_list", ":", "for", "value", "in", "default", ":", "_cast_to_type_if_compatible", "(", "key", ",", "param_type", ",", "value", ")", "else", ":", "_cast_to_type_if_compatible", "(", "key", ",", "param_type", ",", "default", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "'%s. %s'", "%", "(", "fail_msg", ",", "e", ")", ")", "return", "getattr", "(", "self", ",", "key", ")", "return", "default" ]
Returns the value of `key` if it exists, else `default`.
[ "Returns", "the", "value", "of", "key", "if", "it", "exists", "else", "default", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L583-L608
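Unlike dict.get, HParams.get type-checks a non-None default against the registered hparam type even when the key exists and the default would be ignored. A hedged sketch, assuming tensor2tensor is installed:

from tensor2tensor.utils.hparam import HParams

hp = HParams(depth=6)
assert hp.get('depth') == 6
assert hp.get('width', 256) == 256  # unknown key: default returned
assert hp.get('depth', 12) == 6     # known key: default checked, then ignored
try:
    hp.get('depth', 'twelve')       # str default for an int hparam
except ValueError:
    pass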
22,646
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
HParams._get_kind_name
def _get_kind_name(param_type, is_list): """Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized. """ if issubclass(param_type, bool): # This check must happen before issubclass(param_type, six.integer_types), # since Python considers bool to be a subclass of int. typename = 'bool' elif issubclass(param_type, six.integer_types): # Setting 'int' and 'long' types to be 'int64' to ensure the type is # compatible with both Python2 and Python3. typename = 'int64' elif issubclass(param_type, (six.string_types, six.binary_type)): # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is # compatible with both Python2 and Python3. typename = 'bytes' elif issubclass(param_type, float): typename = 'float' else: raise ValueError('Unsupported parameter type: %s' % str(param_type)) suffix = 'list' if is_list else 'value' return '_'.join([typename, suffix])
python
def _get_kind_name(param_type, is_list): """Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized. """ if issubclass(param_type, bool): # This check must happen before issubclass(param_type, six.integer_types), # since Python considers bool to be a subclass of int. typename = 'bool' elif issubclass(param_type, six.integer_types): # Setting 'int' and 'long' types to be 'int64' to ensure the type is # compatible with both Python2 and Python3. typename = 'int64' elif issubclass(param_type, (six.string_types, six.binary_type)): # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is # compatible with both Python2 and Python3. typename = 'bytes' elif issubclass(param_type, float): typename = 'float' else: raise ValueError('Unsupported parameter type: %s' % str(param_type)) suffix = 'list' if is_list else 'value' return '_'.join([typename, suffix])
[ "def", "_get_kind_name", "(", "param_type", ",", "is_list", ")", ":", "if", "issubclass", "(", "param_type", ",", "bool", ")", ":", "# This check must happen before issubclass(param_type, six.integer_types),", "# since Python considers bool to be a subclass of int.", "typename", "=", "'bool'", "elif", "issubclass", "(", "param_type", ",", "six", ".", "integer_types", ")", ":", "# Setting 'int' and 'long' types to be 'int64' to ensure the type is", "# compatible with both Python2 and Python3.", "typename", "=", "'int64'", "elif", "issubclass", "(", "param_type", ",", "(", "six", ".", "string_types", ",", "six", ".", "binary_type", ")", ")", ":", "# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is", "# compatible with both Python2 and Python3.", "typename", "=", "'bytes'", "elif", "issubclass", "(", "param_type", ",", "float", ")", ":", "typename", "=", "'float'", "else", ":", "raise", "ValueError", "(", "'Unsupported parameter type: %s'", "%", "str", "(", "param_type", ")", ")", "suffix", "=", "'list'", "if", "is_list", "else", "'value'", "return", "'_'", ".", "join", "(", "[", "typename", ",", "suffix", "]", ")" ]
Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized.
[ "Returns", "the", "field", "name", "given", "parameter", "type", "and", "is_list", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L620-L651
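The kind-name mapping in _get_kind_name is small enough to enumerate; note that bool must be tested before the integer types because bool is a subclass of int in Python. A hedged sketch, assuming tensor2tensor is installed:

from tensor2tensor.utils.hparam import _get_kind_name

assert _get_kind_name(bool, is_list=False) == 'bool_value'
assert _get_kind_name(int, is_list=True) == 'int64_list'
assert _get_kind_name(str, is_list=False) == 'bytes_value'
assert _get_kind_name(float, is_list=True) == 'float_list'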
22,647
tensorflow/tensor2tensor
tensor2tensor/trax/trainer.py
_default_output_dir
def _default_output_dir(): """Default output directory.""" try: dataset_name = gin.query_parameter("inputs.dataset_name") except ValueError: dataset_name = "random" dir_name = "{model_name}_{dataset_name}_{timestamp}".format( model_name=gin.query_parameter("train.model").configurable.name, dataset_name=dataset_name, timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"), ) dir_path = os.path.join("~", "trax", dir_name) print() trax.log("No --output_dir specified") return dir_path
python
def _default_output_dir(): """Default output directory.""" try: dataset_name = gin.query_parameter("inputs.dataset_name") except ValueError: dataset_name = "random" dir_name = "{model_name}_{dataset_name}_{timestamp}".format( model_name=gin.query_parameter("train.model").configurable.name, dataset_name=dataset_name, timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"), ) dir_path = os.path.join("~", "trax", dir_name) print() trax.log("No --output_dir specified") return dir_path
[ "def", "_default_output_dir", "(", ")", ":", "try", ":", "dataset_name", "=", "gin", ".", "query_parameter", "(", "\"inputs.dataset_name\"", ")", "except", "ValueError", ":", "dataset_name", "=", "\"random\"", "dir_name", "=", "\"{model_name}_{dataset_name}_{timestamp}\"", ".", "format", "(", "model_name", "=", "gin", ".", "query_parameter", "(", "\"train.model\"", ")", ".", "configurable", ".", "name", ",", "dataset_name", "=", "dataset_name", ",", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y%m%d_%H%M\"", ")", ",", ")", "dir_path", "=", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "\"trax\"", ",", "dir_name", ")", "print", "(", ")", "trax", ".", "log", "(", "\"No --output_dir specified\"", ")", "return", "dir_path" ]
Default output directory.
[ "Default", "output", "directory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trainer.py#L48-L62
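Note that the "~" in the generated path is not expanded by _default_output_dir itself; presumably the caller expands it before use. A hedged sketch of the name it produces, with hypothetical model and dataset values:

import datetime
import os

dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
    model_name="Transformer",  # hypothetical value of train.model
    dataset_name="wmt14",      # hypothetical value of inputs.dataset_name
    timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"))
print(os.path.expanduser(os.path.join("~", "trax", dir_name)))
# e.g. /home/user/trax/Transformer_wmt14_20190401_1230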
22,648
tensorflow/tensor2tensor
tensor2tensor/trax/trainer.py
_setup_gin
def _setup_gin(): """Setup gin configuration.""" # Imports for configurables # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable from tensor2tensor.trax import models as _trax_models from tensor2tensor.trax import optimizers as _trax_opt # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable configs = FLAGS.config or [] # Override with --dataset and --model if FLAGS.dataset: configs.append("inputs.dataset_name='%s'" % FLAGS.dataset) if FLAGS.data_dir: configs.append("inputs.data_dir='%s'" % FLAGS.data_dir) if FLAGS.model: configs.append("train.model=@trax.models.%s" % FLAGS.model) gin.parse_config_files_and_bindings(FLAGS.config_file, configs)
python
def _setup_gin(): """Setup gin configuration.""" # Imports for configurables # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable from tensor2tensor.trax import models as _trax_models from tensor2tensor.trax import optimizers as _trax_opt # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable configs = FLAGS.config or [] # Override with --dataset and --model if FLAGS.dataset: configs.append("inputs.dataset_name='%s'" % FLAGS.dataset) if FLAGS.data_dir: configs.append("inputs.data_dir='%s'" % FLAGS.data_dir) if FLAGS.model: configs.append("train.model=@trax.models.%s" % FLAGS.model) gin.parse_config_files_and_bindings(FLAGS.config_file, configs)
[ "def", "_setup_gin", "(", ")", ":", "# Imports for configurables", "# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable", "from", "tensor2tensor", ".", "trax", "import", "models", "as", "_trax_models", "from", "tensor2tensor", ".", "trax", "import", "optimizers", "as", "_trax_opt", "# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable", "configs", "=", "FLAGS", ".", "config", "or", "[", "]", "# Override with --dataset and --model", "if", "FLAGS", ".", "dataset", ":", "configs", ".", "append", "(", "\"inputs.dataset_name='%s'\"", "%", "FLAGS", ".", "dataset", ")", "if", "FLAGS", ".", "data_dir", ":", "configs", ".", "append", "(", "\"inputs.data_dir='%s'\"", "%", "FLAGS", ".", "data_dir", ")", "if", "FLAGS", ".", "model", ":", "configs", ".", "append", "(", "\"train.model=@trax.models.%s\"", "%", "FLAGS", ".", "model", ")", "gin", ".", "parse_config_files_and_bindings", "(", "FLAGS", ".", "config_file", ",", "configs", ")" ]
Setup gin configuration.
[ "Setup", "gin", "configuration", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trainer.py#L65-L81
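The flag-to-binding translation in _setup_gin can be restated without touching gin or absl FLAGS; the sketch below mirrors the override logic with hypothetical flag values (flag_bindings is not part of the repo):

def flag_bindings(dataset=None, data_dir=None, model=None, config=None):
    # Mirror of _setup_gin's override logic, minus gin and FLAGS.
    configs = list(config or [])
    if dataset:
        configs.append("inputs.dataset_name='%s'" % dataset)
    if data_dir:
        configs.append("inputs.data_dir='%s'" % data_dir)
    if model:
        configs.append("train.model=@trax.models.%s" % model)
    return configs

assert flag_bindings(dataset='wmt14', model='Transformer') == [
    "inputs.dataset_name='wmt14'",
    "train.model=@trax.models.Transformer",
]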
22,649
tensorflow/tensor2tensor
tensor2tensor/v2/t2t.py
_make_info
def _make_info(shape_list, num_classes): """Create an info-like tuple for feature given some shapes and vocab size.""" feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"]) cur_shape = list(shape_list[0]) # We need to merge the provided shapes, put None where they disagree. for shape in shape_list: if len(shape) != len(cur_shape): raise ValueError("Shapes need to have the same number of dimensions.") for i in range(len(shape)): if cur_shape[i] is not None: if shape[i] != cur_shape[i]: cur_shape[i] = None return feature_info(cur_shape, num_classes)
python
def _make_info(shape_list, num_classes):
  """Create an info-like tuple for feature given some shapes and vocab size."""
  feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"])
  cur_shape = list(shape_list[0])
  # We need to merge the provided shapes, put None where they disagree.
  for shape in shape_list:
    if len(shape) != len(cur_shape):
      raise ValueError("Shapes need to have the same number of dimensions.")
    for i in range(len(shape)):
      if cur_shape[i] is not None:
        if shape[i] != cur_shape[i]:
          cur_shape[i] = None
  return feature_info(cur_shape, num_classes)
[ "def", "_make_info", "(", "shape_list", ",", "num_classes", ")", ":", "feature_info", "=", "collections", ".", "namedtuple", "(", "\"FeatureInfo\"", ",", "[", "\"shape\"", ",", "\"num_classes\"", "]", ")", "cur_shape", "=", "list", "(", "shape_list", "[", "0", "]", ")", "# We need to merge the provided shapes, put None where they disagree.", "for", "shape", "in", "shape_list", ":", "if", "len", "(", "shape", ")", "!=", "len", "(", "cur_shape", ")", ":", "raise", "ValueError", "(", "\"Shapes need to have the same number of dimensions.\"", ")", "for", "i", "in", "range", "(", "len", "(", "shape", ")", ")", ":", "if", "cur_shape", "[", "i", "]", "is", "not", "None", ":", "if", "shape", "[", "i", "]", "!=", "cur_shape", "[", "i", "]", ":", "cur_shape", "[", "i", "]", "=", "None", "return", "feature_info", "(", "cur_shape", ",", "num_classes", ")" ]
Create an info-like tuple for feature given some shapes and vocab size.
[ "Create", "an", "info", "-", "like", "tuple", "for", "feature", "given", "some", "shapes", "and", "vocab", "size", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L86-L98
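To make the shape-merging rule above concrete, here is a self-contained check (the function is re-declared so the snippet runs on its own): dimensions on which all shapes agree are kept, dimensions that disagree become None.

import collections

def _make_info(shape_list, num_classes):  # copied from the record above
  feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"])
  cur_shape = list(shape_list[0])
  for shape in shape_list:
    if len(shape) != len(cur_shape):
      raise ValueError("Shapes need to have the same number of dimensions.")
    for i in range(len(shape)):
      if cur_shape[i] is not None and shape[i] != cur_shape[i]:
        cur_shape[i] = None  # disagreeing dimension becomes unknown
  return feature_info(cur_shape, num_classes)

print(_make_info([[32, 28, 28], [64, 28, 28]], num_classes=10))
# FeatureInfo(shape=[None, 28, 28], num_classes=10)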
22,650
tensorflow/tensor2tensor
tensor2tensor/v2/t2t.py
_select_features
def _select_features(example, feature_list=None):
  """Select a subset of features from the example dict."""
  feature_list = feature_list or ["inputs", "targets"]
  return {f: example[f] for f in feature_list}
python
def _select_features(example, feature_list=None):
  """Select a subset of features from the example dict."""
  feature_list = feature_list or ["inputs", "targets"]
  return {f: example[f] for f in feature_list}
[ "def", "_select_features", "(", "example", ",", "feature_list", "=", "None", ")", ":", "feature_list", "=", "feature_list", "or", "[", "\"inputs\"", ",", "\"targets\"", "]", "return", "{", "f", ":", "example", "[", "f", "]", "for", "f", "in", "feature_list", "}" ]
Select a subset of features from the example dict.
[ "Select", "a", "subset", "of", "features", "from", "the", "example", "dict", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L101-L104
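A minimal usage sketch for the record above; note that the default feature list keeps only "inputs" and "targets", and a requested key that is absent from the example would raise KeyError.

def _select_features(example, feature_list=None):  # copied from above
  feature_list = feature_list or ["inputs", "targets"]
  return {f: example[f] for f in feature_list}

example = {"inputs": [1, 2], "targets": [3], "frame_number": 7}
print(_select_features(example))  # {'inputs': [1, 2], 'targets': [3]}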
22,651
tensorflow/tensor2tensor
tensor2tensor/v2/t2t.py
optimize_fn
def optimize_fn(model,
                optimizer=None,
                learning_rate_schedule=None,
                loss=None,
                metrics=None):
  """Compile the model in Keras."""
  learning_rate_schedule = learning_rate_schedule or T2TLearningRateSchedule()
  if optimizer:
    optimizer = optimizer(learning_rate=learning_rate_schedule)
  else:
    # We use Adam by default with adjusted parameters.
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=learning_rate_schedule,
        beta_1=0.9, beta_2=0.997, epsilon=1e-9)
  metrics = metrics or [tf.keras.metrics.sparse_categorical_accuracy]
  def xent_loss(y, x):
    return tf.keras.backend.sparse_categorical_crossentropy(
        y, x, from_logits=True)
  loss = loss or xent_loss
  return model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
python
def optimize_fn(model,
                optimizer=None,
                learning_rate_schedule=None,
                loss=None,
                metrics=None):
  """Compile the model in Keras."""
  learning_rate_schedule = learning_rate_schedule or T2TLearningRateSchedule()
  if optimizer:
    optimizer = optimizer(learning_rate=learning_rate_schedule)
  else:
    # We use Adam by default with adjusted parameters.
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=learning_rate_schedule,
        beta_1=0.9, beta_2=0.997, epsilon=1e-9)
  metrics = metrics or [tf.keras.metrics.sparse_categorical_accuracy]
  def xent_loss(y, x):
    return tf.keras.backend.sparse_categorical_crossentropy(
        y, x, from_logits=True)
  loss = loss or xent_loss
  return model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
[ "def", "optimize_fn", "(", "model", ",", "optimizer", "=", "None", ",", "learning_rate_schedule", "=", "None", ",", "loss", "=", "None", ",", "metrics", "=", "None", ")", ":", "learning_rate_schedule", "=", "learning_rate_schedule", "or", "T2TLearningRateSchedule", "(", ")", "if", "optimizer", ":", "optimizer", "=", "optimizer", "(", "learning_rate", "=", "learning_rate_schedule", ")", "else", ":", "# We use Adam by default with adjusted parameters.", "optimizer", "=", "tf", ".", "keras", ".", "optimizers", ".", "Adam", "(", "learning_rate", "=", "learning_rate_schedule", ",", "beta_1", "=", "0.9", ",", "beta_2", "=", "0.997", ",", "epsilon", "=", "1e-9", ")", "metrics", "=", "metrics", "or", "[", "tf", ".", "keras", ".", "metrics", ".", "sparse_categorical_accuracy", "]", "def", "xent_loss", "(", "y", ",", "x", ")", ":", "return", "tf", ".", "keras", ".", "backend", ".", "sparse_categorical_crossentropy", "(", "y", ",", "x", ",", "from_logits", "=", "True", ")", "loss", "=", "loss", "or", "xent_loss", "return", "model", ".", "compile", "(", "optimizer", "=", "optimizer", ",", "loss", "=", "loss", ",", "metrics", "=", "metrics", ")" ]
Compile the model in Keras.
[ "Compile", "the", "model", "in", "Keras", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L233-L253
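The compile step above can be reproduced outside T2T. In this sketch a plain constant learning rate stands in for T2TLearningRateSchedule() (an assumption; the real schedule is defined elsewhere in t2t.py), while the Adam settings and the from-logits cross-entropy come from the record.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])  # toy model
optimizer = tf.keras.optimizers.Adam(
    learning_rate=1e-3,  # stand-in for T2TLearningRateSchedule()
    beta_1=0.9, beta_2=0.997, epsilon=1e-9)

def xent_loss(y, x):
  # Targets are integer class ids, predictions are unnormalized logits.
  return tf.keras.backend.sparse_categorical_crossentropy(
      y, x, from_logits=True)

model.compile(optimizer=optimizer, loss=xent_loss,
              metrics=[tf.keras.metrics.sparse_categorical_accuracy])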
22,652
tensorflow/tensor2tensor
tensor2tensor/v2/t2t.py
train_fn
def train_fn(data_dir=None, output_dir=None,
             model_class=gin.REQUIRED, dataset=gin.REQUIRED,
             input_names=None, target_names=None,
             train_steps=1000, eval_steps=1, eval_frequency=100):
  """Train the given model on the given dataset.

  Args:
    data_dir: Directory where the data is located.
    output_dir: Directory where to put the logs and checkpoints.
    model_class: The model class to train.
    dataset: The name of the dataset to train on.
    input_names: List of strings with the names of the features on input.
    target_names: List of strings with the names of the target features.
    train_steps: for how many steps to train.
    eval_steps: for how many steps to do evaluation.
    eval_frequency: how often (every this many steps) to run evaluation.
  """
  train_data, eval_data, features_info, keys = train_and_eval_dataset(
      dataset, data_dir)
  if input_names is None:
    input_names = keys[0]
  if target_names is None:
    target_names = keys[1]
  # TODO(lukaszkaiser): The use of distribution strategy below fails like this:
  # .../keras/models.py", line 93, in _clone_functional_model
  #   for layer in model._input_layers:
  # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers'
  # strategy = tf.distribute.MirroredStrategy()
  # with strategy.scope():
  model = model_class(features_info=features_info,
                      input_names=input_names, target_names=target_names)
  optimize_fn(model)
  train_batches = shuffle_and_batch_data(
      train_data, target_names, features_info, training=True)
  eval_batches = shuffle_and_batch_data(
      eval_data, target_names, features_info, training=False)
  # Need to run one training step just to get optimizer variables to load.
  model.fit(train_batches, epochs=1, steps_per_epoch=1)

  # Training loop.
  callbacks = []
  callbacks.append(tf.keras.callbacks.History())
  callbacks.append(tf.keras.callbacks.BaseLogger())
  last_epoch = 0
  if output_dir is not None:
    callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))
    output_format = os.path.join(output_dir, "model-{epoch:05d}")
    callbacks.append(tf.keras.callbacks.ModelCheckpoint(
        filepath=output_format, save_weights_only=True))
    checkpoints = tf.gfile.Glob(os.path.join(output_dir, "model-*"))
    # Take basenames and strip the "model-" prefix.
    checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]
    # Get epoch numbers from the filenames and sort to obtain last epoch.
    epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4]
    epoch_numbers.sort()
    if epoch_numbers:
      last_epoch = epoch_numbers[-1]
      saved_path = os.path.join(output_dir, "model-%05d" % last_epoch)
      model.load_weights(saved_path)
  model.fit(train_batches,
            epochs=train_steps // eval_frequency,
            steps_per_epoch=eval_frequency,
            validation_data=eval_batches,
            validation_steps=eval_steps,
            initial_epoch=last_epoch,
            callbacks=callbacks)
python
def train_fn(data_dir=None, output_dir=None,
             model_class=gin.REQUIRED, dataset=gin.REQUIRED,
             input_names=None, target_names=None,
             train_steps=1000, eval_steps=1, eval_frequency=100):
  """Train the given model on the given dataset.

  Args:
    data_dir: Directory where the data is located.
    output_dir: Directory where to put the logs and checkpoints.
    model_class: The model class to train.
    dataset: The name of the dataset to train on.
    input_names: List of strings with the names of the features on input.
    target_names: List of strings with the names of the target features.
    train_steps: for how many steps to train.
    eval_steps: for how many steps to do evaluation.
    eval_frequency: how often (every this many steps) to run evaluation.
  """
  train_data, eval_data, features_info, keys = train_and_eval_dataset(
      dataset, data_dir)
  if input_names is None:
    input_names = keys[0]
  if target_names is None:
    target_names = keys[1]
  # TODO(lukaszkaiser): The use of distribution strategy below fails like this:
  # .../keras/models.py", line 93, in _clone_functional_model
  #   for layer in model._input_layers:
  # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers'
  # strategy = tf.distribute.MirroredStrategy()
  # with strategy.scope():
  model = model_class(features_info=features_info,
                      input_names=input_names, target_names=target_names)
  optimize_fn(model)
  train_batches = shuffle_and_batch_data(
      train_data, target_names, features_info, training=True)
  eval_batches = shuffle_and_batch_data(
      eval_data, target_names, features_info, training=False)
  # Need to run one training step just to get optimizer variables to load.
  model.fit(train_batches, epochs=1, steps_per_epoch=1)

  # Training loop.
  callbacks = []
  callbacks.append(tf.keras.callbacks.History())
  callbacks.append(tf.keras.callbacks.BaseLogger())
  last_epoch = 0
  if output_dir is not None:
    callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))
    output_format = os.path.join(output_dir, "model-{epoch:05d}")
    callbacks.append(tf.keras.callbacks.ModelCheckpoint(
        filepath=output_format, save_weights_only=True))
    checkpoints = tf.gfile.Glob(os.path.join(output_dir, "model-*"))
    # Take basenames and strip the "model-" prefix.
    checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]
    # Get epoch numbers from the filenames and sort to obtain last epoch.
    epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4]
    epoch_numbers.sort()
    if epoch_numbers:
      last_epoch = epoch_numbers[-1]
      saved_path = os.path.join(output_dir, "model-%05d" % last_epoch)
      model.load_weights(saved_path)
  model.fit(train_batches,
            epochs=train_steps // eval_frequency,
            steps_per_epoch=eval_frequency,
            validation_data=eval_batches,
            validation_steps=eval_steps,
            initial_epoch=last_epoch,
            callbacks=callbacks)
[ "def", "train_fn", "(", "data_dir", "=", "None", ",", "output_dir", "=", "None", ",", "model_class", "=", "gin", ".", "REQUIRED", ",", "dataset", "=", "gin", ".", "REQUIRED", ",", "input_names", "=", "None", ",", "target_names", "=", "None", ",", "train_steps", "=", "1000", ",", "eval_steps", "=", "1", ",", "eval_frequency", "=", "100", ")", ":", "train_data", ",", "eval_data", ",", "features_info", ",", "keys", "=", "train_and_eval_dataset", "(", "dataset", ",", "data_dir", ")", "if", "input_names", "is", "None", ":", "input_names", "=", "keys", "[", "0", "]", "if", "target_names", "is", "None", ":", "target_names", "=", "keys", "[", "1", "]", "# TODO(lukaszkaiser): The use of distribution strategy below fails like this:", "# .../keras/models.py\", line 93, in _clone_functional_model", "# for layer in model._input_layers:", "# AttributeError: 'BasicFcRelu' object has no attribute '_input_layers'", "# strategy = tf.distribute.MirroredStrategy()", "# with strategy.scope():", "model", "=", "model_class", "(", "features_info", "=", "features_info", ",", "input_names", "=", "input_names", ",", "target_names", "=", "target_names", ")", "optimize_fn", "(", "model", ")", "train_batches", "=", "shuffle_and_batch_data", "(", "train_data", ",", "target_names", ",", "features_info", ",", "training", "=", "True", ")", "eval_batches", "=", "shuffle_and_batch_data", "(", "eval_data", ",", "target_names", ",", "features_info", ",", "training", "=", "False", ")", "# Need to run one training step just to get optimizer variables to load.", "model", ".", "fit", "(", "train_batches", ",", "epochs", "=", "1", ",", "steps_per_epoch", "=", "1", ")", "# Training loop.", "callbacks", "=", "[", "]", "callbacks", ".", "append", "(", "tf", ".", "keras", ".", "callbacks", ".", "History", "(", ")", ")", "callbacks", ".", "append", "(", "tf", ".", "keras", ".", "callbacks", ".", "BaseLogger", "(", ")", ")", "last_epoch", "=", "0", "if", "output_dir", "is", "not", "None", ":", "callbacks", ".", "append", "(", "tf", ".", "keras", ".", "callbacks", ".", "TensorBoard", "(", "log_dir", "=", "output_dir", ")", ")", "output_format", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model-{epoch:05d}\"", ")", "callbacks", ".", "append", "(", "tf", ".", "keras", ".", "callbacks", ".", "ModelCheckpoint", "(", "filepath", "=", "output_format", ",", "save_weights_only", "=", "True", ")", ")", "checkpoints", "=", "tf", ".", "gfile", ".", "Glob", "(", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model-*\"", ")", ")", "# Take basenames and strip the \"model-\" prefix.", "checkpoints", "=", "[", "os", ".", "path", ".", "basename", "(", "ckpt", ")", "[", "6", ":", "]", "for", "ckpt", "in", "checkpoints", "]", "# Get epoch numbers from the filenames and sort to obtain last epoch.", "epoch_numbers", "=", "[", "int", "(", "ckpt", "[", ":", "5", "]", ")", "for", "ckpt", "in", "checkpoints", "if", "len", "(", "ckpt", ")", ">", "4", "]", "epoch_numbers", ".", "sort", "(", ")", "if", "epoch_numbers", ":", "last_epoch", "=", "epoch_numbers", "[", "-", "1", "]", "saved_path", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model-%05d\"", "%", "last_epoch", ")", "model", ".", "load_weights", "(", "saved_path", ")", "model", ".", "fit", "(", "train_batches", ",", "epochs", "=", "train_steps", "//", "eval_frequency", ",", "steps_per_epoch", "=", "eval_frequency", ",", "validation_data", "=", "eval_batches", ",", "validation_steps", "=", "eval_steps", ",", "initial_epoch", "=", "last_epoch", ",", "callbacks", "=", "callbacks", ")" ]
Train the given model on the given dataset. Args: data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. model_class: The model class to train. dataset: The name of the dataset to train on. input_names: List of strings with the names of the features on input. target_names: List of strings with the names of the target features. train_steps: for how many steps to train. eval_steps: for how many steps to do evaluation. eval_frequency: how often (every this many steps) to run evaluation.
[ "Train", "the", "given", "model", "on", "the", "given", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L259-L324
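The resume logic in train_fn depends entirely on the model-{epoch:05d} checkpoint naming scheme. A dependency-free sketch of that filename parsing, using hypothetical glob results:

import os

# Hypothetical results of globbing os.path.join(output_dir, "model-*").
checkpoints = ["/out/model-00003.index", "/out/model-00007.index"]
names = [os.path.basename(c)[6:] for c in checkpoints]    # strip "model-"
epochs = sorted(int(n[:5]) for n in names if len(n) > 4)  # leading 5 digits
last_epoch = epochs[-1] if epochs else 0
print(last_epoch, "->", "model-%05d" % last_epoch)  # 7 -> model-00007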
22,653
tensorflow/tensor2tensor
tensor2tensor/v2/t2t.py
t2t_train
def t2t_train(model_name, dataset_name,
              data_dir=None, output_dir=None, config_file=None, config=None):
  """Main function to train the given model on the given dataset.

  Args:
    model_name: The name of the model to train.
    dataset_name: The name of the dataset to train on.
    data_dir: Directory where the data is located.
    output_dir: Directory where to put the logs and checkpoints.
    config_file: the gin configuration file to use.
    config: string (in gin format) to override gin parameters.
  """
  if model_name not in _MODEL_REGISTRY:
    raise ValueError("Model %s not in registry. Available models:\n * %s." %
                     (model_name, "\n * ".join(_MODEL_REGISTRY.keys())))
  model_class = _MODEL_REGISTRY[model_name]()
  gin.bind_parameter("train_fn.model_class", model_class)
  gin.bind_parameter("train_fn.dataset", dataset_name)
  gin.parse_config_files_and_bindings(config_file, config)
  # TODO(lukaszkaiser): save gin config in output_dir if provided?
  train_fn(data_dir, output_dir=output_dir)
python
def t2t_train(model_name, dataset_name,
              data_dir=None, output_dir=None, config_file=None, config=None):
  """Main function to train the given model on the given dataset.

  Args:
    model_name: The name of the model to train.
    dataset_name: The name of the dataset to train on.
    data_dir: Directory where the data is located.
    output_dir: Directory where to put the logs and checkpoints.
    config_file: the gin configuration file to use.
    config: string (in gin format) to override gin parameters.
  """
  if model_name not in _MODEL_REGISTRY:
    raise ValueError("Model %s not in registry. Available models:\n * %s." %
                     (model_name, "\n * ".join(_MODEL_REGISTRY.keys())))
  model_class = _MODEL_REGISTRY[model_name]()
  gin.bind_parameter("train_fn.model_class", model_class)
  gin.bind_parameter("train_fn.dataset", dataset_name)
  gin.parse_config_files_and_bindings(config_file, config)
  # TODO(lukaszkaiser): save gin config in output_dir if provided?
  train_fn(data_dir, output_dir=output_dir)
[ "def", "t2t_train", "(", "model_name", ",", "dataset_name", ",", "data_dir", "=", "None", ",", "output_dir", "=", "None", ",", "config_file", "=", "None", ",", "config", "=", "None", ")", ":", "if", "model_name", "not", "in", "_MODEL_REGISTRY", ":", "raise", "ValueError", "(", "\"Model %s not in registry. Available models:\\n * %s.\"", "%", "(", "model_name", ",", "\"\\n * \"", ".", "join", "(", "_MODEL_REGISTRY", ".", "keys", "(", ")", ")", ")", ")", "model_class", "=", "_MODEL_REGISTRY", "[", "model_name", "]", "(", ")", "gin", ".", "bind_parameter", "(", "\"train_fn.model_class\"", ",", "model_class", ")", "gin", ".", "bind_parameter", "(", "\"train_fn.dataset\"", ",", "dataset_name", ")", "gin", ".", "parse_config_files_and_bindings", "(", "config_file", ",", "config", ")", "# TODO(lukaszkaiser): save gin config in output_dir if provided?", "train_fn", "(", "data_dir", ",", "output_dir", "=", "output_dir", ")" ]
Main function to train the given model on the given dataset. Args: model_name: The name of the model to train. dataset_name: The name of the dataset to train on. data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. config_file: the gin configuration file to use. config: string (in gin format) to override gin parameters.
[ "Main", "function", "to", "train", "the", "given", "model", "on", "the", "given", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L327-L347
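A small sketch of the registry guard above; the registry contents here are invented placeholders, since the real _MODEL_REGISTRY is populated elsewhere in t2t.py.

_MODEL_REGISTRY = {"basic_fc_relu": object, "transformer": object}  # hypothetical

model_name = "resnet"
if model_name not in _MODEL_REGISTRY:
  # Same message formatting as the ValueError raised in t2t_train.
  print("Model %s not in registry. Available models:\n * %s."
        % (model_name, "\n * ".join(_MODEL_REGISTRY.keys())))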
22,654
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_decoder.py
decode
def decode(estimator, hparams, decode_hp):
  """Decode from estimator. Interactive, from file, or from dataset."""
  if FLAGS.decode_interactive:
    if estimator.config.use_tpu:
      raise ValueError("TPU can only decode from dataset.")
    decoding.decode_interactively(estimator, hparams, decode_hp,
                                  checkpoint_path=FLAGS.checkpoint_path)
  elif FLAGS.decode_from_file:
    decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams,
                              decode_hp, FLAGS.decode_to_file,
                              checkpoint_path=FLAGS.checkpoint_path)
    if FLAGS.checkpoint_path and FLAGS.keep_timestamp:
      ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index")
      os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time))
  else:
    decoding.decode_from_dataset(
        estimator,
        FLAGS.problem,
        hparams,
        decode_hp,
        decode_to_file=FLAGS.decode_to_file,
        dataset_split="test" if FLAGS.eval_use_test_set else None,
        checkpoint_path=FLAGS.checkpoint_path)
python
def decode(estimator, hparams, decode_hp):
  """Decode from estimator. Interactive, from file, or from dataset."""
  if FLAGS.decode_interactive:
    if estimator.config.use_tpu:
      raise ValueError("TPU can only decode from dataset.")
    decoding.decode_interactively(estimator, hparams, decode_hp,
                                  checkpoint_path=FLAGS.checkpoint_path)
  elif FLAGS.decode_from_file:
    decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams,
                              decode_hp, FLAGS.decode_to_file,
                              checkpoint_path=FLAGS.checkpoint_path)
    if FLAGS.checkpoint_path and FLAGS.keep_timestamp:
      ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index")
      os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time))
  else:
    decoding.decode_from_dataset(
        estimator,
        FLAGS.problem,
        hparams,
        decode_hp,
        decode_to_file=FLAGS.decode_to_file,
        dataset_split="test" if FLAGS.eval_use_test_set else None,
        checkpoint_path=FLAGS.checkpoint_path)
[ "def", "decode", "(", "estimator", ",", "hparams", ",", "decode_hp", ")", ":", "if", "FLAGS", ".", "decode_interactive", ":", "if", "estimator", ".", "config", ".", "use_tpu", ":", "raise", "ValueError", "(", "\"TPU can only decode from dataset.\"", ")", "decoding", ".", "decode_interactively", "(", "estimator", ",", "hparams", ",", "decode_hp", ",", "checkpoint_path", "=", "FLAGS", ".", "checkpoint_path", ")", "elif", "FLAGS", ".", "decode_from_file", ":", "decoding", ".", "decode_from_file", "(", "estimator", ",", "FLAGS", ".", "decode_from_file", ",", "hparams", ",", "decode_hp", ",", "FLAGS", ".", "decode_to_file", ",", "checkpoint_path", "=", "FLAGS", ".", "checkpoint_path", ")", "if", "FLAGS", ".", "checkpoint_path", "and", "FLAGS", ".", "keep_timestamp", ":", "ckpt_time", "=", "os", ".", "path", ".", "getmtime", "(", "FLAGS", ".", "checkpoint_path", "+", "\".index\"", ")", "os", ".", "utime", "(", "FLAGS", ".", "decode_to_file", ",", "(", "ckpt_time", ",", "ckpt_time", ")", ")", "else", ":", "decoding", ".", "decode_from_dataset", "(", "estimator", ",", "FLAGS", ".", "problem", ",", "hparams", ",", "decode_hp", ",", "decode_to_file", "=", "FLAGS", ".", "decode_to_file", ",", "dataset_split", "=", "\"test\"", "if", "FLAGS", ".", "eval_use_test_set", "else", "None", ",", "checkpoint_path", "=", "FLAGS", ".", "checkpoint_path", ")" ]
Decode from estimator. Interactive, from file, or from dataset.
[ "Decode", "from", "estimator", ".", "Interactive", "from", "file", "or", "from", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_decoder.py#L82-L104
22,655
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_decoder.py
score_file
def score_file(filename):
  """Score each line in a file and return the scores."""
  # Prepare model.
  hparams = create_hparams()
  encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
  has_inputs = "inputs" in encoders

  # Prepare features for feeding into the model.
  if has_inputs:
    inputs_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
    batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1])  # Make it 4D.
  targets_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
  batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1])  # Make it 4D.
  if has_inputs:
    features = {"inputs": batch_inputs, "targets": batch_targets}
  else:
    features = {"targets": batch_targets}

  # Prepare the model and the graph when model runs on features.
  model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
  _, losses = model(features)
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # Load weights from checkpoint.
    if FLAGS.checkpoint_path is None:
      ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
      ckpt = ckpts.model_checkpoint_path
    else:
      ckpt = FLAGS.checkpoint_path
    saver.restore(sess, ckpt)
    # Run on each line.
    with tf.gfile.Open(filename) as f:
      lines = f.readlines()
    results = []
    for line in lines:
      tab_split = line.split("\t")
      if len(tab_split) > 2:
        raise ValueError("Each line must have at most one tab separator.")
      if len(tab_split) == 1:
        targets = tab_split[0].strip()
      else:
        targets = tab_split[1].strip()
        inputs = tab_split[0].strip()
      # Run encoders and append EOS symbol.
      targets_numpy = encoders["targets"].encode(
          targets) + [text_encoder.EOS_ID]
      if has_inputs:
        inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
      # Prepare the feed.
      if has_inputs:
        feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
      else:
        feed = {targets_ph: targets_numpy}
      # Get the score.
      np_loss = sess.run(losses["training"], feed)
      results.append(np_loss)
  return results
python
def score_file(filename):
  """Score each line in a file and return the scores."""
  # Prepare model.
  hparams = create_hparams()
  encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
  has_inputs = "inputs" in encoders

  # Prepare features for feeding into the model.
  if has_inputs:
    inputs_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
    batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1])  # Make it 4D.
  targets_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
  batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1])  # Make it 4D.
  if has_inputs:
    features = {"inputs": batch_inputs, "targets": batch_targets}
  else:
    features = {"targets": batch_targets}

  # Prepare the model and the graph when model runs on features.
  model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
  _, losses = model(features)
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # Load weights from checkpoint.
    if FLAGS.checkpoint_path is None:
      ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
      ckpt = ckpts.model_checkpoint_path
    else:
      ckpt = FLAGS.checkpoint_path
    saver.restore(sess, ckpt)
    # Run on each line.
    with tf.gfile.Open(filename) as f:
      lines = f.readlines()
    results = []
    for line in lines:
      tab_split = line.split("\t")
      if len(tab_split) > 2:
        raise ValueError("Each line must have at most one tab separator.")
      if len(tab_split) == 1:
        targets = tab_split[0].strip()
      else:
        targets = tab_split[1].strip()
        inputs = tab_split[0].strip()
      # Run encoders and append EOS symbol.
      targets_numpy = encoders["targets"].encode(
          targets) + [text_encoder.EOS_ID]
      if has_inputs:
        inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
      # Prepare the feed.
      if has_inputs:
        feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
      else:
        feed = {targets_ph: targets_numpy}
      # Get the score.
      np_loss = sess.run(losses["training"], feed)
      results.append(np_loss)
  return results
[ "def", "score_file", "(", "filename", ")", ":", "# Prepare model.", "hparams", "=", "create_hparams", "(", ")", "encoders", "=", "registry", ".", "problem", "(", "FLAGS", ".", "problem", ")", ".", "feature_encoders", "(", "FLAGS", ".", "data_dir", ")", "has_inputs", "=", "\"inputs\"", "in", "encoders", "# Prepare features for feeding into the model.", "if", "has_inputs", ":", "inputs_ph", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "int32", ")", "# Just length dimension.", "batch_inputs", "=", "tf", ".", "reshape", "(", "inputs_ph", ",", "[", "1", ",", "-", "1", ",", "1", ",", "1", "]", ")", "# Make it 4D.", "targets_ph", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "int32", ")", "# Just length dimension.", "batch_targets", "=", "tf", ".", "reshape", "(", "targets_ph", ",", "[", "1", ",", "-", "1", ",", "1", ",", "1", "]", ")", "# Make it 4D.", "if", "has_inputs", ":", "features", "=", "{", "\"inputs\"", ":", "batch_inputs", ",", "\"targets\"", ":", "batch_targets", "}", "else", ":", "features", "=", "{", "\"targets\"", ":", "batch_targets", "}", "# Prepare the model and the graph when model runs on features.", "model", "=", "registry", ".", "model", "(", "FLAGS", ".", "model", ")", "(", "hparams", ",", "tf", ".", "estimator", ".", "ModeKeys", ".", "EVAL", ")", "_", ",", "losses", "=", "model", "(", "features", ")", "saver", "=", "tf", ".", "train", ".", "Saver", "(", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "# Load weights from checkpoint.", "if", "FLAGS", ".", "checkpoint_path", "is", "None", ":", "ckpts", "=", "tf", ".", "train", ".", "get_checkpoint_state", "(", "FLAGS", ".", "output_dir", ")", "ckpt", "=", "ckpts", ".", "model_checkpoint_path", "else", ":", "ckpt", "=", "FLAGS", ".", "checkpoint_path", "saver", ".", "restore", "(", "sess", ",", "ckpt", ")", "# Run on each line.", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "results", "=", "[", "]", "for", "line", "in", "lines", ":", "tab_split", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "tab_split", ")", ">", "2", ":", "raise", "ValueError", "(", "\"Each line must have at most one tab separator.\"", ")", "if", "len", "(", "tab_split", ")", "==", "1", ":", "targets", "=", "tab_split", "[", "0", "]", ".", "strip", "(", ")", "else", ":", "targets", "=", "tab_split", "[", "1", "]", ".", "strip", "(", ")", "inputs", "=", "tab_split", "[", "0", "]", ".", "strip", "(", ")", "# Run encoders and append EOS symbol.", "targets_numpy", "=", "encoders", "[", "\"targets\"", "]", ".", "encode", "(", "targets", ")", "+", "[", "text_encoder", ".", "EOS_ID", "]", "if", "has_inputs", ":", "inputs_numpy", "=", "encoders", "[", "\"inputs\"", "]", ".", "encode", "(", "inputs", ")", "+", "[", "text_encoder", ".", "EOS_ID", "]", "# Prepare the feed.", "if", "has_inputs", ":", "feed", "=", "{", "inputs_ph", ":", "inputs_numpy", ",", "targets_ph", ":", "targets_numpy", "}", "else", ":", "feed", "=", "{", "targets_ph", ":", "targets_numpy", "}", "# Get the score.", "np_loss", "=", "sess", ".", "run", "(", "losses", "[", "\"training\"", "]", ",", "feed", ")", "results", ".", "append", "(", "np_loss", ")", "return", "results" ]
Score each line in a file and return the scores.
[ "Score", "each", "line", "in", "a", "file", "and", "return", "the", "scores", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_decoder.py#L107-L164
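score_file expects one example per line: either targets alone, or inputs and targets separated by a single tab. The stand-alone sketch below replays just the line-parsing logic on a hypothetical translation pair.

for line in ["Guten Morgen\tGood morning\n", "Good morning\n"]:
  tab_split = line.split("\t")
  if len(tab_split) > 2:
    raise ValueError("Each line must have at most one tab separator.")
  if len(tab_split) == 1:
    inputs, targets = None, tab_split[0].strip()   # targets-only line
  else:
    inputs, targets = tab_split[0].strip(), tab_split[1].strip()
  print(inputs, "->", targets)
# Guten Morgen -> Good morning
# None -> Good morning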
22,656
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
time_to_channels
def time_to_channels(embedded_video):
  """Put time dimension on channels in an embedded video."""
  video_shape = common_layers.shape_list(embedded_video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4])
  return tf.reshape(transposed, [
      video_shape[0], video_shape[2], video_shape[3],
      video_shape[1] * video_shape[4]
  ])
python
def time_to_channels(embedded_video):
  """Put time dimension on channels in an embedded video."""
  video_shape = common_layers.shape_list(embedded_video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4])
  return tf.reshape(transposed, [
      video_shape[0], video_shape[2], video_shape[3],
      video_shape[1] * video_shape[4]
  ])
[ "def", "time_to_channels", "(", "embedded_video", ")", ":", "video_shape", "=", "common_layers", ".", "shape_list", "(", "embedded_video", ")", "if", "len", "(", "video_shape", ")", "!=", "5", ":", "raise", "ValueError", "(", "\"Assuming videos given as tensors in the format \"", "\"[batch, time, height, width, channels] but got one \"", "\"of shape: %s\"", "%", "str", "(", "video_shape", ")", ")", "transposed", "=", "tf", ".", "transpose", "(", "embedded_video", ",", "[", "0", ",", "2", ",", "3", ",", "1", ",", "4", "]", ")", "return", "tf", ".", "reshape", "(", "transposed", ",", "[", "video_shape", "[", "0", "]", ",", "video_shape", "[", "2", "]", ",", "video_shape", "[", "3", "]", ",", "video_shape", "[", "1", "]", "*", "video_shape", "[", "4", "]", "]", ")" ]
Put time dimension on channels in an embedded video.
[ "Put", "time", "dimension", "on", "channels", "in", "an", "embedded", "video", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L38-L49
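The transpose-then-reshape above folds the time axis into the channel axis. A NumPy equivalent (an assumption; the record uses tf ops) makes the shape arithmetic concrete:

import numpy as np

batch, time, height, width, channels = 2, 4, 8, 8, 3
video = np.zeros((batch, time, height, width, channels), np.float32)
transposed = np.transpose(video, [0, 2, 3, 1, 4])  # [B, H, W, T, C]
folded = transposed.reshape(batch, height, width, time * channels)
print(folded.shape)  # (2, 8, 8, 12)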
22,657
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_autoregressive
def autoencoder_autoregressive():
  """Autoregressive autoencoder model."""
  hparams = autoencoder_basic()
  hparams.add_hparam("autoregressive_forget_base", False)
  hparams.add_hparam("autoregressive_mode", "none")
  hparams.add_hparam("autoregressive_decode_steps", 0)
  hparams.add_hparam("autoregressive_eval_pure_autoencoder", False)
  hparams.add_hparam("autoregressive_gumbel_sample", False)
  return hparams
python
def autoencoder_autoregressive():
  """Autoregressive autoencoder model."""
  hparams = autoencoder_basic()
  hparams.add_hparam("autoregressive_forget_base", False)
  hparams.add_hparam("autoregressive_mode", "none")
  hparams.add_hparam("autoregressive_decode_steps", 0)
  hparams.add_hparam("autoregressive_eval_pure_autoencoder", False)
  hparams.add_hparam("autoregressive_gumbel_sample", False)
  return hparams
[ "def", "autoencoder_autoregressive", "(", ")", ":", "hparams", "=", "autoencoder_basic", "(", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_forget_base\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_mode\"", ",", "\"none\"", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_decode_steps\"", ",", "0", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_eval_pure_autoencoder\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_gumbel_sample\"", ",", "False", ")", "return", "hparams" ]
Autoregressive autoencoder model.
[ "Autoregressive", "autoencoder", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1073-L1081
22,658
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_residual
def autoencoder_residual():
  """Residual autoencoder model."""
  hparams = autoencoder_autoregressive()
  hparams.optimizer = "Adafactor"
  hparams.clip_grad_norm = 1.0
  hparams.learning_rate_constant = 0.5
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
  hparams.num_hidden_layers = 5
  hparams.hidden_size = 64
  hparams.max_hidden_size = 1024
  hparams.add_hparam("num_residual_layers", 2)
  hparams.add_hparam("residual_kernel_height", 3)
  hparams.add_hparam("residual_kernel_width", 3)
  hparams.add_hparam("residual_filter_multiplier", 2.0)
  hparams.add_hparam("residual_dropout", 0.2)
  hparams.add_hparam("residual_use_separable_conv", int(True))
  hparams.add_hparam("kl_beta", 1.0)
  return hparams
python
def autoencoder_residual():
  """Residual autoencoder model."""
  hparams = autoencoder_autoregressive()
  hparams.optimizer = "Adafactor"
  hparams.clip_grad_norm = 1.0
  hparams.learning_rate_constant = 0.5
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
  hparams.num_hidden_layers = 5
  hparams.hidden_size = 64
  hparams.max_hidden_size = 1024
  hparams.add_hparam("num_residual_layers", 2)
  hparams.add_hparam("residual_kernel_height", 3)
  hparams.add_hparam("residual_kernel_width", 3)
  hparams.add_hparam("residual_filter_multiplier", 2.0)
  hparams.add_hparam("residual_dropout", 0.2)
  hparams.add_hparam("residual_use_separable_conv", int(True))
  hparams.add_hparam("kl_beta", 1.0)
  return hparams
[ "def", "autoencoder_residual", "(", ")", ":", "hparams", "=", "autoencoder_autoregressive", "(", ")", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "clip_grad_norm", "=", "1.0", "hparams", ".", "learning_rate_constant", "=", "0.5", "hparams", ".", "learning_rate_warmup_steps", "=", "500", "hparams", ".", "learning_rate_schedule", "=", "\"constant * linear_warmup * rsqrt_decay\"", "hparams", ".", "num_hidden_layers", "=", "5", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "max_hidden_size", "=", "1024", "hparams", ".", "add_hparam", "(", "\"num_residual_layers\"", ",", "2", ")", "hparams", ".", "add_hparam", "(", "\"residual_kernel_height\"", ",", "3", ")", "hparams", ".", "add_hparam", "(", "\"residual_kernel_width\"", ",", "3", ")", "hparams", ".", "add_hparam", "(", "\"residual_filter_multiplier\"", ",", "2.0", ")", "hparams", ".", "add_hparam", "(", "\"residual_dropout\"", ",", "0.2", ")", "hparams", ".", "add_hparam", "(", "\"residual_use_separable_conv\"", ",", "int", "(", "True", ")", ")", "hparams", ".", "add_hparam", "(", "\"kl_beta\"", ",", "1.0", ")", "return", "hparams" ]
Residual autoencoder model.
[ "Residual", "autoencoder", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1085-L1103
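The autoencoder presets in this file form a chain (residual extends autoregressive, which extends basic): inherited fields are overwritten by plain assignment, while genuinely new fields must go through add_hparam. A toy stand-in for the HParams object (not the real TensorFlow class) illustrates that contract:

class ToyHParams(object):  # stand-in, not the real tf HParams class
  def add_hparam(self, name, value):
    # Adding an already-present hparam is an error, mirroring HParams.
    if hasattr(self, name):
      raise ValueError("hparam %s already exists" % name)
    setattr(self, name, value)

hp = ToyHParams()
hp.add_hparam("num_residual_layers", 2)  # new field: must use add_hparam
hp.num_residual_layers = 3               # existing field: plain assignment
print(hp.num_residual_layers)            # 3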
22,659
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_residual_text
def autoencoder_residual_text():
  """Residual autoencoder model for text."""
  hparams = autoencoder_residual()
  hparams.bottleneck_bits = 32
  hparams.batch_size = 1024
  hparams.hidden_size = 64
  hparams.max_hidden_size = 512
  hparams.bottleneck_noise = 0.0
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.autoregressive_mode = "none"
  hparams.sample_width = 1
  return hparams
python
def autoencoder_residual_text():
  """Residual autoencoder model for text."""
  hparams = autoencoder_residual()
  hparams.bottleneck_bits = 32
  hparams.batch_size = 1024
  hparams.hidden_size = 64
  hparams.max_hidden_size = 512
  hparams.bottleneck_noise = 0.0
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.autoregressive_mode = "none"
  hparams.sample_width = 1
  return hparams
[ "def", "autoencoder_residual_text", "(", ")", ":", "hparams", "=", "autoencoder_residual", "(", ")", "hparams", ".", "bottleneck_bits", "=", "32", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "max_hidden_size", "=", "512", "hparams", ".", "bottleneck_noise", "=", "0.0", "hparams", ".", "bottom", "=", "{", "\"inputs\"", ":", "modalities", ".", "identity_bottom", ",", "\"targets\"", ":", "modalities", ".", "identity_bottom", ",", "}", "hparams", ".", "top", "=", "{", "\"targets\"", ":", "modalities", ".", "identity_top", ",", "}", "hparams", ".", "autoregressive_mode", "=", "\"none\"", "hparams", ".", "sample_width", "=", "1", "return", "hparams" ]
Residual autoencoder model for text.
[ "Residual", "autoencoder", "model", "for", "text", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1107-L1124
22,660
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_residual_discrete
def autoencoder_residual_discrete():
  """Residual discrete autoencoder model."""
  hparams = autoencoder_residual()
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_noise = 0.05
  hparams.add_hparam("discretize_warmup_steps", 16000)
  hparams.add_hparam("bottleneck_kind", "tanh_discrete")
  hparams.add_hparam("isemhash_noise_dev", 0.5)
  hparams.add_hparam("isemhash_mix_prob", 0.5)
  hparams.add_hparam("isemhash_filter_size_multiplier", 2.0)
  hparams.add_hparam("vq_beta", 0.25)
  hparams.add_hparam("vq_decay", 0.999)
  hparams.add_hparam("vq_epsilon", 1e-5)
  return hparams
python
def autoencoder_residual_discrete():
  """Residual discrete autoencoder model."""
  hparams = autoencoder_residual()
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_noise = 0.05
  hparams.add_hparam("discretize_warmup_steps", 16000)
  hparams.add_hparam("bottleneck_kind", "tanh_discrete")
  hparams.add_hparam("isemhash_noise_dev", 0.5)
  hparams.add_hparam("isemhash_mix_prob", 0.5)
  hparams.add_hparam("isemhash_filter_size_multiplier", 2.0)
  hparams.add_hparam("vq_beta", 0.25)
  hparams.add_hparam("vq_decay", 0.999)
  hparams.add_hparam("vq_epsilon", 1e-5)
  return hparams
[ "def", "autoencoder_residual_discrete", "(", ")", ":", "hparams", "=", "autoencoder_residual", "(", ")", "hparams", ".", "bottleneck_bits", "=", "1024", "hparams", ".", "bottleneck_noise", "=", "0.05", "hparams", ".", "add_hparam", "(", "\"discretize_warmup_steps\"", ",", "16000", ")", "hparams", ".", "add_hparam", "(", "\"bottleneck_kind\"", ",", "\"tanh_discrete\"", ")", "hparams", ".", "add_hparam", "(", "\"isemhash_noise_dev\"", ",", "0.5", ")", "hparams", ".", "add_hparam", "(", "\"isemhash_mix_prob\"", ",", "0.5", ")", "hparams", ".", "add_hparam", "(", "\"isemhash_filter_size_multiplier\"", ",", "2.0", ")", "hparams", ".", "add_hparam", "(", "\"vq_beta\"", ",", "0.25", ")", "hparams", ".", "add_hparam", "(", "\"vq_decay\"", ",", "0.999", ")", "hparams", ".", "add_hparam", "(", "\"vq_epsilon\"", ",", "1e-5", ")", "return", "hparams" ]
Residual discrete autoencoder model.
[ "Residual", "discrete", "autoencoder", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1140-L1153
22,661
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_residual_discrete_big
def autoencoder_residual_discrete_big():
  """Residual discrete autoencoder model, big version."""
  hparams = autoencoder_residual_discrete()
  hparams.hidden_size = 128
  hparams.max_hidden_size = 4096
  hparams.bottleneck_noise = 0.1
  hparams.residual_dropout = 0.4
  return hparams
python
def autoencoder_residual_discrete_big():
  """Residual discrete autoencoder model, big version."""
  hparams = autoencoder_residual_discrete()
  hparams.hidden_size = 128
  hparams.max_hidden_size = 4096
  hparams.bottleneck_noise = 0.1
  hparams.residual_dropout = 0.4
  return hparams
[ "def", "autoencoder_residual_discrete_big", "(", ")", ":", "hparams", "=", "autoencoder_residual_discrete", "(", ")", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "max_hidden_size", "=", "4096", "hparams", ".", "bottleneck_noise", "=", "0.1", "hparams", ".", "residual_dropout", "=", "0.4", "return", "hparams" ]
Residual discrete autoencoder model, big version.
[ "Residual", "discrete", "autoencoder", "model", "big", "version", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1157-L1164
22,662
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_ordered_text
def autoencoder_ordered_text():
  """Ordered discrete autoencoder model for text."""
  hparams = autoencoder_ordered_discrete()
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_shared_bits = 1024-64
  hparams.bottleneck_shared_bits_start_warmup = 75000
  hparams.bottleneck_shared_bits_stop_warmup = 275000
  hparams.num_hidden_layers = 7
  hparams.batch_size = 1024
  hparams.autoregressive_mode = "conv5"
  hparams.max_hidden_size = 1024
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.sample_height = 128
  hparams.sample_width = 1
  return hparams
python
def autoencoder_ordered_text():
  """Ordered discrete autoencoder model for text."""
  hparams = autoencoder_ordered_discrete()
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_shared_bits = 1024-64
  hparams.bottleneck_shared_bits_start_warmup = 75000
  hparams.bottleneck_shared_bits_stop_warmup = 275000
  hparams.num_hidden_layers = 7
  hparams.batch_size = 1024
  hparams.autoregressive_mode = "conv5"
  hparams.max_hidden_size = 1024
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.sample_height = 128
  hparams.sample_width = 1
  return hparams
[ "def", "autoencoder_ordered_text", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "bottleneck_bits", "=", "1024", "hparams", ".", "bottleneck_shared_bits", "=", "1024", "-", "64", "hparams", ".", "bottleneck_shared_bits_start_warmup", "=", "75000", "hparams", ".", "bottleneck_shared_bits_stop_warmup", "=", "275000", "hparams", ".", "num_hidden_layers", "=", "7", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "autoregressive_mode", "=", "\"conv5\"", "hparams", ".", "max_hidden_size", "=", "1024", "hparams", ".", "bottom", "=", "{", "\"inputs\"", ":", "modalities", ".", "identity_bottom", ",", "\"targets\"", ":", "modalities", ".", "identity_bottom", ",", "}", "hparams", ".", "top", "=", "{", "\"targets\"", ":", "modalities", ".", "identity_top", ",", "}", "hparams", ".", "sample_height", "=", "128", "hparams", ".", "sample_width", "=", "1", "return", "hparams" ]
Ordered discrete autoencoder model for text.
[ "Ordered", "discrete", "autoencoder", "model", "for", "text", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1214-L1234
22,663
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_ordered_text_small
def autoencoder_ordered_text_small():
  """Ordered discrete autoencoder model for text, small version."""
  hparams = autoencoder_ordered_text()
  hparams.bottleneck_bits = 32
  hparams.num_hidden_layers = 3
  hparams.hidden_size = 64
  hparams.max_hidden_size = 512
  hparams.bottleneck_noise = 0.0
  hparams.autoregressive_mode = "conv5"
  hparams.sample_height = 4
  return hparams
python
def autoencoder_ordered_text_small():
  """Ordered discrete autoencoder model for text, small version."""
  hparams = autoencoder_ordered_text()
  hparams.bottleneck_bits = 32
  hparams.num_hidden_layers = 3
  hparams.hidden_size = 64
  hparams.max_hidden_size = 512
  hparams.bottleneck_noise = 0.0
  hparams.autoregressive_mode = "conv5"
  hparams.sample_height = 4
  return hparams
[ "def", "autoencoder_ordered_text_small", "(", ")", ":", "hparams", "=", "autoencoder_ordered_text", "(", ")", "hparams", ".", "bottleneck_bits", "=", "32", "hparams", ".", "num_hidden_layers", "=", "3", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "max_hidden_size", "=", "512", "hparams", ".", "bottleneck_noise", "=", "0.0", "hparams", ".", "autoregressive_mode", "=", "\"conv5\"", "hparams", ".", "sample_height", "=", "4", "return", "hparams" ]
Ordered discrete autoencoder model for text, small version.
[ "Ordered", "discrete", "autoencoder", "model", "for", "text", "small", "version", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1238-L1248
22,664
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_discrete_pong
def autoencoder_discrete_pong():
  """Discrete autoencoder model for compressing pong frames."""
  hparams = autoencoder_ordered_discrete()
  hparams.num_hidden_layers = 3
  hparams.bottleneck_bits = 24
  hparams.batch_size = 2
  hparams.gan_loss_factor = 0.01
  hparams.bottleneck_l2_factor = 0.001
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  return hparams
python
def autoencoder_discrete_pong():
  """Discrete autoencoder model for compressing pong frames."""
  hparams = autoencoder_ordered_discrete()
  hparams.num_hidden_layers = 3
  hparams.bottleneck_bits = 24
  hparams.batch_size = 2
  hparams.gan_loss_factor = 0.01
  hparams.bottleneck_l2_factor = 0.001
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  return hparams
[ "def", "autoencoder_discrete_pong", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "num_hidden_layers", "=", "3", "hparams", ".", "bottleneck_bits", "=", "24", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "gan_loss_factor", "=", "0.01", "hparams", ".", "bottleneck_l2_factor", "=", "0.001", "hparams", ".", "add_hparam", "(", "\"video_modality_loss_cutoff\"", ",", "0.02", ")", "return", "hparams" ]
Discrete autoencoder model for compressing pong frames.
[ "Discrete", "autoencoder", "model", "for", "compressing", "pong", "frames", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1261-L1270
22,665
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_discrete_tiny
def autoencoder_discrete_tiny():
  """Discrete autoencoder model for compressing pong frames for testing."""
  hparams = autoencoder_ordered_discrete()
  hparams.num_hidden_layers = 2
  hparams.bottleneck_bits = 24
  hparams.batch_size = 2
  hparams.gan_loss_factor = 0.
  hparams.bottleneck_l2_factor = 0.001
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  hparams.num_residual_layers = 1
  hparams.hidden_size = 32
  hparams.max_hidden_size = 64
  return hparams
python
def autoencoder_discrete_tiny():
  """Discrete autoencoder model for compressing pong frames for testing."""
  hparams = autoencoder_ordered_discrete()
  hparams.num_hidden_layers = 2
  hparams.bottleneck_bits = 24
  hparams.batch_size = 2
  hparams.gan_loss_factor = 0.
  hparams.bottleneck_l2_factor = 0.001
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  hparams.num_residual_layers = 1
  hparams.hidden_size = 32
  hparams.max_hidden_size = 64
  return hparams
[ "def", "autoencoder_discrete_tiny", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "bottleneck_bits", "=", "24", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "gan_loss_factor", "=", "0.", "hparams", ".", "bottleneck_l2_factor", "=", "0.001", "hparams", ".", "add_hparam", "(", "\"video_modality_loss_cutoff\"", ",", "0.02", ")", "hparams", ".", "num_residual_layers", "=", "1", "hparams", ".", "hidden_size", "=", "32", "hparams", ".", "max_hidden_size", "=", "64", "return", "hparams" ]
Discrete autoencoder model for compressing pong frames for testing.
[ "Discrete", "autoencoder", "model", "for", "compressing", "pong", "frames", "for", "testing", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1274-L1286
22,666
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_discrete_cifar
def autoencoder_discrete_cifar():
  """Discrete autoencoder model for compressing cifar."""
  hparams = autoencoder_ordered_discrete()
  hparams.bottleneck_noise = 0.0
  hparams.bottleneck_bits = 90
  hparams.num_hidden_layers = 2
  hparams.hidden_size = 256
  hparams.num_residual_layers = 4
  hparams.batch_size = 32
  hparams.learning_rate_constant = 1.0
  return hparams
python
def autoencoder_discrete_cifar():
  """Discrete autoencoder model for compressing cifar."""
  hparams = autoencoder_ordered_discrete()
  hparams.bottleneck_noise = 0.0
  hparams.bottleneck_bits = 90
  hparams.num_hidden_layers = 2
  hparams.hidden_size = 256
  hparams.num_residual_layers = 4
  hparams.batch_size = 32
  hparams.learning_rate_constant = 1.0
  return hparams
[ "def", "autoencoder_discrete_cifar", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "bottleneck_noise", "=", "0.0", "hparams", ".", "bottleneck_bits", "=", "90", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "num_residual_layers", "=", "4", "hparams", ".", "batch_size", "=", "32", "hparams", ".", "learning_rate_constant", "=", "1.0", "return", "hparams" ]
Discrete autoencoder model for compressing cifar.
[ "Discrete", "autoencoder", "model", "for", "compressing", "cifar", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1290-L1300
22,667
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
autoencoder_range
def autoencoder_range(rhp):
  """Tuning grid of the main autoencoder params."""
  rhp.set_float("dropout", 0.01, 0.3)
  rhp.set_float("gan_loss_factor", 0.01, 0.1)
  rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE)
  rhp.set_discrete("bottleneck_warmup_steps", [200, 2000])
  rhp.set_float("gumbel_temperature", 0, 1)
  rhp.set_float("gumbel_noise_factor", 0, 0.5)
python
def autoencoder_range(rhp):
  """Tuning grid of the main autoencoder params."""
  rhp.set_float("dropout", 0.01, 0.3)
  rhp.set_float("gan_loss_factor", 0.01, 0.1)
  rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE)
  rhp.set_discrete("bottleneck_warmup_steps", [200, 2000])
  rhp.set_float("gumbel_temperature", 0, 1)
  rhp.set_float("gumbel_noise_factor", 0, 0.5)
[ "def", "autoencoder_range", "(", "rhp", ")", ":", "rhp", ".", "set_float", "(", "\"dropout\"", ",", "0.01", ",", "0.3", ")", "rhp", ".", "set_float", "(", "\"gan_loss_factor\"", ",", "0.01", ",", "0.1", ")", "rhp", ".", "set_float", "(", "\"bottleneck_l2_factor\"", ",", "0.001", ",", "0.1", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_discrete", "(", "\"bottleneck_warmup_steps\"", ",", "[", "200", ",", "2000", "]", ")", "rhp", ".", "set_float", "(", "\"gumbel_temperature\"", ",", "0", ",", "1", ")", "rhp", ".", "set_float", "(", "\"gumbel_noise_factor\"", ",", "0", ",", "0.5", ")" ]
Tuning grid of the main autoencoder params.
[ "Tuning", "grid", "of", "the", "main", "autoencoder", "params", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1304-L1311
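autoencoder_range only declares a search space on a RangedHParams-like object. With a toy recorder standing in for the real T2T class (set_float, set_discrete, and LOG_SCALE are the only pieces of the API it touches), the declared grid can be inspected:

class ToyRangedHParams(object):  # stand-in recorder, not the T2T class
  LOG_SCALE = "log"
  def __init__(self):
    self.space = {}
  def set_float(self, name, low, high, scale=None):
    self.space[name] = ("float", low, high, scale)
  def set_discrete(self, name, values):
    self.space[name] = ("discrete", values)

rhp = ToyRangedHParams()
autoencoder_range(rhp)  # assumes the function above is in scope
print(sorted(rhp.space))
# ['bottleneck_l2_factor', 'bottleneck_warmup_steps', 'dropout',
#  'gan_loss_factor', 'gumbel_noise_factor', 'gumbel_temperature']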
22,668
tensorflow/tensor2tensor
tensor2tensor/models/research/vqa_attention.py
question_encoder
def question_encoder(question, hparams, name="encoder"):
  """Question encoder, run LSTM encoder and get the last output as encoding."""
  with tf.variable_scope(name, "encoder", values=[question]):
    question = common_layers.flatten4d3d(question)
    padding = common_attention.embedding_to_padding(question)
    length = common_attention.padding_to_length(padding)

    max_question_length = hparams.max_question_length
    question = question[:, :max_question_length, :]
    actual_question_length = common_layers.shape_list(question)[1]
    length = tf.minimum(length, max_question_length)
    padding = [[0, 0],
               [0, max_question_length-actual_question_length],
               [0, 0]]
    question = tf.pad(question, padding)
    question_shape = question.get_shape().as_list()
    question_shape[1] = max_question_length
    question.set_shape(question_shape)

    # apply tanh dropout on question embedding
    question = tf.tanh(question)
    question = tf.nn.dropout(question, keep_prob=1.-hparams.dropout)

    question = [question[:, i, :] for i in range(max_question_length)]

    # rnn_layers = [_get_rnn_cell(hparams)
    #               for _ in range(hparams.num_rnn_layers)]
    # rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
    rnn_cell = _get_rnn_cell(hparams)
    # outputs, _ = tf.nn.dynamic_rnn(
    #     rnn_cell, question, length, dtype=tf.float32)
    _, state = tf.nn.static_rnn(rnn_cell, question, sequence_length=length,
                                dtype=tf.float32)
    # outputs = [tf.expand_dims(output, axis=1) for output in outputs]
    # outputs = tf.concat(outputs, axis=1)
    # utils.collect_named_outputs("vqa_attention_debug", "question_output",
    #                             outputs)
    # utils.collect_named_outputs("vqa_attention_debug", "question_state",
    #                             state.h)
    # batch_size = common_layers.shape_list(outputs)[0]
    # row_indices = tf.range(batch_size)
    # # length - 1 as index
    # indices = tf.transpose([row_indices, tf.maximum(length-1, 0)])
    # last_output = tf.gather_nd(outputs, indices)
    # utils.collect_named_outputs("vqa_attention_debug",
    #                             "question_final_output", last_output)

    return state.h
python
def question_encoder(question, hparams, name="encoder"):
  """Question encoder, run LSTM encoder and get the last output as encoding."""
  with tf.variable_scope(name, "encoder", values=[question]):
    question = common_layers.flatten4d3d(question)
    padding = common_attention.embedding_to_padding(question)
    length = common_attention.padding_to_length(padding)

    max_question_length = hparams.max_question_length
    question = question[:, :max_question_length, :]
    actual_question_length = common_layers.shape_list(question)[1]
    length = tf.minimum(length, max_question_length)
    padding = [[0, 0],
               [0, max_question_length-actual_question_length],
               [0, 0]]
    question = tf.pad(question, padding)
    question_shape = question.get_shape().as_list()
    question_shape[1] = max_question_length
    question.set_shape(question_shape)

    # apply tanh dropout on question embedding
    question = tf.tanh(question)
    question = tf.nn.dropout(question, keep_prob=1.-hparams.dropout)

    question = [question[:, i, :] for i in range(max_question_length)]

    # rnn_layers = [_get_rnn_cell(hparams)
    #               for _ in range(hparams.num_rnn_layers)]
    # rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
    rnn_cell = _get_rnn_cell(hparams)
    # outputs, _ = tf.nn.dynamic_rnn(
    #     rnn_cell, question, length, dtype=tf.float32)
    _, state = tf.nn.static_rnn(rnn_cell, question, sequence_length=length,
                                dtype=tf.float32)
    # outputs = [tf.expand_dims(output, axis=1) for output in outputs]
    # outputs = tf.concat(outputs, axis=1)
    # utils.collect_named_outputs("vqa_attention_debug", "question_output",
    #                             outputs)
    # utils.collect_named_outputs("vqa_attention_debug", "question_state",
    #                             state.h)
    # batch_size = common_layers.shape_list(outputs)[0]
    # row_indices = tf.range(batch_size)
    # # length - 1 as index
    # indices = tf.transpose([row_indices, tf.maximum(length-1, 0)])
    # last_output = tf.gather_nd(outputs, indices)
    # utils.collect_named_outputs("vqa_attention_debug",
    #                             "question_final_output", last_output)

    return state.h
[ "def", "question_encoder", "(", "question", ",", "hparams", ",", "name", "=", "\"encoder\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "\"encoder\"", ",", "values", "=", "[", "question", "]", ")", ":", "question", "=", "common_layers", ".", "flatten4d3d", "(", "question", ")", "padding", "=", "common_attention", ".", "embedding_to_padding", "(", "question", ")", "length", "=", "common_attention", ".", "padding_to_length", "(", "padding", ")", "max_question_length", "=", "hparams", ".", "max_question_length", "question", "=", "question", "[", ":", ",", ":", "max_question_length", ",", ":", "]", "actual_question_length", "=", "common_layers", ".", "shape_list", "(", "question", ")", "[", "1", "]", "length", "=", "tf", ".", "minimum", "(", "length", ",", "max_question_length", ")", "padding", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "max_question_length", "-", "actual_question_length", "]", ",", "[", "0", ",", "0", "]", "]", "question", "=", "tf", ".", "pad", "(", "question", ",", "padding", ")", "question_shape", "=", "question", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "question_shape", "[", "1", "]", "=", "max_question_length", "question", ".", "set_shape", "(", "question_shape", ")", "# apply tanh dropout on question embedding", "question", "=", "tf", ".", "tanh", "(", "question", ")", "question", "=", "tf", ".", "nn", ".", "dropout", "(", "question", ",", "keep_prob", "=", "1.", "-", "hparams", ".", "dropout", ")", "question", "=", "[", "question", "[", ":", ",", "i", ",", ":", "]", "for", "i", "in", "range", "(", "max_question_length", ")", "]", "# rnn_layers = [_get_rnn_cell(hparams)", "# for _ in range(hparams.num_rnn_layers)]", "# rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)", "rnn_cell", "=", "_get_rnn_cell", "(", "hparams", ")", "# outputs, _ = tf.nn.dynamic_rnn(", "# rnn_cell, question, length, dtype=tf.float32)", "_", ",", "state", "=", "tf", ".", "nn", ".", "static_rnn", "(", "rnn_cell", ",", "question", ",", "sequence_length", "=", "length", ",", "dtype", "=", "tf", ".", "float32", ")", "# outputs = [tf.expand_dims(output, axis=1) for output in outputs]", "# outputs = tf.concat(outputs, axis=1)", "# utils.collect_named_outputs(\"vqa_attention_debug\", \"question_output\",", "# outputs)", "# utils.collect_named_outputs(\"vqa_attention_debug\", \"question_state\",", "# state.h)", "# batch_size = common_layers.shape_list(outputs)[0]", "# row_indices = tf.range(batch_size)", "# # length - 1 as index", "# indices = tf.transpose([row_indices, tf.maximum(length-1, 0)])", "# last_output = tf.gather_nd(outputs, indices)", "# utils.collect_named_outputs(\"vqa_attention_debug\",", "# \"question_final_output\", last_output)", "return", "state", ".", "h" ]
Question encoder: runs an LSTM encoder and returns the last output as the encoding.
[ "Question", "encoder", "runs", "an", "LSTM", "encoder", "and", "returns", "the", "last", "output", "as", "the", "encoding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L245-L295
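A minimal NumPy sketch of the record's fixed-length handling, which first truncates the time axis to max_question_length and then zero-pads back up so the static shape is known (the function name here is illustrative, not from the library):

import numpy as np

def pad_or_truncate(question, max_len):
    # Mirror the record: clip to max_len, then right-pad the time axis with
    # zeros so every example ends up with the same static question length.
    question = question[:, :max_len, :]
    pad = max_len - question.shape[1]
    return np.pad(question, [(0, 0), (0, pad), (0, 0)])

q = np.ones((2, 5, 8), dtype=np.float32)   # [batch, time, depth]
print(pad_or_truncate(q, 10).shape)        # (2, 10, 8)
print(pad_or_truncate(q, 3).shape)         # (2, 3, 8)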
22,669
tensorflow/tensor2tensor
tensor2tensor/trax/history.py
History.get
def get(self, mode, metric): """Get the history for the given metric and mode.""" if mode not in self._values: logging.info("Metric %s not found for mode %s", metric, mode) return [] return list(self._values[mode][metric])
python
def get(self, mode, metric): """Get the history for the given metric and mode.""" if mode not in self._values: logging.info("Metric %s not found for mode %s", metric, mode) return [] return list(self._values[mode][metric])
[ "def", "get", "(", "self", ",", "mode", ",", "metric", ")", ":", "if", "mode", "not", "in", "self", ".", "_values", ":", "logging", ".", "info", "(", "\"Metric %s not found for mode %s\"", ",", "metric", ",", "mode", ")", "return", "[", "]", "return", "list", "(", "self", ".", "_values", "[", "mode", "]", "[", "metric", "]", ")" ]
Get the history for the given metric and mode.
[ "Get", "the", "history", "for", "the", "given", "metric", "and", "mode", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/history.py#L58-L63
22,670
tensorflow/tensor2tensor
tensor2tensor/trax/history.py
History.metrics_for_mode
def metrics_for_mode(self, mode): """Metrics available for a given mode.""" if mode not in self._values: logging.info("Mode %s not found", mode) return [] return sorted(list(self._values[mode].keys()))
python
def metrics_for_mode(self, mode): """Metrics available for a given mode.""" if mode not in self._values: logging.info("Mode %s not found", mode) return [] return sorted(list(self._values[mode].keys()))
[ "def", "metrics_for_mode", "(", "self", ",", "mode", ")", ":", "if", "mode", "not", "in", "self", ".", "_values", ":", "logging", ".", "info", "(", "\"Mode %s not found\"", ",", "mode", ")", "return", "[", "]", "return", "sorted", "(", "list", "(", "self", ".", "_values", "[", "mode", "]", ".", "keys", "(", ")", ")", ")" ]
Metrics available for a given mode.
[ "Metrics", "available", "for", "a", "given", "mode", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/history.py#L70-L75
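A usage sketch for the two History accessors above, assuming `_values` is a mode -> metric -> values mapping filled in by a writer method elsewhere in the class. Note that `get` only guards the missing-mode case (and its log message mentions the metric); a missing metric under an existing mode would raise a KeyError unless `_values[mode]` is a defaultdict:

history = History()
# Hypothetical recording call; the actual writer method lives elsewhere
# in the class, e.g. history.append("train", "loss", (step, value)).
print(history.metrics_for_mode("train"))   # sorted metric names, or []
print(history.get("train", "loss"))        # a copy of the recorded values
print(history.get("eval", "loss"))         # [] plus an info log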
22,671
tensorflow/tensor2tensor
tensor2tensor/models/resnet.py
batch_norm_relu
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format="channels_first"): """Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`. """ if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if data_format == "channels_first": axis = 1 else: axis = 3 inputs = layers().BatchNormalization( axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training) if relu: inputs = tf.nn.relu(inputs) return inputs
python
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format="channels_first"): """Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`. """ if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if data_format == "channels_first": axis = 1 else: axis = 3 inputs = layers().BatchNormalization( axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training) if relu: inputs = tf.nn.relu(inputs) return inputs
[ "def", "batch_norm_relu", "(", "inputs", ",", "is_training", ",", "relu", "=", "True", ",", "init_zero", "=", "False", ",", "data_format", "=", "\"channels_first\"", ")", ":", "if", "init_zero", ":", "gamma_initializer", "=", "tf", ".", "zeros_initializer", "(", ")", "else", ":", "gamma_initializer", "=", "tf", ".", "ones_initializer", "(", ")", "if", "data_format", "==", "\"channels_first\"", ":", "axis", "=", "1", "else", ":", "axis", "=", "3", "inputs", "=", "layers", "(", ")", ".", "BatchNormalization", "(", "axis", "=", "axis", ",", "momentum", "=", "BATCH_NORM_DECAY", ",", "epsilon", "=", "BATCH_NORM_EPSILON", ",", "center", "=", "True", ",", "scale", "=", "True", ",", "fused", "=", "True", ",", "gamma_initializer", "=", "gamma_initializer", ")", "(", "inputs", ",", "training", "=", "is_training", ")", "if", "relu", ":", "inputs", "=", "tf", ".", "nn", ".", "relu", "(", "inputs", ")", "return", "inputs" ]
Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last" for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`.
[ "Performs", "a", "batch", "normalization", "followed", "by", "a", "ReLU", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L41-L81
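A pure-NumPy sketch of the batch-norm-then-ReLU computation, mainly to show why init_zero matters: with gamma initialized to zero the normalized output collapses to beta, so a residual branch that ends in such a BN starts out as the identity function:

import numpy as np

def batch_norm_relu_np(x, gamma, beta, eps=1e-3, relu=True):
    # Normalize per channel over the batch axis; channels are the last
    # axis in this toy layout.
    mean = x.mean(axis=0, keepdims=True)
    var = x.var(axis=0, keepdims=True)
    y = gamma * (x - mean) / np.sqrt(var + eps) + beta
    return np.maximum(y, 0.0) if relu else y

x = np.random.randn(8, 4).astype(np.float32)
# init_zero=True corresponds to gamma == 0: the output is just beta here.
print(batch_norm_relu_np(x, gamma=np.zeros(4), beta=np.zeros(4)))  # all zeros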
22,672
tensorflow/tensor2tensor
tensor2tensor/models/resnet.py
residual_block
def residual_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. """ del final_block shortcut = inputs inputs = batch_norm_relu(inputs, is_training, data_format=data_format) if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return inputs + shortcut
python
def residual_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. """ del final_block shortcut = inputs inputs = batch_norm_relu(inputs, is_training, data_format=data_format) if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return inputs + shortcut
[ "def", "residual_block", "(", "inputs", ",", "filters", ",", "is_training", ",", "projection_shortcut", ",", "strides", ",", "final_block", ",", "data_format", "=", "\"channels_first\"", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "keep_prob", "=", "None", ")", ":", "del", "final_block", "shortcut", "=", "inputs", "inputs", "=", "batch_norm_relu", "(", "inputs", ",", "is_training", ",", "data_format", "=", "data_format", ")", "if", "projection_shortcut", "is", "not", "None", ":", "shortcut", "=", "projection_shortcut", "(", "inputs", ")", "inputs", "=", "conv2d_fixed_padding", "(", "inputs", "=", "inputs", ",", "filters", "=", "filters", ",", "kernel_size", "=", "3", ",", "strides", "=", "strides", ",", "data_format", "=", "data_format", ",", "use_td", "=", "use_td", ",", "targeting_rate", "=", "targeting_rate", ",", "keep_prob", "=", "keep_prob", ",", "is_training", "=", "is_training", ")", "inputs", "=", "batch_norm_relu", "(", "inputs", ",", "is_training", ",", "data_format", "=", "data_format", ")", "inputs", "=", "conv2d_fixed_padding", "(", "inputs", "=", "inputs", ",", "filters", "=", "filters", ",", "kernel_size", "=", "3", ",", "strides", "=", "1", ",", "data_format", "=", "data_format", ",", "use_td", "=", "use_td", ",", "targeting_rate", "=", "targeting_rate", ",", "keep_prob", "=", "keep_prob", ",", "is_training", "=", "is_training", ")", "return", "inputs", "+", "shortcut" ]
Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last" for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block.
[ "Standard", "building", "block", "for", "residual", "networks", "with", "BN", "before", "convolutions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L191-L257
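Schematically, this is the pre-activation ("v2") residual ordering: BN and ReLU precede each convolution, and the projection shortcut (when present) is applied to the already-normalized input while the plain shortcut keeps the raw input. A stubbed-out sketch of the data flow (names illustrative):

def preact_residual_block(x, conv1, conv2, bn_relu, projection=None):
    # Pre-activation ordering: normalize/activate first, then convolve.
    h = bn_relu(x)
    # The projection (if any) sees the normalized activations, matching
    # the record; the identity shortcut carries the untouched input.
    shortcut = projection(h) if projection is not None else x
    h = conv2(bn_relu(conv1(h)))
    return h + shortcut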
22,673
tensorflow/tensor2tensor
tensor2tensor/models/resnet.py
resnet_v2
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): """Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations. """ inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
python
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): """Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations. """ inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
[ "def", "resnet_v2", "(", "inputs", ",", "block_fn", ",", "layer_blocks", ",", "filters", ",", "data_format", "=", "\"channels_first\"", ",", "is_training", "=", "False", ",", "is_cifar", "=", "False", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "keep_prob", "=", "None", ")", ":", "inputs", "=", "block_layer", "(", "inputs", "=", "inputs", ",", "filters", "=", "filters", "[", "1", "]", ",", "block_fn", "=", "block_fn", ",", "blocks", "=", "layer_blocks", "[", "0", "]", ",", "strides", "=", "1", ",", "is_training", "=", "is_training", ",", "name", "=", "\"block_layer1\"", ",", "data_format", "=", "data_format", ",", "use_td", "=", "use_td", ",", "targeting_rate", "=", "targeting_rate", ",", "keep_prob", "=", "keep_prob", ")", "inputs", "=", "block_layer", "(", "inputs", "=", "inputs", ",", "filters", "=", "filters", "[", "2", "]", ",", "block_fn", "=", "block_fn", ",", "blocks", "=", "layer_blocks", "[", "1", "]", ",", "strides", "=", "2", ",", "is_training", "=", "is_training", ",", "name", "=", "\"block_layer2\"", ",", "data_format", "=", "data_format", ",", "use_td", "=", "use_td", ",", "targeting_rate", "=", "targeting_rate", ",", "keep_prob", "=", "keep_prob", ")", "inputs", "=", "block_layer", "(", "inputs", "=", "inputs", ",", "filters", "=", "filters", "[", "3", "]", ",", "block_fn", "=", "block_fn", ",", "blocks", "=", "layer_blocks", "[", "2", "]", ",", "strides", "=", "2", ",", "is_training", "=", "is_training", ",", "name", "=", "\"block_layer3\"", ",", "data_format", "=", "data_format", ",", "use_td", "=", "use_td", ",", "targeting_rate", "=", "targeting_rate", ",", "keep_prob", "=", "keep_prob", ")", "if", "not", "is_cifar", ":", "inputs", "=", "block_layer", "(", "inputs", "=", "inputs", ",", "filters", "=", "filters", "[", "4", "]", ",", "block_fn", "=", "block_fn", ",", "blocks", "=", "layer_blocks", "[", "3", "]", ",", "strides", "=", "2", ",", "is_training", "=", "is_training", ",", "name", "=", "\"block_layer4\"", ",", "data_format", "=", "data_format", ",", "use_td", "=", "use_td", ",", "targeting_rate", "=", "targeting_rate", ",", "keep_prob", "=", "keep_prob", ")", "return", "inputs" ]
Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filters to include in each block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations.
[ "Resnet", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L427-L511
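A hypothetical invocation, assuming a CIFAR-style setup with three block groups; filters[0] is conventionally consumed by the stem convolution elsewhere, which is why the groups index filters from 1 (stem_output and the filter list are illustrative assumptions, not values from the source):

features = resnet_v2(
    inputs=stem_output,            # e.g. [batch, 16, 32, 32] after the stem
    block_fn=residual_block,
    layer_blocks=[3, 3, 3],        # three groups; is_cifar=True skips group 4
    filters=[16, 16, 32, 64],
    data_format="channels_first",
    is_training=True,
    is_cifar=True)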
22,674
tensorflow/tensor2tensor
tensor2tensor/utils/rouge.py
_len_lcs
def _len_lcs(x, y): """Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y """ table = _lcs(x, y) n, m = len(x), len(y) return table[n, m]
python
def _len_lcs(x, y): """Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y """ table = _lcs(x, y) n, m = len(x), len(y) return table[n, m]
[ "def", "_len_lcs", "(", "x", ",", "y", ")", ":", "table", "=", "_lcs", "(", "x", ",", "y", ")", "n", ",", "m", "=", "len", "(", "x", ")", ",", "len", "(", "y", ")", "return", "table", "[", "n", ",", "m", "]" ]
Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y
[ "Returns", "the", "length", "of", "the", "Longest", "Common", "Subsequence", "between", "two", "seqs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L33-L47
22,675
tensorflow/tensor2tensor
tensor2tensor/utils/rouge.py
_lcs
def _lcs(x, y): """Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs """ n, m = len(x), len(y) table = {} for i in range(n + 1): for j in range(m + 1): if i == 0 or j == 0: table[i, j] = 0 elif x[i - 1] == y[j - 1]: table[i, j] = table[i - 1, j - 1] + 1 else: table[i, j] = max(table[i - 1, j], table[i, j - 1]) return table
python
def _lcs(x, y): """Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs """ n, m = len(x), len(y) table = {} for i in range(n + 1): for j in range(m + 1): if i == 0 or j == 0: table[i, j] = 0 elif x[i - 1] == y[j - 1]: table[i, j] = table[i - 1, j - 1] + 1 else: table[i, j] = max(table[i - 1, j], table[i, j - 1]) return table
[ "def", "_lcs", "(", "x", ",", "y", ")", ":", "n", ",", "m", "=", "len", "(", "x", ")", ",", "len", "(", "y", ")", "table", "=", "{", "}", "for", "i", "in", "range", "(", "n", "+", "1", ")", ":", "for", "j", "in", "range", "(", "m", "+", "1", ")", ":", "if", "i", "==", "0", "or", "j", "==", "0", ":", "table", "[", "i", ",", "j", "]", "=", "0", "elif", "x", "[", "i", "-", "1", "]", "==", "y", "[", "j", "-", "1", "]", ":", "table", "[", "i", ",", "j", "]", "=", "table", "[", "i", "-", "1", ",", "j", "-", "1", "]", "+", "1", "else", ":", "table", "[", "i", ",", "j", "]", "=", "max", "(", "table", "[", "i", "-", "1", ",", "j", "]", ",", "table", "[", "i", ",", "j", "-", "1", "]", ")", "return", "table" ]
Computes the length of the LCS between two seqs. The implementation below uses a dynamic-programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: dict mapping each coordinate pair (i, j) to the LCS length of the prefixes x[:i] and y[:j]
[ "Computes", "the", "length", "of", "the", "LCS", "between", "two", "seqs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L50-L74
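A quick check of the two records above, assuming both functions are in scope; _lcs fills the O(n*m) dynamic-programming table over prefix pairs and _len_lcs reads off the corner entry:

x = "the cat sat on the mat".split()
y = "the dog sat on the mat".split()
print(_len_lcs(x, y))              # 5: everything but cat/dog matches in order
print(_lcs(x, y)[len(x), len(y)])  # same value, straight from the table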
22,676
tensorflow/tensor2tensor
tensor2tensor/utils/rouge.py
_get_ngrams
def _get_ngrams(n, text): """Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams """ ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set
python
def _get_ngrams(n, text): """Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams """ ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set
[ "def", "_get_ngrams", "(", "n", ",", "text", ")", ":", "ngram_set", "=", "set", "(", ")", "text_length", "=", "len", "(", "text", ")", "max_index_ngram_start", "=", "text_length", "-", "n", "for", "i", "in", "range", "(", "max_index_ngram_start", "+", "1", ")", ":", "ngram_set", ".", "add", "(", "tuple", "(", "text", "[", "i", ":", "i", "+", "n", "]", ")", ")", "return", "ngram_set" ]
Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams
[ "Calculates", "n", "-", "grams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L156-L171
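A small demonstration; note the set return type, so a repeated n-gram is counted only once, which is exactly what the set-overlap ROUGE-N computation in this module relies on:

tokens = "to be or not to be".split()
print(sorted(_get_ngrams(2, tokens)))
# [('be', 'or'), ('not', 'to'), ('or', 'not'), ('to', 'be')]
# ('to', 'be') appears twice in the text but only once in the set.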
22,677
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem.py
flatten_zip_dataset
def flatten_zip_dataset(*args): """A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset. """ flattened = tf.data.Dataset.from_tensors(args[0]) for ex in args[1:]: flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex)) return flattened
python
def flatten_zip_dataset(*args): """A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset. """ flattened = tf.data.Dataset.from_tensors(args[0]) for ex in args[1:]: flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex)) return flattened
[ "def", "flatten_zip_dataset", "(", "*", "args", ")", ":", "flattened", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", "(", "args", "[", "0", "]", ")", "for", "ex", "in", "args", "[", "1", ":", "]", ":", "flattened", "=", "flattened", ".", "concatenate", "(", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", "(", "ex", ")", ")", "return", "flattened" ]
Flattens a list of examples into a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset.
[ "Flattens", "a", "list", "of", "examples", "into", "a", "dataset", "containing", "mixed", "examples", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L111-L128
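A sketch of the typical call site, assuming the record's function is in scope (TF 1.x-style tf.data; the toy datasets are illustrative): zip one stream per task, then flat_map with flatten_zip_dataset to splice the zipped examples back into a single interleaved stream:

import tensorflow as tf

ds_a = tf.data.Dataset.from_tensor_slices([1, 2, 3])
ds_b = tf.data.Dataset.from_tensor_slices([10, 20, 30])
# zip yields tuples (a_i, b_i); flat_map passes the components as *args,
# and flatten_zip_dataset turns each tuple into consecutive single
# examples: 1, 10, 2, 20, 3, 30.
mixed = tf.data.Dataset.zip((ds_a, ds_b)).flat_map(flatten_zip_dataset)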
22,678
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem.py
aggregate_task_lm_losses
def aggregate_task_lm_losses(hparams, problem_hparams, logits, feature_name, feature): """LM loss for multiproblems.""" summaries = [] vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) loss_num = 0. loss_den = 0. for task in hparams.problem.task_list: loss_num_, loss_den_ = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size, weights_fn) loss_num += loss_num_ loss_den += loss_den_ loss_val = loss_num_ / tf.maximum(1.0, loss_den_) summaries.append([task.name+"_loss", loss_val]) return loss_num, loss_den, summaries
python
def aggregate_task_lm_losses(hparams, problem_hparams, logits, feature_name, feature): """LM loss for multiproblems.""" summaries = [] vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) loss_num = 0. loss_den = 0. for task in hparams.problem.task_list: loss_num_, loss_den_ = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size, weights_fn) loss_num += loss_num_ loss_den += loss_den_ loss_val = loss_num_ / tf.maximum(1.0, loss_den_) summaries.append([task.name+"_loss", loss_val]) return loss_num, loss_den, summaries
[ "def", "aggregate_task_lm_losses", "(", "hparams", ",", "problem_hparams", ",", "logits", ",", "feature_name", ",", "feature", ")", ":", "summaries", "=", "[", "]", "vocab_size", "=", "problem_hparams", ".", "vocab_size", "[", "feature_name", "]", "if", "vocab_size", "is", "not", "None", "and", "hasattr", "(", "hparams", ",", "\"vocab_divisor\"", ")", ":", "vocab_size", "+=", "(", "-", "vocab_size", ")", "%", "hparams", ".", "vocab_divisor", "modality", "=", "problem_hparams", ".", "modality", "[", "feature_name", "]", "loss", "=", "hparams", ".", "loss", ".", "get", "(", "feature_name", ",", "modalities", ".", "get_loss", "(", "modality", ")", ")", "weights_fn", "=", "hparams", ".", "weights_fn", ".", "get", "(", "feature_name", ",", "modalities", ".", "get_weights_fn", "(", "modality", ")", ")", "loss_num", "=", "0.", "loss_den", "=", "0.", "for", "task", "in", "hparams", ".", "problem", ".", "task_list", ":", "loss_num_", ",", "loss_den_", "=", "loss", "(", "logits", ",", "feature", ",", "lambda", "x", ":", "common_layers", ".", "weights_multi_problem_all", "(", "x", ",", "task", ".", "task_id", ")", ",", "# pylint: disable=cell-var-from-loop", "hparams", ",", "vocab_size", ",", "weights_fn", ")", "loss_num", "+=", "loss_num_", "loss_den", "+=", "loss_den_", "loss_val", "=", "loss_num_", "/", "tf", ".", "maximum", "(", "1.0", ",", "loss_den_", ")", "summaries", ".", "append", "(", "[", "task", ".", "name", "+", "\"_loss\"", ",", "loss_val", "]", ")", "return", "loss_num", ",", "loss_den", ",", "summaries" ]
LM loss for multiproblems.
[ "LM", "loss", "for", "multiproblems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L525-L553
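The aggregation reduces to a weighted mean: each task's loss function returns a (numerator, denominator) pair, the pairs are summed across tasks, and the caller divides. A toy restatement of that arithmetic with hypothetical numbers:

# Hypothetical per-task (loss_num, loss_den) pairs, e.g. summed token
# losses and token counts from the modality's loss function.
per_task = [(12.0, 40.0), (3.0, 10.0)]
loss_num = sum(n for n, _ in per_task)
loss_den = sum(d for _, d in per_task)
print(loss_num / max(1.0, loss_den))  # 0.3, the multi-task mean token loss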
22,679
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem.py
MultiProblem.update_task_ids
def update_task_ids(self, encoder_vocab_size): """Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset. """ for idx, task in enumerate(self.task_list): task.set_task_id(idx + encoder_vocab_size) tf.logging.info("Task %d (%s) has id %d." % (idx, task.name, task.task_id))
python
def update_task_ids(self, encoder_vocab_size): """Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset. """ for idx, task in enumerate(self.task_list): task.set_task_id(idx + encoder_vocab_size) tf.logging.info("Task %d (%s) has id %d." % (idx, task.name, task.task_id))
[ "def", "update_task_ids", "(", "self", ",", "encoder_vocab_size", ")", ":", "for", "idx", ",", "task", "in", "enumerate", "(", "self", ".", "task_list", ")", ":", "task", ".", "set_task_id", "(", "idx", "+", "encoder_vocab_size", ")", "tf", ".", "logging", ".", "info", "(", "\"Task %d (%s) has id %d.\"", "%", "(", "idx", ",", "task", ".", "name", ",", "task", ".", "task_id", ")", ")" ]
Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset.
[ "Generate", "task_ids", "for", "each", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L385-L397
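The effect is to reserve ids immediately past the encoder vocabulary, one per task, so each task id can be embedded like an extra token. A toy illustration with assumed numbers (the vocab size and task names are hypothetical):

encoder_vocab_size = 32000                 # assumed vocab size
task_names = ["lm", "sentiment", "nli"]    # hypothetical task list
for idx, name in enumerate(task_names):
    print(name, "-> id", idx + encoder_vocab_size)
# lm -> id 32000, sentiment -> id 32001, nli -> id 32002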
22,680
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem.py
MultiProblem.get_max_num_classes
def get_max_num_classes(self): """Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem. """ num = 0 for task in self.task_list: if hasattr(task, "num_classes"): if num < task.num_classes: num = task.num_classes return num
python
def get_max_num_classes(self): """Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem. """ num = 0 for task in self.task_list: if hasattr(task, "num_classes"): if num < task.num_classes: num = task.num_classes return num
[ "def", "get_max_num_classes", "(", "self", ")", ":", "num", "=", "0", "for", "task", "in", "self", ".", "task_list", ":", "if", "hasattr", "(", "task", ",", "\"num_classes\"", ")", ":", "if", "num", "<", "task", ".", "num_classes", ":", "num", "=", "task", ".", "num_classes", "return", "num" ]
Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem.
[ "Compute", "the", "maximum", "number", "of", "classes", "any", "subtask", "has", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L399-L416
22,681
tensorflow/tensor2tensor
tensor2tensor/layers/transformer_memory.py
TransformerMemory._norm
def _norm(self, x): """Compute the safe norm.""" return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
python
def _norm(self, x): """Compute the safe norm.""" return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
[ "def", "_norm", "(", "self", ",", "x", ")", ":", "return", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "x", ")", ",", "keepdims", "=", "True", ",", "axis", "=", "-", "1", ")", "+", "1e-7", ")" ]
Compute the safe norm.
[ "Compute", "the", "safe", "norm", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L226-L228
22,682
tensorflow/tensor2tensor
tensor2tensor/layers/transformer_memory.py
TransformerMemory._address_content
def _address_content(self, x): """Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size]. """ mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_query") norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True) dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") access_logits = self.sharpen_factor * cos_dist return access_logits
python
def _address_content(self, x): """Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size]. """ mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_query") norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True) dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") access_logits = self.sharpen_factor * cos_dist return access_logits
[ "def", "_address_content", "(", "self", ",", "x", ")", ":", "mem_keys", "=", "tf", ".", "layers", ".", "dense", "(", "self", ".", "mem_vals", ",", "self", ".", "key_depth", ",", "bias_initializer", "=", "tf", ".", "constant_initializer", "(", "1.0", ")", ",", "name", "=", "\"mem_key\"", ")", "mem_query", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "self", ".", "key_depth", ",", "bias_initializer", "=", "tf", ".", "constant_initializer", "(", "1.0", ")", ",", "name", "=", "\"mem_query\"", ")", "norm", "=", "tf", ".", "matmul", "(", "self", ".", "_norm", "(", "mem_query", ")", ",", "self", ".", "_norm", "(", "mem_keys", ")", ",", "transpose_b", "=", "True", ")", "dot_product", "=", "tf", ".", "matmul", "(", "mem_query", ",", "mem_keys", ",", "transpose_b", "=", "True", ")", "cos_dist", "=", "tf", ".", "div", "(", "dot_product", ",", "norm", "+", "1e-7", ",", "name", "=", "\"cos_dist\"", ")", "access_logits", "=", "self", ".", "sharpen_factor", "*", "cos_dist", "return", "access_logits" ]
Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size].
[ "Address", "the", "memory", "based", "on", "content", "similarity", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L230-L249
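Content addressing here is sharpened cosine similarity between a learned query projection of x and a learned key projection of the memory values. A NumPy sketch for a single example, with the 1e-7 terms mirroring the record's _norm epsilon and division guard:

import numpy as np

def address_content_np(query, keys, sharpen_factor=10.0):
    # query: [length, key_depth]; keys: [memory_size, key_depth]
    q_norm = np.sqrt((query ** 2).sum(-1, keepdims=True) + 1e-7)
    k_norm = np.sqrt((keys ** 2).sum(-1, keepdims=True) + 1e-7)
    norm = q_norm @ k_norm.T                 # outer product of the norms
    cos = (query @ keys.T) / (norm + 1e-7)   # cosine similarity in [-1, 1]
    return sharpen_factor * cos              # logits, one per memory slot

logits = address_content_np(np.random.randn(3, 8), np.random.randn(16, 8))
print(logits.shape)  # (3, 16): one logit per (position, memory slot)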
22,683
tensorflow/tensor2tensor
tensor2tensor/layers/transformer_memory.py
TransformerMemory.read
def read(self, x): """Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth]. """ access_logits = self._address_content(x) weights = tf.nn.softmax(access_logits) retrieved_mem = tf.reduce_sum( tf.multiply(tf.expand_dims(weights, 3), tf.expand_dims(self.mem_vals, axis=1)), axis=2) return access_logits, retrieved_mem
python
def read(self, x): """Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth]. """ access_logits = self._address_content(x) weights = tf.nn.softmax(access_logits) retrieved_mem = tf.reduce_sum( tf.multiply(tf.expand_dims(weights, 3), tf.expand_dims(self.mem_vals, axis=1)), axis=2) return access_logits, retrieved_mem
[ "def", "read", "(", "self", ",", "x", ")", ":", "access_logits", "=", "self", ".", "_address_content", "(", "x", ")", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "access_logits", ")", "retrieved_mem", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "multiply", "(", "tf", ".", "expand_dims", "(", "weights", ",", "3", ")", ",", "tf", ".", "expand_dims", "(", "self", ".", "mem_vals", ",", "axis", "=", "1", ")", ")", ",", "axis", "=", "2", ")", "return", "access_logits", ",", "retrieved_mem" ]
Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth].
[ "Read", "from", "the", "memory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L251-L270
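Retrieval is a softmax over the addressing logits followed by a weighted sum of the memory values; a NumPy sketch for one example makes the shapes concrete:

import numpy as np

def read_np(access_logits, mem_vals):
    # access_logits: [length, memory_size]; mem_vals: [memory_size, val_depth]
    w = np.exp(access_logits - access_logits.max(-1, keepdims=True))
    w /= w.sum(-1, keepdims=True)            # softmax over memory slots
    return w @ mem_vals                      # [length, val_depth]

print(read_np(np.random.randn(3, 16), np.random.randn(16, 32)).shape)  # (3, 32)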
22,684
tensorflow/tensor2tensor
tensor2tensor/layers/transformer_memory.py
TransformerMemory.write
def write(self, x, access_logits): """Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op. """ gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name="candidate_value") erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name="erase") write_weights = tf.nn.softmax(write_logits) erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1)) addition = tf.multiply( tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2)) update_value_op = self.mem_vals.assign( tf.reduce_mean(erase + addition, axis=1)) with tf.control_dependencies([update_value_op]): write_op = self.mean_logits.assign( self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) return write_op
python
def write(self, x, access_logits): """Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op. """ gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name="candidate_value") erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name="erase") write_weights = tf.nn.softmax(write_logits) erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1)) addition = tf.multiply( tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2)) update_value_op = self.mem_vals.assign( tf.reduce_mean(erase + addition, axis=1)) with tf.control_dependencies([update_value_op]): write_op = self.mean_logits.assign( self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) return write_op
[ "def", "write", "(", "self", ",", "x", ",", "access_logits", ")", ":", "gamma", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "1", ",", "activation", "=", "tf", ".", "sigmoid", ",", "name", "=", "\"gamma\"", ")", "write_logits", "=", "access_logits", "-", "gamma", "*", "tf", ".", "expand_dims", "(", "self", ".", "mean_logits", ",", "1", ")", "candidate_value", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "self", ".", "val_depth", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ",", "name", "=", "\"candidate_value\"", ")", "erase_gates", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "self", ".", "memory_size", ",", "activation", "=", "tf", ".", "nn", ".", "sigmoid", ",", "name", "=", "\"erase\"", ")", "write_weights", "=", "tf", ".", "nn", ".", "softmax", "(", "write_logits", ")", "erase_weights", "=", "tf", ".", "expand_dims", "(", "1", "-", "erase_gates", "*", "write_weights", ",", "3", ")", "erase", "=", "tf", ".", "multiply", "(", "erase_weights", ",", "tf", ".", "expand_dims", "(", "self", ".", "mem_vals", ",", "1", ")", ")", "addition", "=", "tf", ".", "multiply", "(", "tf", ".", "expand_dims", "(", "write_weights", ",", "3", ")", ",", "tf", ".", "expand_dims", "(", "candidate_value", ",", "2", ")", ")", "update_value_op", "=", "self", ".", "mem_vals", ".", "assign", "(", "tf", ".", "reduce_mean", "(", "erase", "+", "addition", ",", "axis", "=", "1", ")", ")", "with", "tf", ".", "control_dependencies", "(", "[", "update_value_op", "]", ")", ":", "write_op", "=", "self", ".", "mean_logits", ".", "assign", "(", "self", ".", "mean_logits", "*", "0.1", "+", "tf", ".", "reduce_mean", "(", "write_logits", "*", "0.9", ",", "axis", "=", "1", ")", ")", "return", "write_op" ]
Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op.
[ "Write", "to", "the", "memory", "based", "on", "a", "combination", "of", "similarity", "and", "least", "used", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L272-L303
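The write follows the erase/add scheme of the cited arXiv:1607.00036, with a least-used bias: subtracting gamma * mean_logits steers writes toward slots whose running access logits are low. A NumPy sketch of the value update for one example (the record then also refreshes mean_logits with a 0.1/0.9 moving average of the write logits):

import numpy as np

def write_np(mem_vals, write_logits, erase_gates, candidate):
    # mem_vals: [M, D]; write_logits, erase_gates: [L, M]; candidate: [L, D]
    w = np.exp(write_logits - write_logits.max(-1, keepdims=True))
    w /= w.sum(-1, keepdims=True)                  # softmax write weights
    keep = 1.0 - erase_gates * w                   # per-slot retention gate
    erased = keep[..., None] * mem_vals[None]      # [L, M, D]
    added = w[..., None] * candidate[:, None, :]   # [L, M, D]
    return (erased + added).mean(axis=0)           # average over positions

new_mem = write_np(np.zeros((16, 32)), np.random.randn(3, 16),
                   np.random.rand(3, 16), np.random.randn(3, 32))
print(new_mem.shape)  # (16, 32)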
22,685
tensorflow/tensor2tensor
tensor2tensor/layers/transformer_memory.py
TransformerMemory.reset
def reset(self, entries_to_reset): """Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op. """ num_updates = tf.size(entries_to_reset) update_vals = tf.scatter_update( self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size, self.val_depth], .0), 0), [num_updates, 1, 1])) update_logits = tf.scatter_update( self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size], .0), 0), [num_updates, 1])) reset_op = tf.group([update_vals, update_logits]) return reset_op
python
def reset(self, entries_to_reset): """Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op. """ num_updates = tf.size(entries_to_reset) update_vals = tf.scatter_update( self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size, self.val_depth], .0), 0), [num_updates, 1, 1])) update_logits = tf.scatter_update( self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size], .0), 0), [num_updates, 1])) reset_op = tf.group([update_vals, update_logits]) return reset_op
[ "def", "reset", "(", "self", ",", "entries_to_reset", ")", ":", "num_updates", "=", "tf", ".", "size", "(", "entries_to_reset", ")", "update_vals", "=", "tf", ".", "scatter_update", "(", "self", ".", "mem_vals", ",", "entries_to_reset", ",", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "tf", ".", "fill", "(", "[", "self", ".", "memory_size", ",", "self", ".", "val_depth", "]", ",", ".0", ")", ",", "0", ")", ",", "[", "num_updates", ",", "1", ",", "1", "]", ")", ")", "update_logits", "=", "tf", ".", "scatter_update", "(", "self", ".", "mean_logits", ",", "entries_to_reset", ",", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "tf", ".", "fill", "(", "[", "self", ".", "memory_size", "]", ",", ".0", ")", ",", "0", ")", ",", "[", "num_updates", ",", "1", "]", ")", ")", "reset_op", "=", "tf", ".", "group", "(", "[", "update_vals", ",", "update_logits", "]", ")", "return", "reset_op" ]
Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op.
[ "Reset", "the", "entries", "in", "the", "memory", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L317-L337
22,686
tensorflow/tensor2tensor
tensor2tensor/rl/ppo_learner.py
_define_train
def _define_train( train_env, ppo_hparams, eval_env_fn=None, sampling_temp=1.0, **collect_kwargs ): """Define the training setup.""" memory, collect_summary, train_initialization = ( _define_collect( train_env, ppo_hparams, "ppo_train", eval_phase=False, sampling_temp=sampling_temp, **collect_kwargs)) ppo_summary = ppo.define_ppo_epoch( memory, ppo_hparams, train_env.action_space, train_env.batch_size) train_summary = tf.summary.merge([collect_summary, ppo_summary]) if ppo_hparams.eval_every_epochs: # TODO(koz4k): Do we need this at all? assert eval_env_fn is not None eval_env = eval_env_fn(in_graph=True) (_, eval_collect_summary, eval_initialization) = ( _define_collect( eval_env, ppo_hparams, "ppo_eval", eval_phase=True, sampling_temp=0.0, **collect_kwargs)) return (train_summary, eval_collect_summary, (train_initialization, eval_initialization)) else: return (train_summary, None, (train_initialization,))
python
def _define_train( train_env, ppo_hparams, eval_env_fn=None, sampling_temp=1.0, **collect_kwargs ): """Define the training setup.""" memory, collect_summary, train_initialization = ( _define_collect( train_env, ppo_hparams, "ppo_train", eval_phase=False, sampling_temp=sampling_temp, **collect_kwargs)) ppo_summary = ppo.define_ppo_epoch( memory, ppo_hparams, train_env.action_space, train_env.batch_size) train_summary = tf.summary.merge([collect_summary, ppo_summary]) if ppo_hparams.eval_every_epochs: # TODO(koz4k): Do we need this at all? assert eval_env_fn is not None eval_env = eval_env_fn(in_graph=True) (_, eval_collect_summary, eval_initialization) = ( _define_collect( eval_env, ppo_hparams, "ppo_eval", eval_phase=True, sampling_temp=0.0, **collect_kwargs)) return (train_summary, eval_collect_summary, (train_initialization, eval_initialization)) else: return (train_summary, None, (train_initialization,))
[ "def", "_define_train", "(", "train_env", ",", "ppo_hparams", ",", "eval_env_fn", "=", "None", ",", "sampling_temp", "=", "1.0", ",", "*", "*", "collect_kwargs", ")", ":", "memory", ",", "collect_summary", ",", "train_initialization", "=", "(", "_define_collect", "(", "train_env", ",", "ppo_hparams", ",", "\"ppo_train\"", ",", "eval_phase", "=", "False", ",", "sampling_temp", "=", "sampling_temp", ",", "*", "*", "collect_kwargs", ")", ")", "ppo_summary", "=", "ppo", ".", "define_ppo_epoch", "(", "memory", ",", "ppo_hparams", ",", "train_env", ".", "action_space", ",", "train_env", ".", "batch_size", ")", "train_summary", "=", "tf", ".", "summary", ".", "merge", "(", "[", "collect_summary", ",", "ppo_summary", "]", ")", "if", "ppo_hparams", ".", "eval_every_epochs", ":", "# TODO(koz4k): Do we need this at all?", "assert", "eval_env_fn", "is", "not", "None", "eval_env", "=", "eval_env_fn", "(", "in_graph", "=", "True", ")", "(", "_", ",", "eval_collect_summary", ",", "eval_initialization", ")", "=", "(", "_define_collect", "(", "eval_env", ",", "ppo_hparams", ",", "\"ppo_eval\"", ",", "eval_phase", "=", "True", ",", "sampling_temp", "=", "0.0", ",", "*", "*", "collect_kwargs", ")", ")", "return", "(", "train_summary", ",", "eval_collect_summary", ",", "(", "train_initialization", ",", "eval_initialization", ")", ")", "else", ":", "return", "(", "train_summary", ",", "None", ",", "(", "train_initialization", ",", ")", ")" ]
Define the training setup.
[ "Define", "the", "training", "setup", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo_learner.py#L151-L186
22,687
tensorflow/tensor2tensor
tensor2tensor/rl/ppo_learner.py
_rollout_metadata
def _rollout_metadata(batch_env):
  """Metadata for rollouts."""
  batch_env_shape = batch_env.observ.get_shape().as_list()
  batch_size = [batch_env_shape[0]]
  shapes_types_names = [
      # TODO(piotrmilos): possibly retrieve the observation type for batch_env
      (batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"),
      (batch_size, tf.float32, "reward"),
      (batch_size, tf.bool, "done"),
      (batch_size + list(batch_env.action_shape), batch_env.action_dtype,
       "action"),
      (batch_size, tf.float32, "pdf"),
      (batch_size, tf.float32, "value_function"),
  ]
  return shapes_types_names
python
def _rollout_metadata(batch_env):
  """Metadata for rollouts."""
  batch_env_shape = batch_env.observ.get_shape().as_list()
  batch_size = [batch_env_shape[0]]
  shapes_types_names = [
      # TODO(piotrmilos): possibly retrieve the observation type for batch_env
      (batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"),
      (batch_size, tf.float32, "reward"),
      (batch_size, tf.bool, "done"),
      (batch_size + list(batch_env.action_shape), batch_env.action_dtype,
       "action"),
      (batch_size, tf.float32, "pdf"),
      (batch_size, tf.float32, "value_function"),
  ]
  return shapes_types_names
[ "def", "_rollout_metadata", "(", "batch_env", ")", ":", "batch_env_shape", "=", "batch_env", ".", "observ", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "batch_size", "=", "[", "batch_env_shape", "[", "0", "]", "]", "shapes_types_names", "=", "[", "# TODO(piotrmilos): possibly retrieve the observation type for batch_env", "(", "batch_size", "+", "batch_env_shape", "[", "1", ":", "]", ",", "batch_env", ".", "observ_dtype", ",", "\"observation\"", ")", ",", "(", "batch_size", ",", "tf", ".", "float32", ",", "\"reward\"", ")", ",", "(", "batch_size", ",", "tf", ".", "bool", ",", "\"done\"", ")", ",", "(", "batch_size", "+", "list", "(", "batch_env", ".", "action_shape", ")", ",", "batch_env", ".", "action_dtype", ",", "\"action\"", ")", ",", "(", "batch_size", ",", "tf", ".", "float32", ",", "\"pdf\"", ")", ",", "(", "batch_size", ",", "tf", ".", "float32", ",", "\"value_function\"", ")", ",", "]", "return", "shapes_types_names" ]
Metadata for rollouts.
[ "Metadata", "for", "rollouts", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo_learner.py#L254-L268
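The (shape, dtype, name) triples above lend themselves to allocating rollout buffers with a leading time axis. A minimal sketch of that pattern, assuming a batch_env with the attributes _rollout_metadata reads; the helper name and variable naming are illustrative, not part of the library's API:

import tensorflow as tf

def make_rollout_memory(batch_env, epoch_length):
  # One non-trainable variable per rollout field, prefixed with a time axis.
  memory = []
  for shape, dtype, name in _rollout_metadata(batch_env):
    memory.append(tf.get_variable(
        "collect_memory_%d_%s" % (epoch_length, name),
        shape=[epoch_length] + shape,
        dtype=dtype,
        initializer=tf.zeros_initializer(),
        trainable=False))
  return memory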
22,688
tensorflow/tensor2tensor
tensor2tensor/models/vanilla_gan.py
sliced_gan
def sliced_gan():
  """Basic parameters for a vanilla_gan."""
  hparams = common_hparams.basic_params1()
  hparams.optimizer = "adam"
  hparams.learning_rate_constant = 0.0002
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup"
  hparams.label_smoothing = 0.0
  hparams.batch_size = 128
  hparams.hidden_size = 128
  hparams.initializer = "uniform_unit_scaling"
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 1e-6
  hparams.kernel_height = 4
  hparams.kernel_width = 4
  hparams.bottleneck_bits = 128
  hparams.add_hparam("discriminator_batchnorm", True)
  hparams.add_hparam("num_sliced_vecs", 4096)
  return hparams
python
def sliced_gan():
  """Basic parameters for a vanilla_gan."""
  hparams = common_hparams.basic_params1()
  hparams.optimizer = "adam"
  hparams.learning_rate_constant = 0.0002
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup"
  hparams.label_smoothing = 0.0
  hparams.batch_size = 128
  hparams.hidden_size = 128
  hparams.initializer = "uniform_unit_scaling"
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 1e-6
  hparams.kernel_height = 4
  hparams.kernel_width = 4
  hparams.bottleneck_bits = 128
  hparams.add_hparam("discriminator_batchnorm", True)
  hparams.add_hparam("num_sliced_vecs", 4096)
  return hparams
[ "def", "sliced_gan", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "optimizer", "=", "\"adam\"", "hparams", ".", "learning_rate_constant", "=", "0.0002", "hparams", ".", "learning_rate_warmup_steps", "=", "500", "hparams", ".", "learning_rate_schedule", "=", "\"constant * linear_warmup\"", "hparams", ".", "label_smoothing", "=", "0.0", "hparams", ".", "batch_size", "=", "128", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "initializer", "=", "\"uniform_unit_scaling\"", "hparams", ".", "initializer_gain", "=", "1.0", "hparams", ".", "weight_decay", "=", "1e-6", "hparams", ".", "kernel_height", "=", "4", "hparams", ".", "kernel_width", "=", "4", "hparams", ".", "bottleneck_bits", "=", "128", "hparams", ".", "add_hparam", "(", "\"discriminator_batchnorm\"", ",", "True", ")", "hparams", ".", "add_hparam", "(", "\"num_sliced_vecs\"", ",", "4096", ")", "return", "hparams" ]
Basic parameters for a vanilla_gan.
[ "Basic", "parameters", "for", "a", "vanilla_gan", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L199-L217
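Since sliced_gan is an ordinary hparams constructor, it can be called directly to inspect or override a configuration before training; a small sketch (the override value is arbitrary):

from tensor2tensor.models import vanilla_gan

hparams = vanilla_gan.sliced_gan()
print(hparams.batch_size, hparams.bottleneck_bits)  # 128 128
# Fields registered via add_hparam behave like normal attributes afterwards.
hparams.num_sliced_vecs = 8192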
22,689
tensorflow/tensor2tensor
tensor2tensor/models/vanilla_gan.py
AbstractGAN.body
def body(self, features):
  """Body of the model.

  Args:
    features: a dictionary with the tensors.

  Returns:
    A pair (predictions, losses) where predictions is the generated image
    and losses is a dictionary of losses (that get added for the final loss).
  """
  features["targets"] = features["inputs"]
  is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

  # Input images.
  inputs = tf.to_float(features["targets_raw"])

  # Noise vector.
  z = tf.random_uniform([self.hparams.batch_size,
                         self.hparams.bottleneck_bits],
                        minval=-1, maxval=1, name="z")

  # Generator output: fake images.
  out_shape = common_layers.shape_list(inputs)[1:4]
  g = self.generator(z, is_training, out_shape)

  losses = self.losses(inputs, g)  # pylint: disable=not-callable

  summary_g_image = tf.reshape(
      g[0, :], [1] + common_layers.shape_list(inputs)[1:])
  tf.summary.image("generated", summary_g_image, max_outputs=1)

  if is_training:  # Returns an dummy output and the losses dictionary.
    return tf.zeros_like(inputs), losses
  return tf.reshape(g, tf.shape(inputs)), losses
python
def body(self, features):
  """Body of the model.

  Args:
    features: a dictionary with the tensors.

  Returns:
    A pair (predictions, losses) where predictions is the generated image
    and losses is a dictionary of losses (that get added for the final loss).
  """
  features["targets"] = features["inputs"]
  is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

  # Input images.
  inputs = tf.to_float(features["targets_raw"])

  # Noise vector.
  z = tf.random_uniform([self.hparams.batch_size,
                         self.hparams.bottleneck_bits],
                        minval=-1, maxval=1, name="z")

  # Generator output: fake images.
  out_shape = common_layers.shape_list(inputs)[1:4]
  g = self.generator(z, is_training, out_shape)

  losses = self.losses(inputs, g)  # pylint: disable=not-callable

  summary_g_image = tf.reshape(
      g[0, :], [1] + common_layers.shape_list(inputs)[1:])
  tf.summary.image("generated", summary_g_image, max_outputs=1)

  if is_training:  # Returns an dummy output and the losses dictionary.
    return tf.zeros_like(inputs), losses
  return tf.reshape(g, tf.shape(inputs)), losses
[ "def", "body", "(", "self", ",", "features", ")", ":", "features", "[", "\"targets\"", "]", "=", "features", "[", "\"inputs\"", "]", "is_training", "=", "self", ".", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "# Input images.", "inputs", "=", "tf", ".", "to_float", "(", "features", "[", "\"targets_raw\"", "]", ")", "# Noise vector.", "z", "=", "tf", ".", "random_uniform", "(", "[", "self", ".", "hparams", ".", "batch_size", ",", "self", ".", "hparams", ".", "bottleneck_bits", "]", ",", "minval", "=", "-", "1", ",", "maxval", "=", "1", ",", "name", "=", "\"z\"", ")", "# Generator output: fake images.", "out_shape", "=", "common_layers", ".", "shape_list", "(", "inputs", ")", "[", "1", ":", "4", "]", "g", "=", "self", ".", "generator", "(", "z", ",", "is_training", ",", "out_shape", ")", "losses", "=", "self", ".", "losses", "(", "inputs", ",", "g", ")", "# pylint: disable=not-callable", "summary_g_image", "=", "tf", ".", "reshape", "(", "g", "[", "0", ",", ":", "]", ",", "[", "1", "]", "+", "common_layers", ".", "shape_list", "(", "inputs", ")", "[", "1", ":", "]", ")", "tf", ".", "summary", ".", "image", "(", "\"generated\"", ",", "summary_g_image", ",", "max_outputs", "=", "1", ")", "if", "is_training", ":", "# Returns an dummy output and the losses dictionary.", "return", "tf", ".", "zeros_like", "(", "inputs", ")", ",", "losses", "return", "tf", ".", "reshape", "(", "g", ",", "tf", ".", "shape", "(", "inputs", ")", ")", ",", "losses" ]
Body of the model.

Args:
  features: a dictionary with the tensors.

Returns:
  A pair (predictions, losses) where predictions is the generated image
  and losses is a dictionary of losses (that get added for the final loss).
[ "Body", "of", "the", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L127-L160
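The noise-to-summary plumbing in body() can be exercised in isolation. A runnable TF1 sketch with the generator stubbed out (the real model calls self.generator; the 32x32x3 output shape is an assumption for the demo):

import tensorflow as tf

batch_size, bottleneck_bits = 8, 128
# Noise vector, as in body().
z = tf.random_uniform([batch_size, bottleneck_bits],
                      minval=-1, maxval=1, name="z")
# Stub generator output; the real generator is a network consuming z.
g = tf.zeros([batch_size, 32, 32, 3])
# Summarize only the first generated image, as body() does.
summary_g_image = tf.reshape(g[0, :], [1, 32, 32, 3])
tf.summary.image("generated", summary_g_image, max_outputs=1)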
22,690
tensorflow/tensor2tensor
tensor2tensor/trax/inputs.py
inputs
def inputs(num_devices, dataset_name, data_dir=None, input_name=None,
           num_chunks=0, append_targets=False):
  """Make Inputs for built-in datasets.

  Args:
    num_devices: how many devices to build the inputs for.
    dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name,
      prefix with "t2t_".
    data_dir: data directory.
    input_name: optional, name of the inputs from the dictionary.
    num_chunks: optional, into how many pieces should we chunk (large inputs).
    append_targets: optional, instead of inputs return a pair (inputs, targets)
      which is useful for autoregressive models.

  Returns:
    trax.inputs.Inputs
  """
  assert data_dir, "Must provide a data directory"
  data_dir = os.path.expanduser(data_dir)

  (train_batches, train_eval_batches, eval_batches,
   input_name, input_shape) = _train_and_eval_batches(
       dataset_name, data_dir, input_name, num_devices)

  def numpy_stream(dataset):
    return dataset_to_stream(
        dataset, input_name,
        num_chunks=num_chunks, append_targets=append_targets)

  if num_chunks > 0:
    length = input_shape[0]
    input_shape = tuple(
        [tuple([length // num_chunks] + list(input_shape)[1:])] * num_chunks)

  return Inputs(train_stream=lambda: numpy_stream(train_batches),
                train_eval_stream=lambda: numpy_stream(train_eval_batches),
                eval_stream=lambda: numpy_stream(eval_batches),
                input_shape=input_shape)
python
def inputs(num_devices, dataset_name, data_dir=None, input_name=None,
           num_chunks=0, append_targets=False):
  """Make Inputs for built-in datasets.

  Args:
    num_devices: how many devices to build the inputs for.
    dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name,
      prefix with "t2t_".
    data_dir: data directory.
    input_name: optional, name of the inputs from the dictionary.
    num_chunks: optional, into how many pieces should we chunk (large inputs).
    append_targets: optional, instead of inputs return a pair (inputs, targets)
      which is useful for autoregressive models.

  Returns:
    trax.inputs.Inputs
  """
  assert data_dir, "Must provide a data directory"
  data_dir = os.path.expanduser(data_dir)

  (train_batches, train_eval_batches, eval_batches,
   input_name, input_shape) = _train_and_eval_batches(
       dataset_name, data_dir, input_name, num_devices)

  def numpy_stream(dataset):
    return dataset_to_stream(
        dataset, input_name,
        num_chunks=num_chunks, append_targets=append_targets)

  if num_chunks > 0:
    length = input_shape[0]
    input_shape = tuple(
        [tuple([length // num_chunks] + list(input_shape)[1:])] * num_chunks)

  return Inputs(train_stream=lambda: numpy_stream(train_batches),
                train_eval_stream=lambda: numpy_stream(train_eval_batches),
                eval_stream=lambda: numpy_stream(eval_batches),
                input_shape=input_shape)
[ "def", "inputs", "(", "num_devices", ",", "dataset_name", ",", "data_dir", "=", "None", ",", "input_name", "=", "None", ",", "num_chunks", "=", "0", ",", "append_targets", "=", "False", ")", ":", "assert", "data_dir", ",", "\"Must provide a data directory\"", "data_dir", "=", "os", ".", "path", ".", "expanduser", "(", "data_dir", ")", "(", "train_batches", ",", "train_eval_batches", ",", "eval_batches", ",", "input_name", ",", "input_shape", ")", "=", "_train_and_eval_batches", "(", "dataset_name", ",", "data_dir", ",", "input_name", ",", "num_devices", ")", "def", "numpy_stream", "(", "dataset", ")", ":", "return", "dataset_to_stream", "(", "dataset", ",", "input_name", ",", "num_chunks", "=", "num_chunks", ",", "append_targets", "=", "append_targets", ")", "if", "num_chunks", ">", "0", ":", "length", "=", "input_shape", "[", "0", "]", "input_shape", "=", "tuple", "(", "[", "tuple", "(", "[", "length", "//", "num_chunks", "]", "+", "list", "(", "input_shape", ")", "[", "1", ":", "]", ")", "]", "*", "num_chunks", ")", "return", "Inputs", "(", "train_stream", "=", "lambda", ":", "numpy_stream", "(", "train_batches", ")", ",", "train_eval_stream", "=", "lambda", ":", "numpy_stream", "(", "train_eval_batches", ")", ",", "eval_stream", "=", "lambda", ":", "numpy_stream", "(", "eval_batches", ")", ",", "input_shape", "=", "input_shape", ")" ]
Make Inputs for built-in datasets.

Args:
  num_devices: how many devices to build the inputs for.
  dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix
    with "t2t_".
  data_dir: data directory.
  input_name: optional, name of the inputs from the dictionary.
  num_chunks: optional, into how many pieces should we chunk (large inputs).
  append_targets: optional, instead of inputs return a pair (inputs, targets)
    which is useful for autoregressive models.

Returns:
  trax.inputs.Inputs
[ "Make", "Inputs", "for", "built", "-", "in", "datasets", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L58-L95
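A hedged usage sketch: in trax this function is typically wired up through gin, but it can also be called directly; the dataset name and data directory below are placeholders, and the first call will build (and possibly download) the dataset batches:

from tensor2tensor.trax import inputs as trax_inputs

inp = trax_inputs.inputs(
    num_devices=1, dataset_name="mnist", data_dir="~/tensorflow_datasets")
# Each stream field is a zero-argument callable returning a numpy generator.
first_inputs, first_targets = next(inp.train_stream())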
22,691
tensorflow/tensor2tensor
tensor2tensor/trax/inputs.py
random_inputs
def random_inputs(
    num_devices,
    input_shape=gin.REQUIRED, input_dtype=np.int32, input_range=(0, 255),
    output_shape=gin.REQUIRED, output_dtype=np.int32, output_range=(0, 9)):
  """Make random Inputs for debugging.

  Args:
    num_devices: how many devices to build the inputs for.
    input_shape: the shape of inputs (including batch dimension).
    input_dtype: the type of the inputs (int32 by default).
    input_range: the range of inputs (defaults to (0, 255)).
    output_shape: the shape of outputs (including batch dimension).
    output_dtype: the type of the outputs (int32 by default).
    output_range: the range of outputs (defaults to (0, 9)).

  Returns:
    trax.inputs.Inputs
  """
  if input_shape[0] % num_devices != 0:
    tf.logging.fatal(
        "num_devices[%d] should divide the first dimension of input_shape[%s]",
        num_devices, input_shape)
  if output_shape[0] % num_devices != 0:
    tf.logging.fatal(
        "num_devices[%d] should divide the first dimension of output_shape[%s]",
        num_devices, output_shape)

  def random_minibatches():
    """Generate a stream of random mini-batches."""
    if input_dtype in [np.float16, np.float32, np.float64]:
      rand = np.random.uniform
    else:
      rand = np.random.random_integers
    while True:
      inp = rand(input_range[0], input_range[1], input_shape)
      inp = inp.astype(input_dtype)
      out = rand(output_range[0], output_range[1], output_shape)
      out = out.astype(output_dtype)
      yield inp, out

  input_shape_without_batch = list(input_shape)[1:]
  return Inputs(train_stream=random_minibatches,
                train_eval_stream=random_minibatches,
                eval_stream=random_minibatches,
                input_shape=input_shape_without_batch)
python
def random_inputs(
    num_devices,
    input_shape=gin.REQUIRED, input_dtype=np.int32, input_range=(0, 255),
    output_shape=gin.REQUIRED, output_dtype=np.int32, output_range=(0, 9)):
  """Make random Inputs for debugging.

  Args:
    num_devices: how many devices to build the inputs for.
    input_shape: the shape of inputs (including batch dimension).
    input_dtype: the type of the inputs (int32 by default).
    input_range: the range of inputs (defaults to (0, 255)).
    output_shape: the shape of outputs (including batch dimension).
    output_dtype: the type of the outputs (int32 by default).
    output_range: the range of outputs (defaults to (0, 9)).

  Returns:
    trax.inputs.Inputs
  """
  if input_shape[0] % num_devices != 0:
    tf.logging.fatal(
        "num_devices[%d] should divide the first dimension of input_shape[%s]",
        num_devices, input_shape)
  if output_shape[0] % num_devices != 0:
    tf.logging.fatal(
        "num_devices[%d] should divide the first dimension of output_shape[%s]",
        num_devices, output_shape)

  def random_minibatches():
    """Generate a stream of random mini-batches."""
    if input_dtype in [np.float16, np.float32, np.float64]:
      rand = np.random.uniform
    else:
      rand = np.random.random_integers
    while True:
      inp = rand(input_range[0], input_range[1], input_shape)
      inp = inp.astype(input_dtype)
      out = rand(output_range[0], output_range[1], output_shape)
      out = out.astype(output_dtype)
      yield inp, out

  input_shape_without_batch = list(input_shape)[1:]
  return Inputs(train_stream=random_minibatches,
                train_eval_stream=random_minibatches,
                eval_stream=random_minibatches,
                input_shape=input_shape_without_batch)
[ "def", "random_inputs", "(", "num_devices", ",", "input_shape", "=", "gin", ".", "REQUIRED", ",", "input_dtype", "=", "np", ".", "int32", ",", "input_range", "=", "(", "0", ",", "255", ")", ",", "output_shape", "=", "gin", ".", "REQUIRED", ",", "output_dtype", "=", "np", ".", "int32", ",", "output_range", "=", "(", "0", ",", "9", ")", ")", ":", "if", "input_shape", "[", "0", "]", "%", "num_devices", "!=", "0", ":", "tf", ".", "logging", ".", "fatal", "(", "\"num_devices[%d] should divide the first dimension of input_shape[%s]\"", ",", "num_devices", ",", "input_shape", ")", "if", "output_shape", "[", "0", "]", "%", "num_devices", "!=", "0", ":", "tf", ".", "logging", ".", "fatal", "(", "\"num_devices[%d] should divide the first dimension of output_shape[%s]\"", ",", "num_devices", ",", "output_shape", ")", "def", "random_minibatches", "(", ")", ":", "\"\"\"Generate a stream of random mini-batches.\"\"\"", "if", "input_dtype", "in", "[", "np", ".", "float16", ",", "np", ".", "float32", ",", "np", ".", "float64", "]", ":", "rand", "=", "np", ".", "random", ".", "uniform", "else", ":", "rand", "=", "np", ".", "random", ".", "random_integers", "while", "True", ":", "inp", "=", "rand", "(", "input_range", "[", "0", "]", ",", "input_range", "[", "1", "]", ",", "input_shape", ")", "inp", "=", "inp", ".", "astype", "(", "input_dtype", ")", "out", "=", "rand", "(", "output_range", "[", "0", "]", ",", "output_range", "[", "1", "]", ",", "output_shape", ")", "out", "=", "out", ".", "astype", "(", "output_dtype", ")", "yield", "inp", ",", "out", "input_shape_without_batch", "=", "list", "(", "input_shape", ")", "[", "1", ":", "]", "return", "Inputs", "(", "train_stream", "=", "random_minibatches", ",", "train_eval_stream", "=", "random_minibatches", ",", "eval_stream", "=", "random_minibatches", ",", "input_shape", "=", "input_shape_without_batch", ")" ]
Make random Inputs for debugging.

Args:
  num_devices: how many devices to build the inputs for.
  input_shape: the shape of inputs (including batch dimension).
  input_dtype: the type of the inputs (int32 by default).
  input_range: the range of inputs (defaults to (0, 255)).
  output_shape: the shape of outputs (including batch dimension).
  output_dtype: the type of the outputs (int32 by default).
  output_range: the range of outputs (defaults to (0, 9)).

Returns:
  trax.inputs.Inputs
[ "Make", "random", "Inputs", "for", "debugging", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L99-L143
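Because random_inputs needs no data on disk, it makes a quick smoke test for a training loop; a sketch, assuming the module import path from the record above:

import numpy as np
from tensor2tensor.trax import inputs as trax_inputs

rnd = trax_inputs.random_inputs(
    num_devices=1,
    input_shape=(8, 28, 28, 1), input_dtype=np.int32, input_range=(0, 255),
    output_shape=(8,), output_dtype=np.int32, output_range=(0, 9))
inp, out = next(rnd.train_stream())
print(inp.shape, out.shape)  # (8, 28, 28, 1) (8,)
print(rnd.input_shape)       # batch dimension stripped: [28, 28, 1]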
22,692
tensorflow/tensor2tensor
tensor2tensor/trax/inputs.py
dataset_to_stream
def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False):
  """Takes a tf.Dataset and creates a numpy stream of ready batches."""
  for example in tfds.as_numpy(dataset):
    inp, out = example[0][input_name], example[1]
    if len(out.shape) > 1 and out.shape[-1] == 1:
      out = np.squeeze(out, axis=-1)
    if num_chunks > 0:
      inp = np.split(inp, num_chunks, axis=1)
      out = np.split(out, num_chunks, axis=1)
    if append_targets:
      inp = (inp, out)
    yield inp, out
python
def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False):
  """Takes a tf.Dataset and creates a numpy stream of ready batches."""
  for example in tfds.as_numpy(dataset):
    inp, out = example[0][input_name], example[1]
    if len(out.shape) > 1 and out.shape[-1] == 1:
      out = np.squeeze(out, axis=-1)
    if num_chunks > 0:
      inp = np.split(inp, num_chunks, axis=1)
      out = np.split(out, num_chunks, axis=1)
    if append_targets:
      inp = (inp, out)
    yield inp, out
[ "def", "dataset_to_stream", "(", "dataset", ",", "input_name", ",", "num_chunks", "=", "0", ",", "append_targets", "=", "False", ")", ":", "for", "example", "in", "tfds", ".", "as_numpy", "(", "dataset", ")", ":", "inp", ",", "out", "=", "example", "[", "0", "]", "[", "input_name", "]", ",", "example", "[", "1", "]", "if", "len", "(", "out", ".", "shape", ")", ">", "1", "and", "out", ".", "shape", "[", "-", "1", "]", "==", "1", ":", "out", "=", "np", ".", "squeeze", "(", "out", ",", "axis", "=", "-", "1", ")", "if", "num_chunks", ">", "0", ":", "inp", "=", "np", ".", "split", "(", "inp", ",", "num_chunks", ",", "axis", "=", "1", ")", "out", "=", "np", ".", "split", "(", "out", ",", "num_chunks", ",", "axis", "=", "1", ")", "if", "append_targets", ":", "inp", "=", "(", "inp", ",", "out", ")", "yield", "inp", ",", "out" ]
Takes a tf.Dataset and creates a numpy stream of ready batches.
[ "Takes", "a", "tf", ".", "Dataset", "and", "creates", "a", "numpy", "stream", "of", "ready", "batches", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L146-L157
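A self-contained sketch of the element structure dataset_to_stream expects, i.e. (features_dict, target) pairs; the "image" key is an assumption for the demo:

import numpy as np
import tensorflow as tf
from tensor2tensor.trax.inputs import dataset_to_stream  # assumed importable

features = {"image": np.arange(12, dtype=np.int32).reshape(4, 3)}
targets = np.array([[0], [1], [2], [3]], dtype=np.int32)
dataset = tf.data.Dataset.from_tensor_slices((features, targets)).batch(2)

for inp, out in dataset_to_stream(dataset, "image"):
  print(inp.shape, out.shape)  # (2, 3) (2,) -- trailing size-1 axis squeezed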
22,693
tensorflow/tensor2tensor
tensor2tensor/trax/inputs.py
_train_and_eval_batches
def _train_and_eval_batches(dataset, data_dir, input_name, num_devices):
  """Return train and eval batches with input name and shape."""
  (train_data, eval_data, features_info, keys) = train_and_eval_dataset(
      dataset, data_dir)
  input_names, target_names = keys[0], keys[1]
  train_batches = shuffle_and_batch_data(
      train_data, target_names, features_info, training=True,
      num_devices=num_devices)
  train_eval_batches = shuffle_and_batch_data(  # Data for eval-on-train.
      train_data, target_names, features_info, training=False,
      num_devices=num_devices)
  eval_batches = shuffle_and_batch_data(
      eval_data, target_names, features_info, training=False,
      num_devices=num_devices)
  input_name = input_name or input_names[0]
  input_shape = features_info[input_name].shape
  return (train_batches, train_eval_batches, eval_batches,
          input_name, list(input_shape))
python
def _train_and_eval_batches(dataset, data_dir, input_name, num_devices):
  """Return train and eval batches with input name and shape."""
  (train_data, eval_data, features_info, keys) = train_and_eval_dataset(
      dataset, data_dir)
  input_names, target_names = keys[0], keys[1]
  train_batches = shuffle_and_batch_data(
      train_data, target_names, features_info, training=True,
      num_devices=num_devices)
  train_eval_batches = shuffle_and_batch_data(  # Data for eval-on-train.
      train_data, target_names, features_info, training=False,
      num_devices=num_devices)
  eval_batches = shuffle_and_batch_data(
      eval_data, target_names, features_info, training=False,
      num_devices=num_devices)
  input_name = input_name or input_names[0]
  input_shape = features_info[input_name].shape
  return (train_batches, train_eval_batches, eval_batches,
          input_name, list(input_shape))
[ "def", "_train_and_eval_batches", "(", "dataset", ",", "data_dir", ",", "input_name", ",", "num_devices", ")", ":", "(", "train_data", ",", "eval_data", ",", "features_info", ",", "keys", ")", "=", "train_and_eval_dataset", "(", "dataset", ",", "data_dir", ")", "input_names", ",", "target_names", "=", "keys", "[", "0", "]", ",", "keys", "[", "1", "]", "train_batches", "=", "shuffle_and_batch_data", "(", "train_data", ",", "target_names", ",", "features_info", ",", "training", "=", "True", ",", "num_devices", "=", "num_devices", ")", "train_eval_batches", "=", "shuffle_and_batch_data", "(", "# Data for eval-on-train.", "train_data", ",", "target_names", ",", "features_info", ",", "training", "=", "False", ",", "num_devices", "=", "num_devices", ")", "eval_batches", "=", "shuffle_and_batch_data", "(", "eval_data", ",", "target_names", ",", "features_info", ",", "training", "=", "False", ",", "num_devices", "=", "num_devices", ")", "input_name", "=", "input_name", "or", "input_names", "[", "0", "]", "input_shape", "=", "features_info", "[", "input_name", "]", ".", "shape", "return", "(", "train_batches", ",", "train_eval_batches", ",", "eval_batches", ",", "input_name", ",", "list", "(", "input_shape", ")", ")" ]
Return train and eval batches with input name and shape.
[ "Return", "train", "and", "eval", "batches", "with", "input", "name", "and", "shape", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L388-L405
22,694
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
get_multi_dataset
def get_multi_dataset(datasets, pmf=None):
  """Returns a Dataset that samples records from one or more Datasets.

  Args:
    datasets: A list of one or more Dataset objects to sample from.
    pmf: A tensor of shape [len(datasets)], the probabilities to sample each
      dataset with. This tensor is often constructed with the global_step. If
      this is None, we sample from the datasets uniformly at random.

  Returns:
    A Dataset object containing records from multiple datasets. Note that
    because this dataset iterates through other datasets it is stateful, thus
    you will need to call make_initializable_iterator instead of
    make_one_shot_iterator.
  """
  pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf
  samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets]
  sample = lambda _: categorical_case(pmf, samplers)
  return tf.data.Dataset.from_tensors([]).repeat().map(sample)
python
def get_multi_dataset(datasets, pmf=None):
  """Returns a Dataset that samples records from one or more Datasets.

  Args:
    datasets: A list of one or more Dataset objects to sample from.
    pmf: A tensor of shape [len(datasets)], the probabilities to sample each
      dataset with. This tensor is often constructed with the global_step. If
      this is None, we sample from the datasets uniformly at random.

  Returns:
    A Dataset object containing records from multiple datasets. Note that
    because this dataset iterates through other datasets it is stateful, thus
    you will need to call make_initializable_iterator instead of
    make_one_shot_iterator.
  """
  pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf
  samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets]
  sample = lambda _: categorical_case(pmf, samplers)
  return tf.data.Dataset.from_tensors([]).repeat().map(sample)
[ "def", "get_multi_dataset", "(", "datasets", ",", "pmf", "=", "None", ")", ":", "pmf", "=", "tf", ".", "fill", "(", "[", "len", "(", "datasets", ")", "]", ",", "1.0", "/", "len", "(", "datasets", ")", ")", "if", "pmf", "is", "None", "else", "pmf", "samplers", "=", "[", "d", ".", "repeat", "(", ")", ".", "make_one_shot_iterator", "(", ")", ".", "get_next", "for", "d", "in", "datasets", "]", "sample", "=", "lambda", "_", ":", "categorical_case", "(", "pmf", ",", "samplers", ")", "return", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", "(", "[", "]", ")", ".", "repeat", "(", ")", ".", "map", "(", "sample", ")" ]
Returns a Dataset that samples records from one or more Datasets.

Args:
  datasets: A list of one or more Dataset objects to sample from.
  pmf: A tensor of shape [len(datasets)], the probabilities to sample each
    dataset with. This tensor is often constructed with the global_step. If
    this is None, we sample from the datasets uniformly at random.

Returns:
  A Dataset object containing records from multiple datasets. Note that
  because this dataset iterates through other datasets it is stateful, thus
  you will need to call make_initializable_iterator instead of
  make_one_shot_iterator.
[ "Returns", "a", "Dataset", "that", "samples", "records", "from", "one", "or", "more", "Datasets", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L205-L223
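A TF1 graph-mode usage sketch; per the docstring, the stateful result needs an initializable iterator. The two toy datasets and the 90/10 pmf are arbitrary:

import tensorflow as tf
from tensor2tensor.data_generators import multi_problem_v2

zeros = tf.data.Dataset.from_tensor_slices(tf.zeros([4], tf.int64))
ones = tf.data.Dataset.from_tensor_slices(tf.ones([4], tf.int64))
multi = multi_problem_v2.get_multi_dataset(
    [zeros, ones], pmf=tf.constant([0.9, 0.1]))

iterator = multi.make_initializable_iterator()
example = iterator.get_next()
with tf.Session() as sess:
  sess.run(iterator.initializer)
  print([sess.run(example) for _ in range(8)])  # mostly 0s, occasional 1s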
22,695
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
get_schedule_distribution
def get_schedule_distribution(schedule, global_step=None):
  """Computes the pmf of a schedule given the global_step.

  Args:
    schedule: A schedule tuple, see encode_schedule for details.
    global_step: A scalar tensor, the step to query the schedule.

  Returns:
    A 1-D tensor of probs, the sampling distribution of the global_step.
  """
  interpolation, steps, pmfs = schedule
  if len(pmfs) == 1:
    # py_func doesn't seem to work on TPU - at least get the constant case to
    # run.
    # TODO(noam): get the general case working.
    return pmfs[0]
  if global_step is None:
    global_step = tf.train.get_or_create_global_step()
  if interpolation == 'step':
    interpolation_fn = step_interpolation
  elif interpolation == 'linear':
    interpolation_fn = linear_interpolation
  else:
    raise ValueError('Invalid interpolation strategy: %s' % interpolation)
  return tf.reshape(
      tf.py_func(
          func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)),
          inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
python
def get_schedule_distribution(schedule, global_step=None):
  """Computes the pmf of a schedule given the global_step.

  Args:
    schedule: A schedule tuple, see encode_schedule for details.
    global_step: A scalar tensor, the step to query the schedule.

  Returns:
    A 1-D tensor of probs, the sampling distribution of the global_step.
  """
  interpolation, steps, pmfs = schedule
  if len(pmfs) == 1:
    # py_func doesn't seem to work on TPU - at least get the constant case to
    # run.
    # TODO(noam): get the general case working.
    return pmfs[0]
  if global_step is None:
    global_step = tf.train.get_or_create_global_step()
  if interpolation == 'step':
    interpolation_fn = step_interpolation
  elif interpolation == 'linear':
    interpolation_fn = linear_interpolation
  else:
    raise ValueError('Invalid interpolation strategy: %s' % interpolation)
  return tf.reshape(
      tf.py_func(
          func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)),
          inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
[ "def", "get_schedule_distribution", "(", "schedule", ",", "global_step", "=", "None", ")", ":", "interpolation", ",", "steps", ",", "pmfs", "=", "schedule", "if", "len", "(", "pmfs", ")", "==", "1", ":", "# py_func doesn't seem to work on TPU - at least get the constant case to", "# run.", "# TODO(noam): get the general case working.", "return", "pmfs", "[", "0", "]", "if", "global_step", "is", "None", ":", "global_step", "=", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", "if", "interpolation", "==", "'step'", ":", "interpolation_fn", "=", "step_interpolation", "elif", "interpolation", "==", "'linear'", ":", "interpolation_fn", "=", "linear_interpolation", "else", ":", "raise", "ValueError", "(", "'Invalid interpolation strategy: %s'", "%", "interpolation", ")", "return", "tf", ".", "reshape", "(", "tf", ".", "py_func", "(", "func", "=", "lambda", "x", ":", "interpolation_fn", "(", "x", ",", "np", ".", "array", "(", "steps", ")", ",", "np", ".", "array", "(", "pmfs", ")", ")", ",", "inp", "=", "[", "global_step", "]", ",", "Tout", "=", "tf", ".", "float32", ")", ",", "[", "len", "(", "pmfs", "[", "0", "]", ")", "]", ")" ]
Computes the pmf of a schedule given the global_step.

Args:
  schedule: A schedule tuple, see encode_schedule for details.
  global_step: A scalar tensor, the step to query the schedule.

Returns:
  A 1-D tensor of probs, the sampling distribution of the global_step.
[ "Computes", "the", "pmf", "of", "a", "schedule", "given", "the", "global_step", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L226-L253
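A sketch querying a two-point 'step' schedule at a fixed step (TF1 graph mode; the schedule tuple follows the format documented under encode_schedule below):

import tensorflow as tf
from tensor2tensor.data_generators import multi_problem_v2

schedule = ("step", (0, 1000), ((1.0, 0.0), (0.5, 0.5)))
pmf = multi_problem_v2.get_schedule_distribution(
    schedule, global_step=tf.constant(1500, dtype=tf.int64))
with tf.Session() as sess:
  print(sess.run(pmf))  # [0.5 0.5] -- past step 1000, the second pmf applies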
22,696
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
linear_interpolation
def linear_interpolation(x, xp, fp, **kwargs):
  """Multi-dimensional linear interpolation.

  Returns the multi-dimensional piecewise linear interpolant to a function with
  given discrete data points (xp, fp), evaluated at x.

  Note that *N and *M indicate zero or more dimensions.

  Args:
    x: An array of shape [*N], the x-coordinates of the interpolated values.
    xp: An np.array of shape [D], the x-coordinates of the data points, must be
      increasing.
    fp: An np.array of shape [D, *M], the y-coordinates of the data points.
    **kwargs: Keywords for np.interp.

  Returns:
    An array of shape [*N, *M], the interpolated values.
  """
  yp = fp.reshape([fp.shape[0], -1]).transpose()
  y = np.stack([np.interp(x, xp, zp, **kwargs) for zp in yp]).transpose()
  return y.reshape(x.shape[:1] + fp.shape[1:]).astype(np.float32)
python
def linear_interpolation(x, xp, fp, **kwargs):
  """Multi-dimensional linear interpolation.

  Returns the multi-dimensional piecewise linear interpolant to a function with
  given discrete data points (xp, fp), evaluated at x.

  Note that *N and *M indicate zero or more dimensions.

  Args:
    x: An array of shape [*N], the x-coordinates of the interpolated values.
    xp: An np.array of shape [D], the x-coordinates of the data points, must be
      increasing.
    fp: An np.array of shape [D, *M], the y-coordinates of the data points.
    **kwargs: Keywords for np.interp.

  Returns:
    An array of shape [*N, *M], the interpolated values.
  """
  yp = fp.reshape([fp.shape[0], -1]).transpose()
  y = np.stack([np.interp(x, xp, zp, **kwargs) for zp in yp]).transpose()
  return y.reshape(x.shape[:1] + fp.shape[1:]).astype(np.float32)
[ "def", "linear_interpolation", "(", "x", ",", "xp", ",", "fp", ",", "*", "*", "kwargs", ")", ":", "yp", "=", "fp", ".", "reshape", "(", "[", "fp", ".", "shape", "[", "0", "]", ",", "-", "1", "]", ")", ".", "transpose", "(", ")", "y", "=", "np", ".", "stack", "(", "[", "np", ".", "interp", "(", "x", ",", "xp", ",", "zp", ",", "*", "*", "kwargs", ")", "for", "zp", "in", "yp", "]", ")", ".", "transpose", "(", ")", "return", "y", ".", "reshape", "(", "x", ".", "shape", "[", ":", "1", "]", "+", "fp", ".", "shape", "[", "1", ":", "]", ")", ".", "astype", "(", "np", ".", "float32", ")" ]
Multi-dimensional linear interpolation.

Returns the multi-dimensional piecewise linear interpolant to a function with
given discrete data points (xp, fp), evaluated at x.

Note that *N and *M indicate zero or more dimensions.

Args:
  x: An array of shape [*N], the x-coordinates of the interpolated values.
  xp: An np.array of shape [D], the x-coordinates of the data points, must be
    increasing.
  fp: An np.array of shape [D, *M], the y-coordinates of the data points.
  **kwargs: Keywords for np.interp.

Returns:
  An array of shape [*N, *M], the interpolated values.
[ "Multi", "-", "dimensional", "linear", "interpolation", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L274-L294
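A pure-numpy demo of the shape contract, interpolating a two-problem pmf between steps 0 and 100 (import path taken from the record above):

import numpy as np
from tensor2tensor.data_generators.multi_problem_v2 import linear_interpolation

x = np.array([0.0, 50.0, 100.0])
xp = np.array([0.0, 100.0])
fp = np.array([[1.0, 0.0],    # pmf at step 0
               [0.0, 1.0]])   # pmf at step 100
print(linear_interpolation(x, xp, fp))
# [[1.  0. ]
#  [0.5 0.5]
#  [0.  1. ]]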
22,697
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
step_interpolation
def step_interpolation(x, xp, fp, **kwargs):
  """Multi-dimensional step interpolation.

  Returns the multi-dimensional step interpolant to a function with given
  discrete data points (xp, fp), evaluated at x.

  Note that *N and *M indicate zero or more dimensions.

  Args:
    x: An array of shape [*N], the x-coordinates of the interpolated values.
    xp: An np.array of shape [D], the x-coordinates of the data points, must be
      increasing.
    fp: An np.array of shape [D, *M], the y-coordinates of the data points.
    **kwargs: Unused.

  Returns:
    An array of shape [*N, *M], the interpolated values.
  """
  del kwargs  # Unused.
  xp = np.expand_dims(xp, -1)
  lower, upper = xp[:-1], xp[1:]
  conditions = (x >= lower) & (x < upper)
  # Underflow and overflow conditions and values. Values default to fp[0] and
  # fp[-1] respectively.
  conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]])
  values = np.concatenate([[fp[0]], fp])
  assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.'
  indices = np.argmax(conditions, 0)
  return values[indices].astype(np.float32)
python
def step_interpolation(x, xp, fp, **kwargs):
  """Multi-dimensional step interpolation.

  Returns the multi-dimensional step interpolant to a function with given
  discrete data points (xp, fp), evaluated at x.

  Note that *N and *M indicate zero or more dimensions.

  Args:
    x: An array of shape [*N], the x-coordinates of the interpolated values.
    xp: An np.array of shape [D], the x-coordinates of the data points, must be
      increasing.
    fp: An np.array of shape [D, *M], the y-coordinates of the data points.
    **kwargs: Unused.

  Returns:
    An array of shape [*N, *M], the interpolated values.
  """
  del kwargs  # Unused.
  xp = np.expand_dims(xp, -1)
  lower, upper = xp[:-1], xp[1:]
  conditions = (x >= lower) & (x < upper)
  # Underflow and overflow conditions and values. Values default to fp[0] and
  # fp[-1] respectively.
  conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]])
  values = np.concatenate([[fp[0]], fp])
  assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.'
  indices = np.argmax(conditions, 0)
  return values[indices].astype(np.float32)
[ "def", "step_interpolation", "(", "x", ",", "xp", ",", "fp", ",", "*", "*", "kwargs", ")", ":", "del", "kwargs", "# Unused.", "xp", "=", "np", ".", "expand_dims", "(", "xp", ",", "-", "1", ")", "lower", ",", "upper", "=", "xp", "[", ":", "-", "1", "]", ",", "xp", "[", "1", ":", "]", "conditions", "=", "(", "x", ">=", "lower", ")", "&", "(", "x", "<", "upper", ")", "# Underflow and overflow conditions and values. Values default to fp[0] and", "# fp[-1] respectively.", "conditions", "=", "np", ".", "concatenate", "(", "[", "[", "x", "<", "xp", "[", "0", "]", "]", ",", "conditions", ",", "[", "x", ">=", "xp", "[", "-", "1", "]", "]", "]", ")", "values", "=", "np", ".", "concatenate", "(", "[", "[", "fp", "[", "0", "]", "]", ",", "fp", "]", ")", "assert", "np", ".", "all", "(", "np", ".", "sum", "(", "conditions", ",", "0", ")", "==", "1", ")", ",", "'xp must be increasing.'", "indices", "=", "np", ".", "argmax", "(", "conditions", ",", "0", ")", "return", "values", "[", "indices", "]", ".", "astype", "(", "np", ".", "float32", ")" ]
Multi-dimensional step interpolation.

Returns the multi-dimensional step interpolant to a function with given
discrete data points (xp, fp), evaluated at x.

Note that *N and *M indicate zero or more dimensions.

Args:
  x: An array of shape [*N], the x-coordinates of the interpolated values.
  xp: An np.array of shape [D], the x-coordinates of the data points, must be
    increasing.
  fp: An np.array of shape [D, *M], the y-coordinates of the data points.
  **kwargs: Unused.

Returns:
  An array of shape [*N, *M], the interpolated values.
[ "Multi", "-", "dimensional", "step", "interpolation", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L297-L325
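A companion numpy demo showing the underflow/overflow defaults:

import numpy as np
from tensor2tensor.data_generators.multi_problem_v2 import step_interpolation

x = np.array([0.0, 150.0, 250.0])
xp = np.array([100.0, 200.0])
fp = np.array([[1.0, 0.0],
               [0.5, 0.5]])
print(step_interpolation(x, xp, fp))
# [[1.  0. ]    x < xp[0] falls back to fp[0]
#  [1.  0. ]    100 <= x < 200 -> fp[0]
#  [0.5 0.5]]   x >= 200 -> fp[1]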
22,698
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
epoch_rates_to_pmf
def epoch_rates_to_pmf(problems, epoch_rates=None):
  """Create a probability-mass-function based on relative epoch rates.

  if epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems)
  i.e. it takes each problem the same time to go through one epoch.

  If epoch_rates is given, then these are the relative numbers of epochs
  of each problem to go through in a given amount of time.

  Each must have problem.num_training_examples implemented.

  Args:
    problems: a list of Problem instances.
    epoch_rates: an optional list of float

  Returns:
    a list of floating point values.
  """
  if epoch_rates is None:
    epoch_rates = [1.0] * len(problems)
  example_rates = [epoch_rate * p.num_training_examples
                   for p, epoch_rate in zip(problems, epoch_rates)]
  return example_rates_to_pmf(example_rates)
python
def epoch_rates_to_pmf(problems, epoch_rates=None):
  """Create a probability-mass-function based on relative epoch rates.

  if epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems)
  i.e. it takes each problem the same time to go through one epoch.

  If epoch_rates is given, then these are the relative numbers of epochs
  of each problem to go through in a given amount of time.

  Each must have problem.num_training_examples implemented.

  Args:
    problems: a list of Problem instances.
    epoch_rates: an optional list of float

  Returns:
    a list of floating point values.
  """
  if epoch_rates is None:
    epoch_rates = [1.0] * len(problems)
  example_rates = [epoch_rate * p.num_training_examples
                   for p, epoch_rate in zip(problems, epoch_rates)]
  return example_rates_to_pmf(example_rates)
[ "def", "epoch_rates_to_pmf", "(", "problems", ",", "epoch_rates", "=", "None", ")", ":", "if", "epoch_rates", "is", "None", ":", "epoch_rates", "=", "[", "1.0", "]", "*", "len", "(", "problems", ")", "example_rates", "=", "[", "epoch_rate", "*", "p", ".", "num_training_examples", "for", "p", ",", "epoch_rate", "in", "zip", "(", "problems", ",", "epoch_rates", ")", "]", "return", "example_rates_to_pmf", "(", "example_rates", ")" ]
Create a probability-mass-function based on relative epoch rates.

if epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems)
i.e. it takes each problem the same time to go through one epoch.

If epoch_rates is given, then these are the relative numbers of epochs
of each problem to go through in a given amount of time.

Each must have problem.num_training_examples implemented.

Args:
  problems: a list of Problem instances.
  epoch_rates: an optional list of float

Returns:
  a list of floating point values.
[ "Create", "a", "probability", "-", "mass", "-", "function", "based", "on", "relative", "epoch", "rates", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L353-L375
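A worked example of the arithmetic; example_rates_to_pmf (defined elsewhere in this module) is assumed here to normalize the rates so they sum to 1:

def _normalize(rates):  # stands in for example_rates_to_pmf (assumed behavior)
  total = sum(rates)
  return [r / total for r in rates]

sizes = [1000, 3000]  # problem.num_training_examples for two problems
print(_normalize([1.0 * s for s in sizes]))  # uniform epochs -> [0.25, 0.75]
print(_normalize([2.0 * 1000, 1.0 * 3000]))  # epoch_rates=[2, 1] -> [0.4, 0.6]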
22,699
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
encode_schedule
def encode_schedule(schedule):
  """Encodes a schedule tuple into a string.

  Args:
    schedule: A tuple containing (interpolation, steps, pmfs), where
      interpolation is a string specifying the interpolation strategy, steps
      is an int array_like of shape [N] specifying the global steps, and pmfs
      is an array_like of shape [N, M] where pmf[i] is the sampling
      distribution at global step steps[i]. N is the number of schedule
      requirements to interpolate and M is the size of the probability space.

  Returns:
    The string encoding of the schedule tuple.
  """
  interpolation, steps, pmfs = schedule
  return interpolation + ' ' + ' '.join(
      '@' + str(s) + ' ' + ' '.join(map(str, p))
      for s, p in zip(steps, pmfs))
python
def encode_schedule(schedule):
  """Encodes a schedule tuple into a string.

  Args:
    schedule: A tuple containing (interpolation, steps, pmfs), where
      interpolation is a string specifying the interpolation strategy, steps
      is an int array_like of shape [N] specifying the global steps, and pmfs
      is an array_like of shape [N, M] where pmf[i] is the sampling
      distribution at global step steps[i]. N is the number of schedule
      requirements to interpolate and M is the size of the probability space.

  Returns:
    The string encoding of the schedule tuple.
  """
  interpolation, steps, pmfs = schedule
  return interpolation + ' ' + ' '.join(
      '@' + str(s) + ' ' + ' '.join(map(str, p))
      for s, p in zip(steps, pmfs))
[ "def", "encode_schedule", "(", "schedule", ")", ":", "interpolation", ",", "steps", ",", "pmfs", "=", "schedule", "return", "interpolation", "+", "' '", "+", "' '", ".", "join", "(", "'@'", "+", "str", "(", "s", ")", "+", "' '", "+", "' '", ".", "join", "(", "map", "(", "str", ",", "p", ")", ")", "for", "s", ",", "p", "in", "zip", "(", "steps", ",", "pmfs", ")", ")" ]
Encodes a schedule tuple into a string.

Args:
  schedule: A tuple containing (interpolation, steps, pmfs), where
    interpolation is a string specifying the interpolation strategy, steps
    is an int array_like of shape [N] specifying the global steps, and pmfs
    is an array_like of shape [N, M] where pmf[i] is the sampling
    distribution at global step steps[i]. N is the number of schedule
    requirements to interpolate and M is the size of the probability space.

Returns:
  The string encoding of the schedule tuple.
[ "Encodes", "a", "schedule", "tuple", "into", "a", "string", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L378-L394
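A quick demo of the resulting string format (import path taken from the record above):

from tensor2tensor.data_generators.multi_problem_v2 import encode_schedule

schedule = ("linear", (0, 1000), ((0.9, 0.1), (0.3, 0.7)))
print(encode_schedule(schedule))
# linear @0 0.9 0.1 @1000 0.3 0.7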