id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
21,700
|
tensorflow/tensor2tensor
|
tensor2tensor/serving/export.py
|
_get_hparams_path
|
def _get_hparams_path():
"""Get hyper-parameters file path."""
hparams_path = None
if FLAGS.output_dir:
hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
else:
tf.logging.warning(
"--output_dir not specified. Hyper-parameters will be infered from"
"--hparams_set and --hparams only. These may not match training time"
"hyper-parameters.")
return hparams_path
|
python
|
def _get_hparams_path():
"""Get hyper-parameters file path."""
hparams_path = None
if FLAGS.output_dir:
hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
else:
tf.logging.warning(
"--output_dir not specified. Hyper-parameters will be infered from"
"--hparams_set and --hparams only. These may not match training time"
"hyper-parameters.")
return hparams_path
|
[
"def",
"_get_hparams_path",
"(",
")",
":",
"hparams_path",
"=",
"None",
"if",
"FLAGS",
".",
"output_dir",
":",
"hparams_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"output_dir",
",",
"\"hparams.json\"",
")",
"else",
":",
"tf",
".",
"logging",
".",
"warning",
"(",
"\"--output_dir not specified. Hyper-parameters will be infered from\"",
"\"--hparams_set and --hparams only. These may not match training time\"",
"\"hyper-parameters.\"",
")",
"return",
"hparams_path"
] |
Get hyper-parameters file path.
|
[
"Get",
"hyper",
"-",
"parameters",
"file",
"path",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/export.py#L47-L57
|
21,701
|
tensorflow/tensor2tensor
|
tensor2tensor/serving/export.py
|
export_module_spec_with_checkpoint
|
def export_module_spec_with_checkpoint(module_spec,
checkpoint_path,
export_path,
scope_prefix=""):
"""Exports given checkpoint as tfhub module with given spec."""
# The main requirement is that it is possible to know how to map from
# module variable name to checkpoint variable name.
# This is trivial if the original code used variable scopes,
# but can be messy if the variables to export are interwined
# with variables not export.
with tf.Graph().as_default():
m = hub.Module(module_spec)
assign_map = {
scope_prefix + name: value for name, value in m.variable_map.items()
}
tf.train.init_from_checkpoint(checkpoint_path, assign_map)
init_op = tf.initializers.global_variables()
with tf.Session() as session:
session.run(init_op)
m.export(export_path, session)
|
python
|
def export_module_spec_with_checkpoint(module_spec,
checkpoint_path,
export_path,
scope_prefix=""):
"""Exports given checkpoint as tfhub module with given spec."""
# The main requirement is that it is possible to know how to map from
# module variable name to checkpoint variable name.
# This is trivial if the original code used variable scopes,
# but can be messy if the variables to export are interwined
# with variables not export.
with tf.Graph().as_default():
m = hub.Module(module_spec)
assign_map = {
scope_prefix + name: value for name, value in m.variable_map.items()
}
tf.train.init_from_checkpoint(checkpoint_path, assign_map)
init_op = tf.initializers.global_variables()
with tf.Session() as session:
session.run(init_op)
m.export(export_path, session)
|
[
"def",
"export_module_spec_with_checkpoint",
"(",
"module_spec",
",",
"checkpoint_path",
",",
"export_path",
",",
"scope_prefix",
"=",
"\"\"",
")",
":",
"# The main requirement is that it is possible to know how to map from",
"# module variable name to checkpoint variable name.",
"# This is trivial if the original code used variable scopes,",
"# but can be messy if the variables to export are interwined",
"# with variables not export.",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"m",
"=",
"hub",
".",
"Module",
"(",
"module_spec",
")",
"assign_map",
"=",
"{",
"scope_prefix",
"+",
"name",
":",
"value",
"for",
"name",
",",
"value",
"in",
"m",
".",
"variable_map",
".",
"items",
"(",
")",
"}",
"tf",
".",
"train",
".",
"init_from_checkpoint",
"(",
"checkpoint_path",
",",
"assign_map",
")",
"init_op",
"=",
"tf",
".",
"initializers",
".",
"global_variables",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"session",
":",
"session",
".",
"run",
"(",
"init_op",
")",
"m",
".",
"export",
"(",
"export_path",
",",
"session",
")"
] |
Exports given checkpoint as tfhub module with given spec.
|
[
"Exports",
"given",
"checkpoint",
"as",
"tfhub",
"module",
"with",
"given",
"spec",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/export.py#L80-L100
|
21,702
|
tensorflow/tensor2tensor
|
tensor2tensor/serving/export.py
|
export_as_tfhub_module
|
def export_as_tfhub_module(model_name,
hparams,
decode_hparams,
problem,
checkpoint_path,
export_dir):
"""Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the name of the problem
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
"""
def hub_module_fn():
"""Creates the TF graph for the hub module."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name,
hparams,
decode_hparams=decode_hparams,
use_tpu=FLAGS.use_tpu)
features = problem.serving_input_fn(
hparams, decode_hparams, use_tpu=FLAGS.use_tpu).features
# we must do a copy of the features, as the model_fn can add additional
# entries there (like hyperparameter settings etc).
original_features = features.copy()
spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)
hub.add_signature(
inputs=original_features,
outputs=spec.export_outputs["serving_default"].outputs)
# TFHub doesn't support the following collections.
drop_collections = [tf.GraphKeys.LOSSES,
tf.GraphKeys.SUMMARIES, tf.GraphKeys.LOCAL_VARIABLES]
module_spec = hub.create_module_spec(
hub_module_fn, drop_collections=drop_collections)
# Loads the weights from the checkpoint using the model above
# and saves it in the export_path.
export_module_spec_with_checkpoint(
module_spec,
checkpoint_path=checkpoint_path,
export_path=export_dir,
scope_prefix="")
|
python
|
def export_as_tfhub_module(model_name,
hparams,
decode_hparams,
problem,
checkpoint_path,
export_dir):
"""Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the name of the problem
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
"""
def hub_module_fn():
"""Creates the TF graph for the hub module."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name,
hparams,
decode_hparams=decode_hparams,
use_tpu=FLAGS.use_tpu)
features = problem.serving_input_fn(
hparams, decode_hparams, use_tpu=FLAGS.use_tpu).features
# we must do a copy of the features, as the model_fn can add additional
# entries there (like hyperparameter settings etc).
original_features = features.copy()
spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)
hub.add_signature(
inputs=original_features,
outputs=spec.export_outputs["serving_default"].outputs)
# TFHub doesn't support the following collections.
drop_collections = [tf.GraphKeys.LOSSES,
tf.GraphKeys.SUMMARIES, tf.GraphKeys.LOCAL_VARIABLES]
module_spec = hub.create_module_spec(
hub_module_fn, drop_collections=drop_collections)
# Loads the weights from the checkpoint using the model above
# and saves it in the export_path.
export_module_spec_with_checkpoint(
module_spec,
checkpoint_path=checkpoint_path,
export_path=export_dir,
scope_prefix="")
|
[
"def",
"export_as_tfhub_module",
"(",
"model_name",
",",
"hparams",
",",
"decode_hparams",
",",
"problem",
",",
"checkpoint_path",
",",
"export_dir",
")",
":",
"def",
"hub_module_fn",
"(",
")",
":",
"\"\"\"Creates the TF graph for the hub module.\"\"\"",
"model_fn",
"=",
"t2t_model",
".",
"T2TModel",
".",
"make_estimator_model_fn",
"(",
"model_name",
",",
"hparams",
",",
"decode_hparams",
"=",
"decode_hparams",
",",
"use_tpu",
"=",
"FLAGS",
".",
"use_tpu",
")",
"features",
"=",
"problem",
".",
"serving_input_fn",
"(",
"hparams",
",",
"decode_hparams",
",",
"use_tpu",
"=",
"FLAGS",
".",
"use_tpu",
")",
".",
"features",
"# we must do a copy of the features, as the model_fn can add additional",
"# entries there (like hyperparameter settings etc).",
"original_features",
"=",
"features",
".",
"copy",
"(",
")",
"spec",
"=",
"model_fn",
"(",
"features",
",",
"labels",
"=",
"None",
",",
"mode",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"PREDICT",
")",
"hub",
".",
"add_signature",
"(",
"inputs",
"=",
"original_features",
",",
"outputs",
"=",
"spec",
".",
"export_outputs",
"[",
"\"serving_default\"",
"]",
".",
"outputs",
")",
"# TFHub doesn't support the following collections.",
"drop_collections",
"=",
"[",
"tf",
".",
"GraphKeys",
".",
"LOSSES",
",",
"tf",
".",
"GraphKeys",
".",
"SUMMARIES",
",",
"tf",
".",
"GraphKeys",
".",
"LOCAL_VARIABLES",
"]",
"module_spec",
"=",
"hub",
".",
"create_module_spec",
"(",
"hub_module_fn",
",",
"drop_collections",
"=",
"drop_collections",
")",
"# Loads the weights from the checkpoint using the model above",
"# and saves it in the export_path.",
"export_module_spec_with_checkpoint",
"(",
"module_spec",
",",
"checkpoint_path",
"=",
"checkpoint_path",
",",
"export_path",
"=",
"export_dir",
",",
"scope_prefix",
"=",
"\"\"",
")"
] |
Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the name of the problem
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
|
[
"Exports",
"the",
"last",
"checkpoint",
"from",
"the",
"directory",
"as",
"tfhub",
"module",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/export.py#L103-L154
|
21,703
|
tensorflow/tensor2tensor
|
tensor2tensor/visualization/visualization.py
|
build_model
|
def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
"""Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
"""
hparams = trainer_lib.create_hparams(
hparams_set, data_dir=data_dir, problem_name=problem_name)
translate_model = registry.model(model_name)(
hparams, tf.estimator.ModeKeys.EVAL)
inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs")
targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets")
translate_model({
"inputs": inputs,
"targets": targets,
})
# Must be called after building the training graph, so that the dict will
# have been filled with the attention tensors. BUT before creating the
# inference graph otherwise the dict will be filled with tensors from
# inside a tf.while_loop from decoding and are marked unfetchable.
att_mats = get_att_mats(translate_model)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
samples = translate_model.infer({
"inputs": inputs,
}, beam_size=beam_size)["outputs"]
return inputs, targets, samples, att_mats
|
python
|
def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
"""Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
"""
hparams = trainer_lib.create_hparams(
hparams_set, data_dir=data_dir, problem_name=problem_name)
translate_model = registry.model(model_name)(
hparams, tf.estimator.ModeKeys.EVAL)
inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs")
targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets")
translate_model({
"inputs": inputs,
"targets": targets,
})
# Must be called after building the training graph, so that the dict will
# have been filled with the attention tensors. BUT before creating the
# inference graph otherwise the dict will be filled with tensors from
# inside a tf.while_loop from decoding and are marked unfetchable.
att_mats = get_att_mats(translate_model)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
samples = translate_model.infer({
"inputs": inputs,
}, beam_size=beam_size)["outputs"]
return inputs, targets, samples, att_mats
|
[
"def",
"build_model",
"(",
"hparams_set",
",",
"model_name",
",",
"data_dir",
",",
"problem_name",
",",
"beam_size",
"=",
"1",
")",
":",
"hparams",
"=",
"trainer_lib",
".",
"create_hparams",
"(",
"hparams_set",
",",
"data_dir",
"=",
"data_dir",
",",
"problem_name",
"=",
"problem_name",
")",
"translate_model",
"=",
"registry",
".",
"model",
"(",
"model_name",
")",
"(",
"hparams",
",",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
")",
"inputs",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"shape",
"=",
"(",
"1",
",",
"None",
",",
"1",
",",
"1",
")",
",",
"name",
"=",
"\"inputs\"",
")",
"targets",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"shape",
"=",
"(",
"1",
",",
"None",
",",
"1",
",",
"1",
")",
",",
"name",
"=",
"\"targets\"",
")",
"translate_model",
"(",
"{",
"\"inputs\"",
":",
"inputs",
",",
"\"targets\"",
":",
"targets",
",",
"}",
")",
"# Must be called after building the training graph, so that the dict will",
"# have been filled with the attention tensors. BUT before creating the",
"# inference graph otherwise the dict will be filled with tensors from",
"# inside a tf.while_loop from decoding and are marked unfetchable.",
"att_mats",
"=",
"get_att_mats",
"(",
"translate_model",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"tf",
".",
"get_variable_scope",
"(",
")",
",",
"reuse",
"=",
"True",
")",
":",
"samples",
"=",
"translate_model",
".",
"infer",
"(",
"{",
"\"inputs\"",
":",
"inputs",
",",
"}",
",",
"beam_size",
"=",
"beam_size",
")",
"[",
"\"outputs\"",
"]",
"return",
"inputs",
",",
"targets",
",",
"samples",
",",
"att_mats"
] |
Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
|
[
"Build",
"the",
"graph",
"required",
"to",
"fetch",
"the",
"attention",
"weights",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L113-L156
|
21,704
|
tensorflow/tensor2tensor
|
tensor2tensor/visualization/visualization.py
|
get_att_mats
|
def get_att_mats(translate_model):
"""Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attetnion weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
enc_atts = []
dec_atts = []
encdec_atts = []
prefix = "transformer/body/"
postfix_self_attention = "/multihead_attention/dot_product_attention"
if translate_model.hparams.self_attention_type == "dot_product_relative":
postfix_self_attention = ("/multihead_attention/"
"dot_product_attention_relative")
postfix_encdec = "/multihead_attention/dot_product_attention"
for i in range(translate_model.hparams.num_hidden_layers):
enc_att = translate_model.attention_weights[
"%sencoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
dec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
encdec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)]
enc_atts.append(enc_att)
dec_atts.append(dec_att)
encdec_atts.append(encdec_att)
return enc_atts, dec_atts, encdec_atts
|
python
|
def get_att_mats(translate_model):
"""Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attetnion weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
enc_atts = []
dec_atts = []
encdec_atts = []
prefix = "transformer/body/"
postfix_self_attention = "/multihead_attention/dot_product_attention"
if translate_model.hparams.self_attention_type == "dot_product_relative":
postfix_self_attention = ("/multihead_attention/"
"dot_product_attention_relative")
postfix_encdec = "/multihead_attention/dot_product_attention"
for i in range(translate_model.hparams.num_hidden_layers):
enc_att = translate_model.attention_weights[
"%sencoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
dec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
encdec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)]
enc_atts.append(enc_att)
dec_atts.append(dec_att)
encdec_atts.append(encdec_att)
return enc_atts, dec_atts, encdec_atts
|
[
"def",
"get_att_mats",
"(",
"translate_model",
")",
":",
"enc_atts",
"=",
"[",
"]",
"dec_atts",
"=",
"[",
"]",
"encdec_atts",
"=",
"[",
"]",
"prefix",
"=",
"\"transformer/body/\"",
"postfix_self_attention",
"=",
"\"/multihead_attention/dot_product_attention\"",
"if",
"translate_model",
".",
"hparams",
".",
"self_attention_type",
"==",
"\"dot_product_relative\"",
":",
"postfix_self_attention",
"=",
"(",
"\"/multihead_attention/\"",
"\"dot_product_attention_relative\"",
")",
"postfix_encdec",
"=",
"\"/multihead_attention/dot_product_attention\"",
"for",
"i",
"in",
"range",
"(",
"translate_model",
".",
"hparams",
".",
"num_hidden_layers",
")",
":",
"enc_att",
"=",
"translate_model",
".",
"attention_weights",
"[",
"\"%sencoder/layer_%i/self_attention%s\"",
"%",
"(",
"prefix",
",",
"i",
",",
"postfix_self_attention",
")",
"]",
"dec_att",
"=",
"translate_model",
".",
"attention_weights",
"[",
"\"%sdecoder/layer_%i/self_attention%s\"",
"%",
"(",
"prefix",
",",
"i",
",",
"postfix_self_attention",
")",
"]",
"encdec_att",
"=",
"translate_model",
".",
"attention_weights",
"[",
"\"%sdecoder/layer_%i/encdec_attention%s\"",
"%",
"(",
"prefix",
",",
"i",
",",
"postfix_encdec",
")",
"]",
"enc_atts",
".",
"append",
"(",
"enc_att",
")",
"dec_atts",
".",
"append",
"(",
"dec_att",
")",
"encdec_atts",
".",
"append",
"(",
"encdec_att",
")",
"return",
"enc_atts",
",",
"dec_atts",
",",
"encdec_atts"
] |
Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attetnion weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
|
[
"Get",
"s",
"the",
"tensors",
"representing",
"the",
"attentions",
"from",
"a",
"build",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L159-L205
|
21,705
|
tensorflow/tensor2tensor
|
tensor2tensor/visualization/visualization.py
|
AttentionVisualizer.encode
|
def encode(self, input_str):
"""Input str to features dict, ready for inference."""
inputs = self.encoders["inputs"].encode(input_str) + [EOS_ID]
batch_inputs = np.reshape(inputs, [1, -1, 1, 1]) # Make it 3D.
return batch_inputs
|
python
|
def encode(self, input_str):
"""Input str to features dict, ready for inference."""
inputs = self.encoders["inputs"].encode(input_str) + [EOS_ID]
batch_inputs = np.reshape(inputs, [1, -1, 1, 1]) # Make it 3D.
return batch_inputs
|
[
"def",
"encode",
"(",
"self",
",",
"input_str",
")",
":",
"inputs",
"=",
"self",
".",
"encoders",
"[",
"\"inputs\"",
"]",
".",
"encode",
"(",
"input_str",
")",
"+",
"[",
"EOS_ID",
"]",
"batch_inputs",
"=",
"np",
".",
"reshape",
"(",
"inputs",
",",
"[",
"1",
",",
"-",
"1",
",",
"1",
",",
"1",
"]",
")",
"# Make it 3D.",
"return",
"batch_inputs"
] |
Input str to features dict, ready for inference.
|
[
"Input",
"str",
"to",
"features",
"dict",
"ready",
"for",
"inference",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L52-L56
|
21,706
|
tensorflow/tensor2tensor
|
tensor2tensor/visualization/visualization.py
|
AttentionVisualizer.decode
|
def decode(self, integers):
"""List of ints to str."""
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode(integers)
|
python
|
def decode(self, integers):
"""List of ints to str."""
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode(integers)
|
[
"def",
"decode",
"(",
"self",
",",
"integers",
")",
":",
"integers",
"=",
"list",
"(",
"np",
".",
"squeeze",
"(",
"integers",
")",
")",
"return",
"self",
".",
"encoders",
"[",
"\"inputs\"",
"]",
".",
"decode",
"(",
"integers",
")"
] |
List of ints to str.
|
[
"List",
"of",
"ints",
"to",
"str",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L58-L61
|
21,707
|
tensorflow/tensor2tensor
|
tensor2tensor/visualization/visualization.py
|
AttentionVisualizer.decode_list
|
def decode_list(self, integers):
"""List of ints to list of str."""
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode_list(integers)
|
python
|
def decode_list(self, integers):
"""List of ints to list of str."""
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode_list(integers)
|
[
"def",
"decode_list",
"(",
"self",
",",
"integers",
")",
":",
"integers",
"=",
"list",
"(",
"np",
".",
"squeeze",
"(",
"integers",
")",
")",
"return",
"self",
".",
"encoders",
"[",
"\"inputs\"",
"]",
".",
"decode_list",
"(",
"integers",
")"
] |
List of ints to list of str.
|
[
"List",
"of",
"ints",
"to",
"list",
"of",
"str",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L63-L66
|
21,708
|
tensorflow/tensor2tensor
|
tensor2tensor/visualization/visualization.py
|
AttentionVisualizer.get_vis_data_from_string
|
def get_vis_data_from_string(self, sess, input_string):
"""Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
encoded_inputs = self.encode(input_string)
# Run inference graph to get the translation.
out = sess.run(self.samples, {
self.inputs: encoded_inputs,
})
# Run the decoded translation through the training graph to get the
# attention tensors.
att_mats = sess.run(self.att_mats, {
self.inputs: encoded_inputs,
self.targets: np.reshape(out, [1, -1, 1, 1]),
})
output_string = self.decode(out)
input_list = self.decode_list(encoded_inputs)
output_list = self.decode_list(out)
return output_string, input_list, output_list, att_mats
|
python
|
def get_vis_data_from_string(self, sess, input_string):
"""Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
encoded_inputs = self.encode(input_string)
# Run inference graph to get the translation.
out = sess.run(self.samples, {
self.inputs: encoded_inputs,
})
# Run the decoded translation through the training graph to get the
# attention tensors.
att_mats = sess.run(self.att_mats, {
self.inputs: encoded_inputs,
self.targets: np.reshape(out, [1, -1, 1, 1]),
})
output_string = self.decode(out)
input_list = self.decode_list(encoded_inputs)
output_list = self.decode_list(out)
return output_string, input_list, output_list, att_mats
|
[
"def",
"get_vis_data_from_string",
"(",
"self",
",",
"sess",
",",
"input_string",
")",
":",
"encoded_inputs",
"=",
"self",
".",
"encode",
"(",
"input_string",
")",
"# Run inference graph to get the translation.",
"out",
"=",
"sess",
".",
"run",
"(",
"self",
".",
"samples",
",",
"{",
"self",
".",
"inputs",
":",
"encoded_inputs",
",",
"}",
")",
"# Run the decoded translation through the training graph to get the",
"# attention tensors.",
"att_mats",
"=",
"sess",
".",
"run",
"(",
"self",
".",
"att_mats",
",",
"{",
"self",
".",
"inputs",
":",
"encoded_inputs",
",",
"self",
".",
"targets",
":",
"np",
".",
"reshape",
"(",
"out",
",",
"[",
"1",
",",
"-",
"1",
",",
"1",
",",
"1",
"]",
")",
",",
"}",
")",
"output_string",
"=",
"self",
".",
"decode",
"(",
"out",
")",
"input_list",
"=",
"self",
".",
"decode_list",
"(",
"encoded_inputs",
")",
"output_list",
"=",
"self",
".",
"decode_list",
"(",
"out",
")",
"return",
"output_string",
",",
"input_list",
",",
"output_list",
",",
"att_mats"
] |
Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
|
[
"Constructs",
"the",
"data",
"needed",
"for",
"visualizing",
"attentions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L68-L110
|
21,709
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow.py
|
glow_hparams
|
def glow_hparams():
"""Glow Hparams."""
hparams = common_hparams.basic_params1()
hparams.clip_grad_norm = None
hparams.weight_decay = 0.0
hparams.learning_rate_constant = 3e-4
hparams.batch_size = 32
# can be prev_level, prev_step or normal.
# see: glow_ops.merge_level_and_latent_dist
hparams.add_hparam("level_scale", "prev_level")
hparams.add_hparam("n_levels", 3)
hparams.add_hparam("n_bits_x", 8)
hparams.add_hparam("depth", 32)
# Activation - Relu or Gatu
hparams.add_hparam("activation", "relu")
# Coupling layer, additive or affine.
hparams.add_hparam("coupling", "affine")
hparams.add_hparam("coupling_width", 512)
hparams.add_hparam("coupling_dropout", 0.0)
hparams.add_hparam("top_prior", "single_conv")
# init_batch_size denotes the number of examples used for data-dependent
# initialization. A higher init_batch_size is required for training
# stability especially when hparams.batch_size is low.
hparams.add_hparam("init_batch_size", 256)
hparams.add_hparam("temperature", 1.0)
return hparams
|
python
|
def glow_hparams():
"""Glow Hparams."""
hparams = common_hparams.basic_params1()
hparams.clip_grad_norm = None
hparams.weight_decay = 0.0
hparams.learning_rate_constant = 3e-4
hparams.batch_size = 32
# can be prev_level, prev_step or normal.
# see: glow_ops.merge_level_and_latent_dist
hparams.add_hparam("level_scale", "prev_level")
hparams.add_hparam("n_levels", 3)
hparams.add_hparam("n_bits_x", 8)
hparams.add_hparam("depth", 32)
# Activation - Relu or Gatu
hparams.add_hparam("activation", "relu")
# Coupling layer, additive or affine.
hparams.add_hparam("coupling", "affine")
hparams.add_hparam("coupling_width", 512)
hparams.add_hparam("coupling_dropout", 0.0)
hparams.add_hparam("top_prior", "single_conv")
# init_batch_size denotes the number of examples used for data-dependent
# initialization. A higher init_batch_size is required for training
# stability especially when hparams.batch_size is low.
hparams.add_hparam("init_batch_size", 256)
hparams.add_hparam("temperature", 1.0)
return hparams
|
[
"def",
"glow_hparams",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"hparams",
".",
"clip_grad_norm",
"=",
"None",
"hparams",
".",
"weight_decay",
"=",
"0.0",
"hparams",
".",
"learning_rate_constant",
"=",
"3e-4",
"hparams",
".",
"batch_size",
"=",
"32",
"# can be prev_level, prev_step or normal.",
"# see: glow_ops.merge_level_and_latent_dist",
"hparams",
".",
"add_hparam",
"(",
"\"level_scale\"",
",",
"\"prev_level\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"n_levels\"",
",",
"3",
")",
"hparams",
".",
"add_hparam",
"(",
"\"n_bits_x\"",
",",
"8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"depth\"",
",",
"32",
")",
"# Activation - Relu or Gatu",
"hparams",
".",
"add_hparam",
"(",
"\"activation\"",
",",
"\"relu\"",
")",
"# Coupling layer, additive or affine.",
"hparams",
".",
"add_hparam",
"(",
"\"coupling\"",
",",
"\"affine\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"coupling_width\"",
",",
"512",
")",
"hparams",
".",
"add_hparam",
"(",
"\"coupling_dropout\"",
",",
"0.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"top_prior\"",
",",
"\"single_conv\"",
")",
"# init_batch_size denotes the number of examples used for data-dependent",
"# initialization. A higher init_batch_size is required for training",
"# stability especially when hparams.batch_size is low.",
"hparams",
".",
"add_hparam",
"(",
"\"init_batch_size\"",
",",
"256",
")",
"hparams",
".",
"add_hparam",
"(",
"\"temperature\"",
",",
"1.0",
")",
"return",
"hparams"
] |
Glow Hparams.
|
[
"Glow",
"Hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow.py#L39-L65
|
21,710
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/transformer_aux.py
|
shift_and_pad
|
def shift_and_pad(tensor, shift, axis=0):
"""Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
"""
shape = tensor.shape
rank = len(shape)
assert 0 <= abs(axis) < rank
length = int(shape[axis])
assert 0 <= abs(shift) < length
paddings = [(0, 0)] * rank
begin = [0] * rank
size = [-1] * rank
if shift > 0:
paddings[axis] = (shift, 0)
size[axis] = length - shift
elif shift < 0:
paddings[axis] = (0, -shift)
begin[axis] = -shift
ret = tf.pad(tf.slice(tensor, begin, size), paddings)
return ret
|
python
|
def shift_and_pad(tensor, shift, axis=0):
"""Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
"""
shape = tensor.shape
rank = len(shape)
assert 0 <= abs(axis) < rank
length = int(shape[axis])
assert 0 <= abs(shift) < length
paddings = [(0, 0)] * rank
begin = [0] * rank
size = [-1] * rank
if shift > 0:
paddings[axis] = (shift, 0)
size[axis] = length - shift
elif shift < 0:
paddings[axis] = (0, -shift)
begin[axis] = -shift
ret = tf.pad(tf.slice(tensor, begin, size), paddings)
return ret
|
[
"def",
"shift_and_pad",
"(",
"tensor",
",",
"shift",
",",
"axis",
"=",
"0",
")",
":",
"shape",
"=",
"tensor",
".",
"shape",
"rank",
"=",
"len",
"(",
"shape",
")",
"assert",
"0",
"<=",
"abs",
"(",
"axis",
")",
"<",
"rank",
"length",
"=",
"int",
"(",
"shape",
"[",
"axis",
"]",
")",
"assert",
"0",
"<=",
"abs",
"(",
"shift",
")",
"<",
"length",
"paddings",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"*",
"rank",
"begin",
"=",
"[",
"0",
"]",
"*",
"rank",
"size",
"=",
"[",
"-",
"1",
"]",
"*",
"rank",
"if",
"shift",
">",
"0",
":",
"paddings",
"[",
"axis",
"]",
"=",
"(",
"shift",
",",
"0",
")",
"size",
"[",
"axis",
"]",
"=",
"length",
"-",
"shift",
"elif",
"shift",
"<",
"0",
":",
"paddings",
"[",
"axis",
"]",
"=",
"(",
"0",
",",
"-",
"shift",
")",
"begin",
"[",
"axis",
"]",
"=",
"-",
"shift",
"ret",
"=",
"tf",
".",
"pad",
"(",
"tf",
".",
"slice",
"(",
"tensor",
",",
"begin",
",",
"size",
")",
",",
"paddings",
")",
"return",
"ret"
] |
Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
|
[
"Shifts",
"and",
"pads",
"with",
"zero",
"along",
"an",
"axis",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_aux.py#L29-L64
|
21,711
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/base.py
|
pixels_from_softmax
|
def pixels_from_softmax(frame_logits, pure_sampling=False,
temperature=1.0, gumbel_noise_factor=0.2):
"""Given frame_logits from a per-pixel softmax, generate colors."""
# If we're purely sampling, just sample each pixel.
if pure_sampling or temperature == 0.0:
return common_layers.sample_with_temperature(frame_logits, temperature)
# Gumbel-sample from the pixel sofmax and average by pixel values.
pixel_range = tf.to_float(tf.range(256))
for _ in range(len(frame_logits.get_shape().as_list()) - 1):
pixel_range = tf.expand_dims(pixel_range, axis=0)
frame_logits = tf.nn.log_softmax(frame_logits)
gumbel_samples = discretization.gumbel_sample(
common_layers.shape_list(frame_logits)) * gumbel_noise_factor
frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
result = tf.reduce_sum(frame * pixel_range, axis=-1)
# Round on the forward pass, not on the backward one.
return result + tf.stop_gradient(tf.round(result) - result)
|
python
|
def pixels_from_softmax(frame_logits, pure_sampling=False,
temperature=1.0, gumbel_noise_factor=0.2):
"""Given frame_logits from a per-pixel softmax, generate colors."""
# If we're purely sampling, just sample each pixel.
if pure_sampling or temperature == 0.0:
return common_layers.sample_with_temperature(frame_logits, temperature)
# Gumbel-sample from the pixel sofmax and average by pixel values.
pixel_range = tf.to_float(tf.range(256))
for _ in range(len(frame_logits.get_shape().as_list()) - 1):
pixel_range = tf.expand_dims(pixel_range, axis=0)
frame_logits = tf.nn.log_softmax(frame_logits)
gumbel_samples = discretization.gumbel_sample(
common_layers.shape_list(frame_logits)) * gumbel_noise_factor
frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
result = tf.reduce_sum(frame * pixel_range, axis=-1)
# Round on the forward pass, not on the backward one.
return result + tf.stop_gradient(tf.round(result) - result)
|
[
"def",
"pixels_from_softmax",
"(",
"frame_logits",
",",
"pure_sampling",
"=",
"False",
",",
"temperature",
"=",
"1.0",
",",
"gumbel_noise_factor",
"=",
"0.2",
")",
":",
"# If we're purely sampling, just sample each pixel.",
"if",
"pure_sampling",
"or",
"temperature",
"==",
"0.0",
":",
"return",
"common_layers",
".",
"sample_with_temperature",
"(",
"frame_logits",
",",
"temperature",
")",
"# Gumbel-sample from the pixel sofmax and average by pixel values.",
"pixel_range",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"range",
"(",
"256",
")",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"frame_logits",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
")",
"-",
"1",
")",
":",
"pixel_range",
"=",
"tf",
".",
"expand_dims",
"(",
"pixel_range",
",",
"axis",
"=",
"0",
")",
"frame_logits",
"=",
"tf",
".",
"nn",
".",
"log_softmax",
"(",
"frame_logits",
")",
"gumbel_samples",
"=",
"discretization",
".",
"gumbel_sample",
"(",
"common_layers",
".",
"shape_list",
"(",
"frame_logits",
")",
")",
"*",
"gumbel_noise_factor",
"frame",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"(",
"frame_logits",
"+",
"gumbel_samples",
")",
"/",
"temperature",
",",
"axis",
"=",
"-",
"1",
")",
"result",
"=",
"tf",
".",
"reduce_sum",
"(",
"frame",
"*",
"pixel_range",
",",
"axis",
"=",
"-",
"1",
")",
"# Round on the forward pass, not on the backward one.",
"return",
"result",
"+",
"tf",
".",
"stop_gradient",
"(",
"tf",
".",
"round",
"(",
"result",
")",
"-",
"result",
")"
] |
Given frame_logits from a per-pixel softmax, generate colors.
|
[
"Given",
"frame_logits",
"from",
"a",
"per",
"-",
"pixel",
"softmax",
"generate",
"colors",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base.py#L40-L59
|
21,712
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/base.py
|
next_frame_base
|
def next_frame_base():
"""Common HParams for next_frame models."""
hparams = common_hparams.basic_params1()
# Loss cutoff.
hparams.add_hparam("video_modality_loss_cutoff", 0.01)
# Additional resizing the frames before feeding them to model.
hparams.add_hparam("preprocess_resize_frames", None)
# How many data points to suffle. Ideally should be part of problem not model!
hparams.add_hparam("shuffle_buffer_size", 128)
# Tiny mode. For faster tests.
hparams.add_hparam("tiny_mode", False)
# In case a model supports smaller/faster version.
hparams.add_hparam("small_mode", False)
# In case a model has stochastic version.
hparams.add_hparam("stochastic_model", False)
# Internal loss for recurrent models.
hparams.add_hparam("internal_loss", True)
# choose from: concat, multiplicative, multi_additive
hparams.add_hparam("action_injection", "multi_additive")
# Scheduled sampling method. Choose between
# ground_truth_only, prediction_only, prob, count, prob_inverse_exp.
hparams.add_hparam("scheduled_sampling_mode", "prediction_only")
hparams.add_hparam("scheduled_sampling_decay_steps", 10000)
hparams.add_hparam("scheduled_sampling_max_prob", 1.0)
hparams.add_hparam("scheduled_sampling_k", 900.0)
return hparams
|
python
|
def next_frame_base():
"""Common HParams for next_frame models."""
hparams = common_hparams.basic_params1()
# Loss cutoff.
hparams.add_hparam("video_modality_loss_cutoff", 0.01)
# Additional resizing the frames before feeding them to model.
hparams.add_hparam("preprocess_resize_frames", None)
# How many data points to suffle. Ideally should be part of problem not model!
hparams.add_hparam("shuffle_buffer_size", 128)
# Tiny mode. For faster tests.
hparams.add_hparam("tiny_mode", False)
# In case a model supports smaller/faster version.
hparams.add_hparam("small_mode", False)
# In case a model has stochastic version.
hparams.add_hparam("stochastic_model", False)
# Internal loss for recurrent models.
hparams.add_hparam("internal_loss", True)
# choose from: concat, multiplicative, multi_additive
hparams.add_hparam("action_injection", "multi_additive")
# Scheduled sampling method. Choose between
# ground_truth_only, prediction_only, prob, count, prob_inverse_exp.
hparams.add_hparam("scheduled_sampling_mode", "prediction_only")
hparams.add_hparam("scheduled_sampling_decay_steps", 10000)
hparams.add_hparam("scheduled_sampling_max_prob", 1.0)
hparams.add_hparam("scheduled_sampling_k", 900.0)
return hparams
|
[
"def",
"next_frame_base",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"# Loss cutoff.",
"hparams",
".",
"add_hparam",
"(",
"\"video_modality_loss_cutoff\"",
",",
"0.01",
")",
"# Additional resizing the frames before feeding them to model.",
"hparams",
".",
"add_hparam",
"(",
"\"preprocess_resize_frames\"",
",",
"None",
")",
"# How many data points to suffle. Ideally should be part of problem not model!",
"hparams",
".",
"add_hparam",
"(",
"\"shuffle_buffer_size\"",
",",
"128",
")",
"# Tiny mode. For faster tests.",
"hparams",
".",
"add_hparam",
"(",
"\"tiny_mode\"",
",",
"False",
")",
"# In case a model supports smaller/faster version.",
"hparams",
".",
"add_hparam",
"(",
"\"small_mode\"",
",",
"False",
")",
"# In case a model has stochastic version.",
"hparams",
".",
"add_hparam",
"(",
"\"stochastic_model\"",
",",
"False",
")",
"# Internal loss for recurrent models.",
"hparams",
".",
"add_hparam",
"(",
"\"internal_loss\"",
",",
"True",
")",
"# choose from: concat, multiplicative, multi_additive",
"hparams",
".",
"add_hparam",
"(",
"\"action_injection\"",
",",
"\"multi_additive\"",
")",
"# Scheduled sampling method. Choose between",
"# ground_truth_only, prediction_only, prob, count, prob_inverse_exp.",
"hparams",
".",
"add_hparam",
"(",
"\"scheduled_sampling_mode\"",
",",
"\"prediction_only\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"scheduled_sampling_decay_steps\"",
",",
"10000",
")",
"hparams",
".",
"add_hparam",
"(",
"\"scheduled_sampling_max_prob\"",
",",
"1.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"scheduled_sampling_k\"",
",",
"900.0",
")",
"return",
"hparams"
] |
Common HParams for next_frame models.
|
[
"Common",
"HParams",
"for",
"next_frame",
"models",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base.py#L679-L704
|
21,713
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/gym_utils.py
|
remove_time_limit_wrapper
|
def remove_time_limit_wrapper(env):
"""Removes top level TimeLimit Wrapper.
Removes TimeLimit Wrapper from top level if exists, throws error if any other
TimeLimit Wrapper is present in stack.
Args:
env: environment
Returns:
the env with removed time limit wrapper.
"""
if isinstance(env, gym.wrappers.TimeLimit):
env = env.env
env_ = env
while isinstance(env_, gym.Wrapper):
if isinstance(env_, gym.wrappers.TimeLimit):
raise ValueError("Can remove only top-level TimeLimit gym.Wrapper.")
env_ = env_.env
return env
|
python
|
def remove_time_limit_wrapper(env):
"""Removes top level TimeLimit Wrapper.
Removes TimeLimit Wrapper from top level if exists, throws error if any other
TimeLimit Wrapper is present in stack.
Args:
env: environment
Returns:
the env with removed time limit wrapper.
"""
if isinstance(env, gym.wrappers.TimeLimit):
env = env.env
env_ = env
while isinstance(env_, gym.Wrapper):
if isinstance(env_, gym.wrappers.TimeLimit):
raise ValueError("Can remove only top-level TimeLimit gym.Wrapper.")
env_ = env_.env
return env
|
[
"def",
"remove_time_limit_wrapper",
"(",
"env",
")",
":",
"if",
"isinstance",
"(",
"env",
",",
"gym",
".",
"wrappers",
".",
"TimeLimit",
")",
":",
"env",
"=",
"env",
".",
"env",
"env_",
"=",
"env",
"while",
"isinstance",
"(",
"env_",
",",
"gym",
".",
"Wrapper",
")",
":",
"if",
"isinstance",
"(",
"env_",
",",
"gym",
".",
"wrappers",
".",
"TimeLimit",
")",
":",
"raise",
"ValueError",
"(",
"\"Can remove only top-level TimeLimit gym.Wrapper.\"",
")",
"env_",
"=",
"env_",
".",
"env",
"return",
"env"
] |
Removes top level TimeLimit Wrapper.
Removes TimeLimit Wrapper from top level if exists, throws error if any other
TimeLimit Wrapper is present in stack.
Args:
env: environment
Returns:
the env with removed time limit wrapper.
|
[
"Removes",
"top",
"level",
"TimeLimit",
"Wrapper",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/gym_utils.py#L129-L148
|
21,714
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/gym_utils.py
|
gym_env_wrapper
|
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
rendered_env_resize_to, sticky_actions):
"""Wraps a gym environment. see make_gym_env for details."""
# rl_env_max_episode_steps is None or int.
assert ((not rl_env_max_episode_steps) or
isinstance(rl_env_max_episode_steps, int))
wrap_with_time_limit = ((not rl_env_max_episode_steps) or
rl_env_max_episode_steps >= 0)
if wrap_with_time_limit:
env = remove_time_limit_wrapper(env)
if sticky_actions:
env = StickyActionEnv(env)
if maxskip_env:
env = MaxAndSkipEnv(env) # pylint: disable=redefined-variable-type
if rendered_env:
env = RenderedEnv(env, resize_to=rendered_env_resize_to)
if wrap_with_time_limit:
env = gym.wrappers.TimeLimit(
env, max_episode_steps=rl_env_max_episode_steps)
return env
|
python
|
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
rendered_env_resize_to, sticky_actions):
"""Wraps a gym environment. see make_gym_env for details."""
# rl_env_max_episode_steps is None or int.
assert ((not rl_env_max_episode_steps) or
isinstance(rl_env_max_episode_steps, int))
wrap_with_time_limit = ((not rl_env_max_episode_steps) or
rl_env_max_episode_steps >= 0)
if wrap_with_time_limit:
env = remove_time_limit_wrapper(env)
if sticky_actions:
env = StickyActionEnv(env)
if maxskip_env:
env = MaxAndSkipEnv(env) # pylint: disable=redefined-variable-type
if rendered_env:
env = RenderedEnv(env, resize_to=rendered_env_resize_to)
if wrap_with_time_limit:
env = gym.wrappers.TimeLimit(
env, max_episode_steps=rl_env_max_episode_steps)
return env
|
[
"def",
"gym_env_wrapper",
"(",
"env",
",",
"rl_env_max_episode_steps",
",",
"maxskip_env",
",",
"rendered_env",
",",
"rendered_env_resize_to",
",",
"sticky_actions",
")",
":",
"# rl_env_max_episode_steps is None or int.",
"assert",
"(",
"(",
"not",
"rl_env_max_episode_steps",
")",
"or",
"isinstance",
"(",
"rl_env_max_episode_steps",
",",
"int",
")",
")",
"wrap_with_time_limit",
"=",
"(",
"(",
"not",
"rl_env_max_episode_steps",
")",
"or",
"rl_env_max_episode_steps",
">=",
"0",
")",
"if",
"wrap_with_time_limit",
":",
"env",
"=",
"remove_time_limit_wrapper",
"(",
"env",
")",
"if",
"sticky_actions",
":",
"env",
"=",
"StickyActionEnv",
"(",
"env",
")",
"if",
"maxskip_env",
":",
"env",
"=",
"MaxAndSkipEnv",
"(",
"env",
")",
"# pylint: disable=redefined-variable-type",
"if",
"rendered_env",
":",
"env",
"=",
"RenderedEnv",
"(",
"env",
",",
"resize_to",
"=",
"rendered_env_resize_to",
")",
"if",
"wrap_with_time_limit",
":",
"env",
"=",
"gym",
".",
"wrappers",
".",
"TimeLimit",
"(",
"env",
",",
"max_episode_steps",
"=",
"rl_env_max_episode_steps",
")",
"return",
"env"
] |
Wraps a gym environment. see make_gym_env for details.
|
[
"Wraps",
"a",
"gym",
"environment",
".",
"see",
"make_gym_env",
"for",
"details",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/gym_utils.py#L151-L176
|
21,715
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/gym_utils.py
|
make_gym_env
|
def make_gym_env(name,
rl_env_max_episode_steps=-1,
maxskip_env=False,
rendered_env=False,
rendered_env_resize_to=None,
sticky_actions=False):
"""Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-in, otherwise we impose the requested timelimit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
"""
env = gym.make(name)
return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env,
rendered_env, rendered_env_resize_to, sticky_actions)
|
python
|
def make_gym_env(name,
rl_env_max_episode_steps=-1,
maxskip_env=False,
rendered_env=False,
rendered_env_resize_to=None,
sticky_actions=False):
"""Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-in, otherwise we impose the requested timelimit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
"""
env = gym.make(name)
return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env,
rendered_env, rendered_env_resize_to, sticky_actions)
|
[
"def",
"make_gym_env",
"(",
"name",
",",
"rl_env_max_episode_steps",
"=",
"-",
"1",
",",
"maxskip_env",
"=",
"False",
",",
"rendered_env",
"=",
"False",
",",
"rendered_env_resize_to",
"=",
"None",
",",
"sticky_actions",
"=",
"False",
")",
":",
"env",
"=",
"gym",
".",
"make",
"(",
"name",
")",
"return",
"gym_env_wrapper",
"(",
"env",
",",
"rl_env_max_episode_steps",
",",
"maxskip_env",
",",
"rendered_env",
",",
"rendered_env_resize_to",
",",
"sticky_actions",
")"
] |
Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-in, otherwise we impose the requested timelimit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
|
[
"Create",
"a",
"gym",
"env",
"optionally",
"with",
"a",
"time",
"limit",
"and",
"maxskip",
"wrapper",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/gym_utils.py#L179-L206
|
21,716
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/gym_utils.py
|
register_gym_env
|
def register_gym_env(class_entry_point, version="v0", kwargs=None):
"""Registers the class in Gym and returns the registered name and the env."""
split_on_colon = class_entry_point.split(":")
assert len(split_on_colon) == 2
class_name = split_on_colon[1]
# We have to add the version to conform to gym's API.
env_name = "T2TEnv-{}-{}".format(class_name, version)
gym.envs.register(id=env_name, entry_point=class_entry_point, kwargs=kwargs)
tf.logging.info("Entry Point [%s] registered with id [%s]", class_entry_point,
env_name)
return env_name, gym.make(env_name)
|
python
|
def register_gym_env(class_entry_point, version="v0", kwargs=None):
"""Registers the class in Gym and returns the registered name and the env."""
split_on_colon = class_entry_point.split(":")
assert len(split_on_colon) == 2
class_name = split_on_colon[1]
# We have to add the version to conform to gym's API.
env_name = "T2TEnv-{}-{}".format(class_name, version)
gym.envs.register(id=env_name, entry_point=class_entry_point, kwargs=kwargs)
tf.logging.info("Entry Point [%s] registered with id [%s]", class_entry_point,
env_name)
return env_name, gym.make(env_name)
|
[
"def",
"register_gym_env",
"(",
"class_entry_point",
",",
"version",
"=",
"\"v0\"",
",",
"kwargs",
"=",
"None",
")",
":",
"split_on_colon",
"=",
"class_entry_point",
".",
"split",
"(",
"\":\"",
")",
"assert",
"len",
"(",
"split_on_colon",
")",
"==",
"2",
"class_name",
"=",
"split_on_colon",
"[",
"1",
"]",
"# We have to add the version to conform to gym's API.",
"env_name",
"=",
"\"T2TEnv-{}-{}\"",
".",
"format",
"(",
"class_name",
",",
"version",
")",
"gym",
".",
"envs",
".",
"register",
"(",
"id",
"=",
"env_name",
",",
"entry_point",
"=",
"class_entry_point",
",",
"kwargs",
"=",
"kwargs",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Entry Point [%s] registered with id [%s]\"",
",",
"class_entry_point",
",",
"env_name",
")",
"return",
"env_name",
",",
"gym",
".",
"make",
"(",
"env_name",
")"
] |
Registers the class in Gym and returns the registered name and the env.
|
[
"Registers",
"the",
"class",
"in",
"Gym",
"and",
"returns",
"the",
"registered",
"name",
"and",
"the",
"env",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/gym_utils.py#L209-L223
|
21,717
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/gym_utils.py
|
MaxAndSkipEnv.step
|
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame doesn't matter.
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
|
python
|
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame doesn't matter.
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
|
[
"def",
"step",
"(",
"self",
",",
"action",
")",
":",
"total_reward",
"=",
"0.0",
"done",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_skip",
")",
":",
"obs",
",",
"reward",
",",
"done",
",",
"info",
"=",
"self",
".",
"env",
".",
"step",
"(",
"action",
")",
"if",
"i",
"==",
"self",
".",
"_skip",
"-",
"2",
":",
"self",
".",
"_obs_buffer",
"[",
"0",
"]",
"=",
"obs",
"if",
"i",
"==",
"self",
".",
"_skip",
"-",
"1",
":",
"self",
".",
"_obs_buffer",
"[",
"1",
"]",
"=",
"obs",
"total_reward",
"+=",
"reward",
"if",
"done",
":",
"break",
"# Note that the observation on the done=True frame doesn't matter.",
"max_frame",
"=",
"self",
".",
"_obs_buffer",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"return",
"max_frame",
",",
"total_reward",
",",
"done",
",",
"info"
] |
Repeat action, sum reward, and max over last observations.
|
[
"Repeat",
"action",
"sum",
"reward",
"and",
"max",
"over",
"last",
"observations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/gym_utils.py#L62-L77
|
21,718
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/all_problems.py
|
_handle_errors
|
def _handle_errors(errors):
"""Log out and possibly reraise errors during import."""
if not errors:
return
log_all = True # pylint: disable=unused-variable
err_msg = "T2T: skipped importing {num_missing} data_generators modules."
print(err_msg.format(num_missing=len(errors)))
for module, err in errors:
err_str = str(err)
if not _is_import_err_msg(err_str, module):
print("From module %s" % module)
raise err
if log_all:
print("Did not import module: %s; Cause: %s" % (module, err_str))
|
python
|
def _handle_errors(errors):
"""Log out and possibly reraise errors during import."""
if not errors:
return
log_all = True # pylint: disable=unused-variable
err_msg = "T2T: skipped importing {num_missing} data_generators modules."
print(err_msg.format(num_missing=len(errors)))
for module, err in errors:
err_str = str(err)
if not _is_import_err_msg(err_str, module):
print("From module %s" % module)
raise err
if log_all:
print("Did not import module: %s; Cause: %s" % (module, err_str))
|
[
"def",
"_handle_errors",
"(",
"errors",
")",
":",
"if",
"not",
"errors",
":",
"return",
"log_all",
"=",
"True",
"# pylint: disable=unused-variable",
"err_msg",
"=",
"\"T2T: skipped importing {num_missing} data_generators modules.\"",
"print",
"(",
"err_msg",
".",
"format",
"(",
"num_missing",
"=",
"len",
"(",
"errors",
")",
")",
")",
"for",
"module",
",",
"err",
"in",
"errors",
":",
"err_str",
"=",
"str",
"(",
"err",
")",
"if",
"not",
"_is_import_err_msg",
"(",
"err_str",
",",
"module",
")",
":",
"print",
"(",
"\"From module %s\"",
"%",
"module",
")",
"raise",
"err",
"if",
"log_all",
":",
"print",
"(",
"\"Did not import module: %s; Cause: %s\"",
"%",
"(",
"module",
",",
"err_str",
")",
")"
] |
Log out and possibly reraise errors during import.
|
[
"Log",
"out",
"and",
"possibly",
"reraise",
"errors",
"during",
"import",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/all_problems.py#L113-L126
|
21,719
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/hparams_lib.py
|
create_hparams
|
def create_hparams(hparams_set,
hparams_overrides_str="",
data_dir=None,
problem_name=None,
hparams_path=None):
"""Create HParams with data_dir and problem hparams, if kwargs provided."""
hparams = registry.hparams(hparams_set)
if hparams_path and tf.gfile.Exists(hparams_path):
hparams = create_hparams_from_json(hparams_path, hparams)
if data_dir:
hparams.add_hparam("data_dir", data_dir)
if hparams_overrides_str:
tf.logging.info("Overriding hparams in %s with %s", hparams_set,
hparams_overrides_str)
hparams = hparams.parse(hparams_overrides_str)
if problem_name:
add_problem_hparams(hparams, problem_name)
return hparams
|
python
|
def create_hparams(hparams_set,
hparams_overrides_str="",
data_dir=None,
problem_name=None,
hparams_path=None):
"""Create HParams with data_dir and problem hparams, if kwargs provided."""
hparams = registry.hparams(hparams_set)
if hparams_path and tf.gfile.Exists(hparams_path):
hparams = create_hparams_from_json(hparams_path, hparams)
if data_dir:
hparams.add_hparam("data_dir", data_dir)
if hparams_overrides_str:
tf.logging.info("Overriding hparams in %s with %s", hparams_set,
hparams_overrides_str)
hparams = hparams.parse(hparams_overrides_str)
if problem_name:
add_problem_hparams(hparams, problem_name)
return hparams
|
[
"def",
"create_hparams",
"(",
"hparams_set",
",",
"hparams_overrides_str",
"=",
"\"\"",
",",
"data_dir",
"=",
"None",
",",
"problem_name",
"=",
"None",
",",
"hparams_path",
"=",
"None",
")",
":",
"hparams",
"=",
"registry",
".",
"hparams",
"(",
"hparams_set",
")",
"if",
"hparams_path",
"and",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"hparams_path",
")",
":",
"hparams",
"=",
"create_hparams_from_json",
"(",
"hparams_path",
",",
"hparams",
")",
"if",
"data_dir",
":",
"hparams",
".",
"add_hparam",
"(",
"\"data_dir\"",
",",
"data_dir",
")",
"if",
"hparams_overrides_str",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Overriding hparams in %s with %s\"",
",",
"hparams_set",
",",
"hparams_overrides_str",
")",
"hparams",
"=",
"hparams",
".",
"parse",
"(",
"hparams_overrides_str",
")",
"if",
"problem_name",
":",
"add_problem_hparams",
"(",
"hparams",
",",
"problem_name",
")",
"return",
"hparams"
] |
Create HParams with data_dir and problem hparams, if kwargs provided.
|
[
"Create",
"HParams",
"with",
"data_dir",
"and",
"problem",
"hparams",
"if",
"kwargs",
"provided",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparams_lib.py#L42-L59
|
21,720
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/hparams_lib.py
|
create_hparams_from_json
|
def create_hparams_from_json(json_path, hparams=None):
"""Loading hparams from json; can also start from hparams if specified."""
tf.logging.info("Loading hparams from existing json %s" % json_path)
with tf.gfile.Open(json_path, "r") as f:
hparams_values = json.load(f)
# Prevent certain keys from overwriting the passed-in hparams.
# TODO(trandustin): Remove this hack after registries are available to avoid
# saving them as functions.
hparams_values.pop("bottom", None)
hparams_values.pop("loss", None)
hparams_values.pop("name", None)
hparams_values.pop("top", None)
hparams_values.pop("weights_fn", None)
new_hparams = hparam.HParams(**hparams_values)
# Some keys are in new_hparams but not hparams, so we need to be more
# careful than simply using parse_json() from HParams
if hparams: # hparams specified, so update values from json
for key in sorted(new_hparams.values().keys()):
if hasattr(hparams, key): # Overlapped keys
value = getattr(hparams, key)
new_value = getattr(new_hparams, key)
if value != new_value: # Different values
tf.logging.info("Overwrite key %s: %s -> %s" % (
key, value, new_value))
setattr(hparams, key, new_value)
else:
hparams = new_hparams
return hparams
|
python
|
def create_hparams_from_json(json_path, hparams=None):
"""Loading hparams from json; can also start from hparams if specified."""
tf.logging.info("Loading hparams from existing json %s" % json_path)
with tf.gfile.Open(json_path, "r") as f:
hparams_values = json.load(f)
# Prevent certain keys from overwriting the passed-in hparams.
# TODO(trandustin): Remove this hack after registries are available to avoid
# saving them as functions.
hparams_values.pop("bottom", None)
hparams_values.pop("loss", None)
hparams_values.pop("name", None)
hparams_values.pop("top", None)
hparams_values.pop("weights_fn", None)
new_hparams = hparam.HParams(**hparams_values)
# Some keys are in new_hparams but not hparams, so we need to be more
# careful than simply using parse_json() from HParams
if hparams: # hparams specified, so update values from json
for key in sorted(new_hparams.values().keys()):
if hasattr(hparams, key): # Overlapped keys
value = getattr(hparams, key)
new_value = getattr(new_hparams, key)
if value != new_value: # Different values
tf.logging.info("Overwrite key %s: %s -> %s" % (
key, value, new_value))
setattr(hparams, key, new_value)
else:
hparams = new_hparams
return hparams
|
[
"def",
"create_hparams_from_json",
"(",
"json_path",
",",
"hparams",
"=",
"None",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Loading hparams from existing json %s\"",
"%",
"json_path",
")",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"json_path",
",",
"\"r\"",
")",
"as",
"f",
":",
"hparams_values",
"=",
"json",
".",
"load",
"(",
"f",
")",
"# Prevent certain keys from overwriting the passed-in hparams.",
"# TODO(trandustin): Remove this hack after registries are available to avoid",
"# saving them as functions.",
"hparams_values",
".",
"pop",
"(",
"\"bottom\"",
",",
"None",
")",
"hparams_values",
".",
"pop",
"(",
"\"loss\"",
",",
"None",
")",
"hparams_values",
".",
"pop",
"(",
"\"name\"",
",",
"None",
")",
"hparams_values",
".",
"pop",
"(",
"\"top\"",
",",
"None",
")",
"hparams_values",
".",
"pop",
"(",
"\"weights_fn\"",
",",
"None",
")",
"new_hparams",
"=",
"hparam",
".",
"HParams",
"(",
"*",
"*",
"hparams_values",
")",
"# Some keys are in new_hparams but not hparams, so we need to be more",
"# careful than simply using parse_json() from HParams",
"if",
"hparams",
":",
"# hparams specified, so update values from json",
"for",
"key",
"in",
"sorted",
"(",
"new_hparams",
".",
"values",
"(",
")",
".",
"keys",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"hparams",
",",
"key",
")",
":",
"# Overlapped keys",
"value",
"=",
"getattr",
"(",
"hparams",
",",
"key",
")",
"new_value",
"=",
"getattr",
"(",
"new_hparams",
",",
"key",
")",
"if",
"value",
"!=",
"new_value",
":",
"# Different values",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Overwrite key %s: %s -> %s\"",
"%",
"(",
"key",
",",
"value",
",",
"new_value",
")",
")",
"setattr",
"(",
"hparams",
",",
"key",
",",
"new_value",
")",
"else",
":",
"hparams",
"=",
"new_hparams",
"return",
"hparams"
] |
Loading hparams from json; can also start from hparams if specified.
|
[
"Loading",
"hparams",
"from",
"json",
";",
"can",
"also",
"start",
"from",
"hparams",
"if",
"specified",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparams_lib.py#L62-L90
|
21,721
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/hparams_lib.py
|
add_problem_hparams
|
def add_problem_hparams(hparams, problem_name_or_instance):
"""Add problem hparams for the problems."""
if isinstance(problem_name_or_instance, problem_lib.Problem):
problem = problem_name_or_instance
else:
problem = registry.problem(problem_name_or_instance)
p_hparams = problem.get_hparams(hparams)
hparams.problem = problem
hparams.problem_hparams = p_hparams
|
python
|
def add_problem_hparams(hparams, problem_name_or_instance):
"""Add problem hparams for the problems."""
if isinstance(problem_name_or_instance, problem_lib.Problem):
problem = problem_name_or_instance
else:
problem = registry.problem(problem_name_or_instance)
p_hparams = problem.get_hparams(hparams)
hparams.problem = problem
hparams.problem_hparams = p_hparams
|
[
"def",
"add_problem_hparams",
"(",
"hparams",
",",
"problem_name_or_instance",
")",
":",
"if",
"isinstance",
"(",
"problem_name_or_instance",
",",
"problem_lib",
".",
"Problem",
")",
":",
"problem",
"=",
"problem_name_or_instance",
"else",
":",
"problem",
"=",
"registry",
".",
"problem",
"(",
"problem_name_or_instance",
")",
"p_hparams",
"=",
"problem",
".",
"get_hparams",
"(",
"hparams",
")",
"hparams",
".",
"problem",
"=",
"problem",
"hparams",
".",
"problem_hparams",
"=",
"p_hparams"
] |
Add problem hparams for the problems.
|
[
"Add",
"problem",
"hparams",
"for",
"the",
"problems",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparams_lib.py#L93-L101
|
21,722
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/subject_verb_agreement.py
|
load_examples
|
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
"""Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset pluse train, test, and development splits.
"""
infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tf.logging.info('Loading examples')
all_examples = []
for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
if i % 100000 == 0:
tf.logging.info('%d examples have been loaded....' % i)
ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
all_examples.append(ex)
random.seed(1)
random.shuffle(all_examples)
n_train = int(len(all_examples) * prop_train)
n_val = n_train + int(len(all_examples) * prop_val)
train = all_examples[:n_train]
val = all_examples[n_train:n_val]
test = []
for e in all_examples[n_val:]:
if e['n_intervening'] == e['n_diff_intervening']:
test.append(e)
return all_examples, train, val, test
|
python
|
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
"""Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset pluse train, test, and development splits.
"""
infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tf.logging.info('Loading examples')
all_examples = []
for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
if i % 100000 == 0:
tf.logging.info('%d examples have been loaded....' % i)
ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
all_examples.append(ex)
random.seed(1)
random.shuffle(all_examples)
n_train = int(len(all_examples) * prop_train)
n_val = n_train + int(len(all_examples) * prop_val)
train = all_examples[:n_train]
val = all_examples[n_train:n_val]
test = []
for e in all_examples[n_val:]:
if e['n_intervening'] == e['n_diff_intervening']:
test.append(e)
return all_examples, train, val, test
|
[
"def",
"load_examples",
"(",
"tmp_dir",
",",
"prop_train",
"=",
"0.09",
",",
"prop_val",
"=",
"0.01",
")",
":",
"infile",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"tmp_dir",
",",
"_TAR",
",",
"_URL",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"'Loading examples'",
")",
"all_examples",
"=",
"[",
"]",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"csv",
".",
"DictReader",
"(",
"gzip",
".",
"open",
"(",
"infile",
")",
",",
"delimiter",
"=",
"'\\t'",
")",
")",
":",
"if",
"i",
"%",
"100000",
"==",
"0",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"'%d examples have been loaded....'",
"%",
"i",
")",
"ex",
"=",
"{",
"x",
":",
"int",
"(",
"y",
")",
"if",
"y",
".",
"isdigit",
"(",
")",
"else",
"y",
"for",
"x",
",",
"y",
"in",
"d",
".",
"items",
"(",
")",
"}",
"all_examples",
".",
"append",
"(",
"ex",
")",
"random",
".",
"seed",
"(",
"1",
")",
"random",
".",
"shuffle",
"(",
"all_examples",
")",
"n_train",
"=",
"int",
"(",
"len",
"(",
"all_examples",
")",
"*",
"prop_train",
")",
"n_val",
"=",
"n_train",
"+",
"int",
"(",
"len",
"(",
"all_examples",
")",
"*",
"prop_val",
")",
"train",
"=",
"all_examples",
"[",
":",
"n_train",
"]",
"val",
"=",
"all_examples",
"[",
"n_train",
":",
"n_val",
"]",
"test",
"=",
"[",
"]",
"for",
"e",
"in",
"all_examples",
"[",
"n_val",
":",
"]",
":",
"if",
"e",
"[",
"'n_intervening'",
"]",
"==",
"e",
"[",
"'n_diff_intervening'",
"]",
":",
"test",
".",
"append",
"(",
"e",
")",
"return",
"all_examples",
",",
"train",
",",
"val",
",",
"test"
] |
Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset pluse train, test, and development splits.
|
[
"Loads",
"exampls",
"from",
"the",
"tsv",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/subject_verb_agreement.py#L77-L111
|
21,723
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cifar.py
|
_get_cifar
|
def _get_cifar(directory, url):
"""Download and extract CIFAR to directory unless it is there."""
filename = os.path.basename(url)
path = generator_utils.maybe_download(directory, filename, url)
tarfile.open(path, "r:gz").extractall(directory)
|
python
|
def _get_cifar(directory, url):
"""Download and extract CIFAR to directory unless it is there."""
filename = os.path.basename(url)
path = generator_utils.maybe_download(directory, filename, url)
tarfile.open(path, "r:gz").extractall(directory)
|
[
"def",
"_get_cifar",
"(",
"directory",
",",
"url",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"url",
")",
"path",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"directory",
",",
"filename",
",",
"url",
")",
"tarfile",
".",
"open",
"(",
"path",
",",
"\"r:gz\"",
")",
".",
"extractall",
"(",
"directory",
")"
] |
Download and extract CIFAR to directory unless it is there.
|
[
"Download",
"and",
"extract",
"CIFAR",
"to",
"directory",
"unless",
"it",
"is",
"there",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cifar.py#L55-L59
|
21,724
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cifar.py
|
cifar_generator
|
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):
"""Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
"""
if cifar_version == "cifar10":
url = _CIFAR10_URL
train_files = _CIFAR10_TRAIN_FILES
test_files = _CIFAR10_TEST_FILES
prefix = _CIFAR10_PREFIX
image_size = _CIFAR10_IMAGE_SIZE
label_key = "labels"
elif cifar_version == "cifar100" or cifar_version == "cifar20":
url = _CIFAR100_URL
train_files = _CIFAR100_TRAIN_FILES
test_files = _CIFAR100_TEST_FILES
prefix = _CIFAR100_PREFIX
image_size = _CIFAR100_IMAGE_SIZE
if cifar_version == "cifar100":
label_key = "fine_labels"
else:
label_key = "coarse_labels"
_get_cifar(tmp_dir, url)
data_files = train_files if training else test_files
all_images, all_labels = [], []
for filename in data_files:
path = os.path.join(tmp_dir, prefix, filename)
with tf.gfile.Open(path, "rb") as f:
if six.PY2:
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding="latin1")
images = data["data"]
num_images = images.shape[0]
images = images.reshape((num_images, 3, image_size, image_size))
all_images.extend([
np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)
])
labels = data[label_key]
all_labels.extend([labels[j] for j in range(num_images)])
return image_utils.image_generator(
all_images[start_from:start_from + how_many],
all_labels[start_from:start_from + how_many])
|
python
|
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):
"""Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
"""
if cifar_version == "cifar10":
url = _CIFAR10_URL
train_files = _CIFAR10_TRAIN_FILES
test_files = _CIFAR10_TEST_FILES
prefix = _CIFAR10_PREFIX
image_size = _CIFAR10_IMAGE_SIZE
label_key = "labels"
elif cifar_version == "cifar100" or cifar_version == "cifar20":
url = _CIFAR100_URL
train_files = _CIFAR100_TRAIN_FILES
test_files = _CIFAR100_TEST_FILES
prefix = _CIFAR100_PREFIX
image_size = _CIFAR100_IMAGE_SIZE
if cifar_version == "cifar100":
label_key = "fine_labels"
else:
label_key = "coarse_labels"
_get_cifar(tmp_dir, url)
data_files = train_files if training else test_files
all_images, all_labels = [], []
for filename in data_files:
path = os.path.join(tmp_dir, prefix, filename)
with tf.gfile.Open(path, "rb") as f:
if six.PY2:
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding="latin1")
images = data["data"]
num_images = images.shape[0]
images = images.reshape((num_images, 3, image_size, image_size))
all_images.extend([
np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)
])
labels = data[label_key]
all_labels.extend([labels[j] for j in range(num_images)])
return image_utils.image_generator(
all_images[start_from:start_from + how_many],
all_labels[start_from:start_from + how_many])
|
[
"def",
"cifar_generator",
"(",
"cifar_version",
",",
"tmp_dir",
",",
"training",
",",
"how_many",
",",
"start_from",
"=",
"0",
")",
":",
"if",
"cifar_version",
"==",
"\"cifar10\"",
":",
"url",
"=",
"_CIFAR10_URL",
"train_files",
"=",
"_CIFAR10_TRAIN_FILES",
"test_files",
"=",
"_CIFAR10_TEST_FILES",
"prefix",
"=",
"_CIFAR10_PREFIX",
"image_size",
"=",
"_CIFAR10_IMAGE_SIZE",
"label_key",
"=",
"\"labels\"",
"elif",
"cifar_version",
"==",
"\"cifar100\"",
"or",
"cifar_version",
"==",
"\"cifar20\"",
":",
"url",
"=",
"_CIFAR100_URL",
"train_files",
"=",
"_CIFAR100_TRAIN_FILES",
"test_files",
"=",
"_CIFAR100_TEST_FILES",
"prefix",
"=",
"_CIFAR100_PREFIX",
"image_size",
"=",
"_CIFAR100_IMAGE_SIZE",
"if",
"cifar_version",
"==",
"\"cifar100\"",
":",
"label_key",
"=",
"\"fine_labels\"",
"else",
":",
"label_key",
"=",
"\"coarse_labels\"",
"_get_cifar",
"(",
"tmp_dir",
",",
"url",
")",
"data_files",
"=",
"train_files",
"if",
"training",
"else",
"test_files",
"all_images",
",",
"all_labels",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"filename",
"in",
"data_files",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"prefix",
",",
"filename",
")",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"if",
"six",
".",
"PY2",
":",
"data",
"=",
"cPickle",
".",
"load",
"(",
"f",
")",
"else",
":",
"data",
"=",
"cPickle",
".",
"load",
"(",
"f",
",",
"encoding",
"=",
"\"latin1\"",
")",
"images",
"=",
"data",
"[",
"\"data\"",
"]",
"num_images",
"=",
"images",
".",
"shape",
"[",
"0",
"]",
"images",
"=",
"images",
".",
"reshape",
"(",
"(",
"num_images",
",",
"3",
",",
"image_size",
",",
"image_size",
")",
")",
"all_images",
".",
"extend",
"(",
"[",
"np",
".",
"squeeze",
"(",
"images",
"[",
"j",
"]",
")",
".",
"transpose",
"(",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"for",
"j",
"in",
"range",
"(",
"num_images",
")",
"]",
")",
"labels",
"=",
"data",
"[",
"label_key",
"]",
"all_labels",
".",
"extend",
"(",
"[",
"labels",
"[",
"j",
"]",
"for",
"j",
"in",
"range",
"(",
"num_images",
")",
"]",
")",
"return",
"image_utils",
".",
"image_generator",
"(",
"all_images",
"[",
"start_from",
":",
"start_from",
"+",
"how_many",
"]",
",",
"all_labels",
"[",
"start_from",
":",
"start_from",
"+",
"how_many",
"]",
")"
] |
Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
|
[
"Image",
"generator",
"for",
"CIFAR",
"-",
"10",
"and",
"100",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cifar.py#L62-L113
|
21,725
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_ppo_base
|
def rlmb_ppo_base():
"""HParams for PPO base."""
hparams = _rlmb_base()
ppo_params = dict(
base_algo="ppo",
base_algo_params="ppo_original_params",
# Number of real environments to train on simultaneously.
real_batch_size=1,
# Number of simulated environments to train on simultaneously.
simulated_batch_size=16,
eval_batch_size=32,
# Unused; number of PPO epochs is calculated from the real frame limit.
real_ppo_epochs_num=0,
# Number of frames that can be taken from the simulated environment before
# it diverges, used for training the agent.
ppo_epochs_num=1000, # This should be enough to see something
# Should be equal to simulated_rollout_length.
# TODO(koz4k): Uncouple this by outputing done from SimulatedBatchEnv.
ppo_epoch_length=hparams.simulated_rollout_length,
# Do not eval since simulated batch env does not produce dones
ppo_eval_every_epochs=0,
ppo_learning_rate_constant=1e-4, # Will be changed, just so it exists.
# This needs to be divisible by real_ppo_effective_num_agents.
real_ppo_epoch_length=16 * 200,
real_ppo_learning_rate_constant=1e-4,
real_ppo_effective_num_agents=16,
real_ppo_eval_every_epochs=0,
simulation_flip_first_random_for_beginning=True,
)
update_hparams(hparams, ppo_params)
return hparams
|
python
|
def rlmb_ppo_base():
"""HParams for PPO base."""
hparams = _rlmb_base()
ppo_params = dict(
base_algo="ppo",
base_algo_params="ppo_original_params",
# Number of real environments to train on simultaneously.
real_batch_size=1,
# Number of simulated environments to train on simultaneously.
simulated_batch_size=16,
eval_batch_size=32,
# Unused; number of PPO epochs is calculated from the real frame limit.
real_ppo_epochs_num=0,
# Number of frames that can be taken from the simulated environment before
# it diverges, used for training the agent.
ppo_epochs_num=1000, # This should be enough to see something
# Should be equal to simulated_rollout_length.
# TODO(koz4k): Uncouple this by outputing done from SimulatedBatchEnv.
ppo_epoch_length=hparams.simulated_rollout_length,
# Do not eval since simulated batch env does not produce dones
ppo_eval_every_epochs=0,
ppo_learning_rate_constant=1e-4, # Will be changed, just so it exists.
# This needs to be divisible by real_ppo_effective_num_agents.
real_ppo_epoch_length=16 * 200,
real_ppo_learning_rate_constant=1e-4,
real_ppo_effective_num_agents=16,
real_ppo_eval_every_epochs=0,
simulation_flip_first_random_for_beginning=True,
)
update_hparams(hparams, ppo_params)
return hparams
|
[
"def",
"rlmb_ppo_base",
"(",
")",
":",
"hparams",
"=",
"_rlmb_base",
"(",
")",
"ppo_params",
"=",
"dict",
"(",
"base_algo",
"=",
"\"ppo\"",
",",
"base_algo_params",
"=",
"\"ppo_original_params\"",
",",
"# Number of real environments to train on simultaneously.",
"real_batch_size",
"=",
"1",
",",
"# Number of simulated environments to train on simultaneously.",
"simulated_batch_size",
"=",
"16",
",",
"eval_batch_size",
"=",
"32",
",",
"# Unused; number of PPO epochs is calculated from the real frame limit.",
"real_ppo_epochs_num",
"=",
"0",
",",
"# Number of frames that can be taken from the simulated environment before",
"# it diverges, used for training the agent.",
"ppo_epochs_num",
"=",
"1000",
",",
"# This should be enough to see something",
"# Should be equal to simulated_rollout_length.",
"# TODO(koz4k): Uncouple this by outputing done from SimulatedBatchEnv.",
"ppo_epoch_length",
"=",
"hparams",
".",
"simulated_rollout_length",
",",
"# Do not eval since simulated batch env does not produce dones",
"ppo_eval_every_epochs",
"=",
"0",
",",
"ppo_learning_rate_constant",
"=",
"1e-4",
",",
"# Will be changed, just so it exists.",
"# This needs to be divisible by real_ppo_effective_num_agents.",
"real_ppo_epoch_length",
"=",
"16",
"*",
"200",
",",
"real_ppo_learning_rate_constant",
"=",
"1e-4",
",",
"real_ppo_effective_num_agents",
"=",
"16",
",",
"real_ppo_eval_every_epochs",
"=",
"0",
",",
"simulation_flip_first_random_for_beginning",
"=",
"True",
",",
")",
"update_hparams",
"(",
"hparams",
",",
"ppo_params",
")",
"return",
"hparams"
] |
HParams for PPO base.
|
[
"HParams",
"for",
"PPO",
"base",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L138-L171
|
21,726
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_dqn_base
|
def rlmb_dqn_base():
"""rlmb_dqn_base params."""
hparams = _rlmb_base()
simulated_rollout_length = 10
dqn_params = dict(
base_algo="dqn",
base_algo_params="dqn_original_params",
real_batch_size=1,
simulated_batch_size=16,
dqn_agent_generates_trainable_dones=False,
eval_batch_size=1,
# Must be equal to dqn_time_limit for now
simulated_rollout_length=simulated_rollout_length,
dqn_time_limit=simulated_rollout_length,
simulation_flip_first_random_for_beginning=False,
dqn_eval_episodes_num=3,
# TODO(kc): only for model-free compatibility, remove this
epochs_num=-1,
)
update_hparams(hparams, dqn_params)
return hparams
|
python
|
def rlmb_dqn_base():
"""rlmb_dqn_base params."""
hparams = _rlmb_base()
simulated_rollout_length = 10
dqn_params = dict(
base_algo="dqn",
base_algo_params="dqn_original_params",
real_batch_size=1,
simulated_batch_size=16,
dqn_agent_generates_trainable_dones=False,
eval_batch_size=1,
# Must be equal to dqn_time_limit for now
simulated_rollout_length=simulated_rollout_length,
dqn_time_limit=simulated_rollout_length,
simulation_flip_first_random_for_beginning=False,
dqn_eval_episodes_num=3,
# TODO(kc): only for model-free compatibility, remove this
epochs_num=-1,
)
update_hparams(hparams, dqn_params)
return hparams
|
[
"def",
"rlmb_dqn_base",
"(",
")",
":",
"hparams",
"=",
"_rlmb_base",
"(",
")",
"simulated_rollout_length",
"=",
"10",
"dqn_params",
"=",
"dict",
"(",
"base_algo",
"=",
"\"dqn\"",
",",
"base_algo_params",
"=",
"\"dqn_original_params\"",
",",
"real_batch_size",
"=",
"1",
",",
"simulated_batch_size",
"=",
"16",
",",
"dqn_agent_generates_trainable_dones",
"=",
"False",
",",
"eval_batch_size",
"=",
"1",
",",
"# Must be equal to dqn_time_limit for now",
"simulated_rollout_length",
"=",
"simulated_rollout_length",
",",
"dqn_time_limit",
"=",
"simulated_rollout_length",
",",
"simulation_flip_first_random_for_beginning",
"=",
"False",
",",
"dqn_eval_episodes_num",
"=",
"3",
",",
"# TODO(kc): only for model-free compatibility, remove this",
"epochs_num",
"=",
"-",
"1",
",",
")",
"update_hparams",
"(",
"hparams",
",",
"dqn_params",
")",
"return",
"hparams"
] |
rlmb_dqn_base params.
|
[
"rlmb_dqn_base",
"params",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L189-L210
|
21,727
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_ppo_quick
|
def rlmb_ppo_quick():
"""Base setting but quicker with only 2 epochs."""
hparams = rlmb_ppo_base()
hparams.epochs = 2
hparams.model_train_steps = 25000
hparams.ppo_epochs_num = 700
hparams.ppo_epoch_length = 50
return hparams
|
python
|
def rlmb_ppo_quick():
"""Base setting but quicker with only 2 epochs."""
hparams = rlmb_ppo_base()
hparams.epochs = 2
hparams.model_train_steps = 25000
hparams.ppo_epochs_num = 700
hparams.ppo_epoch_length = 50
return hparams
|
[
"def",
"rlmb_ppo_quick",
"(",
")",
":",
"hparams",
"=",
"rlmb_ppo_base",
"(",
")",
"hparams",
".",
"epochs",
"=",
"2",
"hparams",
".",
"model_train_steps",
"=",
"25000",
"hparams",
".",
"ppo_epochs_num",
"=",
"700",
"hparams",
".",
"ppo_epoch_length",
"=",
"50",
"return",
"hparams"
] |
Base setting but quicker with only 2 epochs.
|
[
"Base",
"setting",
"but",
"quicker",
"with",
"only",
"2",
"epochs",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L234-L241
|
21,728
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_base_stochastic
|
def rlmb_base_stochastic():
"""Base setting with a stochastic next-frame model."""
hparams = rlmb_base()
hparams.initial_epoch_train_steps_multiplier = 5
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams
|
python
|
def rlmb_base_stochastic():
"""Base setting with a stochastic next-frame model."""
hparams = rlmb_base()
hparams.initial_epoch_train_steps_multiplier = 5
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams
|
[
"def",
"rlmb_base_stochastic",
"(",
")",
":",
"hparams",
"=",
"rlmb_base",
"(",
")",
"hparams",
".",
"initial_epoch_train_steps_multiplier",
"=",
"5",
"hparams",
".",
"generative_model",
"=",
"\"next_frame_basic_stochastic\"",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_basic_stochastic\"",
"return",
"hparams"
] |
Base setting with a stochastic next-frame model.
|
[
"Base",
"setting",
"with",
"a",
"stochastic",
"next",
"-",
"frame",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L294-L300
|
21,729
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_long_stochastic_discrete_simulation_deterministic_starts
|
def rlmb_long_stochastic_discrete_simulation_deterministic_starts():
"""Long setting with stochastic discrete model & deterministic sim starts."""
hparams = rlmb_base_stochastic_discrete()
hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long"
hparams.ppo_epochs_num = 1000
hparams.simulation_random_starts = False
return hparams
|
python
|
def rlmb_long_stochastic_discrete_simulation_deterministic_starts():
"""Long setting with stochastic discrete model & deterministic sim starts."""
hparams = rlmb_base_stochastic_discrete()
hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long"
hparams.ppo_epochs_num = 1000
hparams.simulation_random_starts = False
return hparams
|
[
"def",
"rlmb_long_stochastic_discrete_simulation_deterministic_starts",
"(",
")",
":",
"hparams",
"=",
"rlmb_base_stochastic_discrete",
"(",
")",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_basic_stochastic_discrete_long\"",
"hparams",
".",
"ppo_epochs_num",
"=",
"1000",
"hparams",
".",
"simulation_random_starts",
"=",
"False",
"return",
"hparams"
] |
Long setting with stochastic discrete model & deterministic sim starts.
|
[
"Long",
"setting",
"with",
"stochastic",
"discrete",
"model",
"&",
"deterministic",
"sim",
"starts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L403-L409
|
21,730
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_base_sv2p
|
def rlmb_base_sv2p():
"""Base setting with sv2p as world model."""
hparams = rlmb_base()
hparams.learning_rate_bump = 1.0
hparams.generative_model = "next_frame_sv2p"
hparams.generative_model_params = "next_frame_sv2p_atari"
return hparams
|
python
|
def rlmb_base_sv2p():
"""Base setting with sv2p as world model."""
hparams = rlmb_base()
hparams.learning_rate_bump = 1.0
hparams.generative_model = "next_frame_sv2p"
hparams.generative_model_params = "next_frame_sv2p_atari"
return hparams
|
[
"def",
"rlmb_base_sv2p",
"(",
")",
":",
"hparams",
"=",
"rlmb_base",
"(",
")",
"hparams",
".",
"learning_rate_bump",
"=",
"1.0",
"hparams",
".",
"generative_model",
"=",
"\"next_frame_sv2p\"",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_sv2p_atari\"",
"return",
"hparams"
] |
Base setting with sv2p as world model.
|
[
"Base",
"setting",
"with",
"sv2p",
"as",
"world",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L487-L493
|
21,731
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
_rlmb_tiny_overrides
|
def _rlmb_tiny_overrides():
"""Parameters to override for tiny setting excluding agent-related hparams."""
return dict(
epochs=1,
num_real_env_frames=128,
model_train_steps=2,
max_num_noops=1,
eval_max_num_noops=1,
generative_model_params="next_frame_tiny",
stop_loop_early=True,
resize_height_factor=2,
resize_width_factor=2,
wm_eval_rollout_ratios=[1],
rl_env_max_episode_steps=7,
eval_rl_env_max_episode_steps=7,
simulated_rollout_length=2,
eval_sampling_temps=[0.0, 1.0],
)
|
python
|
def _rlmb_tiny_overrides():
"""Parameters to override for tiny setting excluding agent-related hparams."""
return dict(
epochs=1,
num_real_env_frames=128,
model_train_steps=2,
max_num_noops=1,
eval_max_num_noops=1,
generative_model_params="next_frame_tiny",
stop_loop_early=True,
resize_height_factor=2,
resize_width_factor=2,
wm_eval_rollout_ratios=[1],
rl_env_max_episode_steps=7,
eval_rl_env_max_episode_steps=7,
simulated_rollout_length=2,
eval_sampling_temps=[0.0, 1.0],
)
|
[
"def",
"_rlmb_tiny_overrides",
"(",
")",
":",
"return",
"dict",
"(",
"epochs",
"=",
"1",
",",
"num_real_env_frames",
"=",
"128",
",",
"model_train_steps",
"=",
"2",
",",
"max_num_noops",
"=",
"1",
",",
"eval_max_num_noops",
"=",
"1",
",",
"generative_model_params",
"=",
"\"next_frame_tiny\"",
",",
"stop_loop_early",
"=",
"True",
",",
"resize_height_factor",
"=",
"2",
",",
"resize_width_factor",
"=",
"2",
",",
"wm_eval_rollout_ratios",
"=",
"[",
"1",
"]",
",",
"rl_env_max_episode_steps",
"=",
"7",
",",
"eval_rl_env_max_episode_steps",
"=",
"7",
",",
"simulated_rollout_length",
"=",
"2",
",",
"eval_sampling_temps",
"=",
"[",
"0.0",
",",
"1.0",
"]",
",",
")"
] |
Parameters to override for tiny setting excluding agent-related hparams.
|
[
"Parameters",
"to",
"override",
"for",
"tiny",
"setting",
"excluding",
"agent",
"-",
"related",
"hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L537-L554
|
21,732
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_tiny_stochastic
|
def rlmb_tiny_stochastic():
"""Tiny setting with a stochastic next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams
|
python
|
def rlmb_tiny_stochastic():
"""Tiny setting with a stochastic next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams
|
[
"def",
"rlmb_tiny_stochastic",
"(",
")",
":",
"hparams",
"=",
"rlmb_ppo_tiny",
"(",
")",
"hparams",
".",
"epochs",
"=",
"1",
"# Too slow with 2 for regular runs.",
"hparams",
".",
"generative_model",
"=",
"\"next_frame_basic_stochastic\"",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_basic_stochastic\"",
"return",
"hparams"
] |
Tiny setting with a stochastic next-frame model.
|
[
"Tiny",
"setting",
"with",
"a",
"stochastic",
"next",
"-",
"frame",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L596-L602
|
21,733
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_tiny_recurrent
|
def rlmb_tiny_recurrent():
"""Tiny setting with a recurrent next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_recurrent"
hparams.generative_model_params = "next_frame_basic_recurrent"
return hparams
|
python
|
def rlmb_tiny_recurrent():
"""Tiny setting with a recurrent next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_recurrent"
hparams.generative_model_params = "next_frame_basic_recurrent"
return hparams
|
[
"def",
"rlmb_tiny_recurrent",
"(",
")",
":",
"hparams",
"=",
"rlmb_ppo_tiny",
"(",
")",
"hparams",
".",
"epochs",
"=",
"1",
"# Too slow with 2 for regular runs.",
"hparams",
".",
"generative_model",
"=",
"\"next_frame_basic_recurrent\"",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_basic_recurrent\"",
"return",
"hparams"
] |
Tiny setting with a recurrent next-frame model.
|
[
"Tiny",
"setting",
"with",
"a",
"recurrent",
"next",
"-",
"frame",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L606-L612
|
21,734
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_tiny_sv2p
|
def rlmb_tiny_sv2p():
"""Tiny setting with a tiny sv2p model."""
hparams = rlmb_ppo_tiny()
hparams.generative_model = "next_frame_sv2p"
hparams.generative_model_params = "next_frame_sv2p_tiny"
hparams.grayscale = False
return hparams
|
python
|
def rlmb_tiny_sv2p():
"""Tiny setting with a tiny sv2p model."""
hparams = rlmb_ppo_tiny()
hparams.generative_model = "next_frame_sv2p"
hparams.generative_model_params = "next_frame_sv2p_tiny"
hparams.grayscale = False
return hparams
|
[
"def",
"rlmb_tiny_sv2p",
"(",
")",
":",
"hparams",
"=",
"rlmb_ppo_tiny",
"(",
")",
"hparams",
".",
"generative_model",
"=",
"\"next_frame_sv2p\"",
"hparams",
".",
"generative_model_params",
"=",
"\"next_frame_sv2p_tiny\"",
"hparams",
".",
"grayscale",
"=",
"False",
"return",
"hparams"
] |
Tiny setting with a tiny sv2p model.
|
[
"Tiny",
"setting",
"with",
"a",
"tiny",
"sv2p",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L616-L622
|
21,735
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
rlmb_grid
|
def rlmb_grid(rhp):
"""Grid over games and frames, and 5 runs each for variance."""
rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
base = 100000
medium = base // 2
small = medium // 2
rhp.set_discrete("loop.num_real_env_frames", [base, medium, small])
# Dummy parameter to get 5 runs for each configuration
rhp.set_discrete("model.moe_loss_coef", list(range(5)))
|
python
|
def rlmb_grid(rhp):
"""Grid over games and frames, and 5 runs each for variance."""
rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
base = 100000
medium = base // 2
small = medium // 2
rhp.set_discrete("loop.num_real_env_frames", [base, medium, small])
# Dummy parameter to get 5 runs for each configuration
rhp.set_discrete("model.moe_loss_coef", list(range(5)))
|
[
"def",
"rlmb_grid",
"(",
"rhp",
")",
":",
"rhp",
".",
"set_categorical",
"(",
"\"loop.game\"",
",",
"[",
"\"breakout\"",
",",
"\"pong\"",
",",
"\"freeway\"",
"]",
")",
"base",
"=",
"100000",
"medium",
"=",
"base",
"//",
"2",
"small",
"=",
"medium",
"//",
"2",
"rhp",
".",
"set_discrete",
"(",
"\"loop.num_real_env_frames\"",
",",
"[",
"base",
",",
"medium",
",",
"small",
"]",
")",
"# Dummy parameter to get 5 runs for each configuration",
"rhp",
".",
"set_discrete",
"(",
"\"model.moe_loss_coef\"",
",",
"list",
"(",
"range",
"(",
"5",
")",
")",
")"
] |
Grid over games and frames, and 5 runs each for variance.
|
[
"Grid",
"over",
"games",
"and",
"frames",
"and",
"5",
"runs",
"each",
"for",
"variance",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L638-L647
|
21,736
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
merge_unscoped_hparams
|
def merge_unscoped_hparams(scopes_and_hparams):
"""Merge multiple HParams into one with scopes."""
merged_values = {}
for (scope, hparams) in scopes_and_hparams:
for key, value in six.iteritems(hparams.values()):
scoped_key = "%s.%s" % (scope, key)
merged_values[scoped_key] = value
return hparam.HParams(**merged_values)
|
python
|
def merge_unscoped_hparams(scopes_and_hparams):
"""Merge multiple HParams into one with scopes."""
merged_values = {}
for (scope, hparams) in scopes_and_hparams:
for key, value in six.iteritems(hparams.values()):
scoped_key = "%s.%s" % (scope, key)
merged_values[scoped_key] = value
return hparam.HParams(**merged_values)
|
[
"def",
"merge_unscoped_hparams",
"(",
"scopes_and_hparams",
")",
":",
"merged_values",
"=",
"{",
"}",
"for",
"(",
"scope",
",",
"hparams",
")",
"in",
"scopes_and_hparams",
":",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"hparams",
".",
"values",
"(",
")",
")",
":",
"scoped_key",
"=",
"\"%s.%s\"",
"%",
"(",
"scope",
",",
"key",
")",
"merged_values",
"[",
"scoped_key",
"]",
"=",
"value",
"return",
"hparam",
".",
"HParams",
"(",
"*",
"*",
"merged_values",
")"
] |
Merge multiple HParams into one with scopes.
|
[
"Merge",
"multiple",
"HParams",
"into",
"one",
"with",
"scopes",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L855-L863
|
21,737
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
split_scoped_hparams
|
def split_scoped_hparams(scopes, merged_hparams):
"""Split single HParams with scoped keys into multiple."""
split_values = {scope: {} for scope in scopes}
merged_values = merged_hparams.values()
for scoped_key, value in six.iteritems(merged_values):
scope = scoped_key.split(".")[0]
key = scoped_key[len(scope) + 1:]
split_values[scope][key] = value
return [
hparam.HParams(**split_values[scope]) for scope in scopes
]
|
python
|
def split_scoped_hparams(scopes, merged_hparams):
"""Split single HParams with scoped keys into multiple."""
split_values = {scope: {} for scope in scopes}
merged_values = merged_hparams.values()
for scoped_key, value in six.iteritems(merged_values):
scope = scoped_key.split(".")[0]
key = scoped_key[len(scope) + 1:]
split_values[scope][key] = value
return [
hparam.HParams(**split_values[scope]) for scope in scopes
]
|
[
"def",
"split_scoped_hparams",
"(",
"scopes",
",",
"merged_hparams",
")",
":",
"split_values",
"=",
"{",
"scope",
":",
"{",
"}",
"for",
"scope",
"in",
"scopes",
"}",
"merged_values",
"=",
"merged_hparams",
".",
"values",
"(",
")",
"for",
"scoped_key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"merged_values",
")",
":",
"scope",
"=",
"scoped_key",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"key",
"=",
"scoped_key",
"[",
"len",
"(",
"scope",
")",
"+",
"1",
":",
"]",
"split_values",
"[",
"scope",
"]",
"[",
"key",
"]",
"=",
"value",
"return",
"[",
"hparam",
".",
"HParams",
"(",
"*",
"*",
"split_values",
"[",
"scope",
"]",
")",
"for",
"scope",
"in",
"scopes",
"]"
] |
Split single HParams with scoped keys into multiple.
|
[
"Split",
"single",
"HParams",
"with",
"scoped",
"keys",
"into",
"multiple",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L866-L877
|
21,738
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/trainer_model_based_params.py
|
training_loop_hparams_from_scoped_overrides
|
def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
"""Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying model and ppo HParams.
Returns:
HParams suitable for passing to training_loop.
"""
trial_hp_overrides = scoped_overrides.values()
# Create loop, model, and ppo base HParams
loop_hp = create_loop_hparams()
model_hp_name = trial_hp_overrides.get(
"loop.generative_model_params", loop_hp.generative_model_params)
model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)
base_algo_params_name = trial_hp_overrides.get(
"loop.base_algo_params", loop_hp.base_algo_params)
algo_hp = registry.hparams(base_algo_params_name)
# Merge them and then override with the scoped overrides
combined_hp = merge_unscoped_hparams(
zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))
combined_hp.override_from_dict(trial_hp_overrides)
# Split out the component hparams
loop_hp, model_hp, algo_hp = (
split_scoped_hparams(HP_SCOPES, combined_hp))
# Dynamic register the model hp and set the new name in loop_hp
model_hp_name = "model_hp_%s" % str(trial_id)
dynamic_register_hparams(model_hp_name, model_hp)
loop_hp.generative_model_params = model_hp_name
# Dynamic register the algo hp and set the new name in loop_hp
algo_hp_name = "algo_hp_%s" % str(trial_id)
dynamic_register_hparams(algo_hp_name, algo_hp)
loop_hp.base_algo_params = algo_hp_name
return loop_hp
|
python
|
def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
"""Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying model and ppo HParams.
Returns:
HParams suitable for passing to training_loop.
"""
trial_hp_overrides = scoped_overrides.values()
# Create loop, model, and ppo base HParams
loop_hp = create_loop_hparams()
model_hp_name = trial_hp_overrides.get(
"loop.generative_model_params", loop_hp.generative_model_params)
model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)
base_algo_params_name = trial_hp_overrides.get(
"loop.base_algo_params", loop_hp.base_algo_params)
algo_hp = registry.hparams(base_algo_params_name)
# Merge them and then override with the scoped overrides
combined_hp = merge_unscoped_hparams(
zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))
combined_hp.override_from_dict(trial_hp_overrides)
# Split out the component hparams
loop_hp, model_hp, algo_hp = (
split_scoped_hparams(HP_SCOPES, combined_hp))
# Dynamic register the model hp and set the new name in loop_hp
model_hp_name = "model_hp_%s" % str(trial_id)
dynamic_register_hparams(model_hp_name, model_hp)
loop_hp.generative_model_params = model_hp_name
# Dynamic register the algo hp and set the new name in loop_hp
algo_hp_name = "algo_hp_%s" % str(trial_id)
dynamic_register_hparams(algo_hp_name, algo_hp)
loop_hp.base_algo_params = algo_hp_name
return loop_hp
|
[
"def",
"training_loop_hparams_from_scoped_overrides",
"(",
"scoped_overrides",
",",
"trial_id",
")",
":",
"trial_hp_overrides",
"=",
"scoped_overrides",
".",
"values",
"(",
")",
"# Create loop, model, and ppo base HParams",
"loop_hp",
"=",
"create_loop_hparams",
"(",
")",
"model_hp_name",
"=",
"trial_hp_overrides",
".",
"get",
"(",
"\"loop.generative_model_params\"",
",",
"loop_hp",
".",
"generative_model_params",
")",
"model_hp",
"=",
"registry",
".",
"hparams",
"(",
"model_hp_name",
")",
".",
"parse",
"(",
"FLAGS",
".",
"hparams",
")",
"base_algo_params_name",
"=",
"trial_hp_overrides",
".",
"get",
"(",
"\"loop.base_algo_params\"",
",",
"loop_hp",
".",
"base_algo_params",
")",
"algo_hp",
"=",
"registry",
".",
"hparams",
"(",
"base_algo_params_name",
")",
"# Merge them and then override with the scoped overrides",
"combined_hp",
"=",
"merge_unscoped_hparams",
"(",
"zip",
"(",
"HP_SCOPES",
",",
"[",
"loop_hp",
",",
"model_hp",
",",
"algo_hp",
"]",
")",
")",
"combined_hp",
".",
"override_from_dict",
"(",
"trial_hp_overrides",
")",
"# Split out the component hparams",
"loop_hp",
",",
"model_hp",
",",
"algo_hp",
"=",
"(",
"split_scoped_hparams",
"(",
"HP_SCOPES",
",",
"combined_hp",
")",
")",
"# Dynamic register the model hp and set the new name in loop_hp",
"model_hp_name",
"=",
"\"model_hp_%s\"",
"%",
"str",
"(",
"trial_id",
")",
"dynamic_register_hparams",
"(",
"model_hp_name",
",",
"model_hp",
")",
"loop_hp",
".",
"generative_model_params",
"=",
"model_hp_name",
"# Dynamic register the algo hp and set the new name in loop_hp",
"algo_hp_name",
"=",
"\"algo_hp_%s\"",
"%",
"str",
"(",
"trial_id",
")",
"dynamic_register_hparams",
"(",
"algo_hp_name",
",",
"algo_hp",
")",
"loop_hp",
".",
"base_algo_params",
"=",
"algo_hp_name",
"return",
"loop_hp"
] |
Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying model and ppo HParams.
Returns:
HParams suitable for passing to training_loop.
|
[
"Create",
"HParams",
"suitable",
"for",
"training",
"loop",
"from",
"scoped",
"HParams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L880-L923
|
21,739
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player.py
|
PlayerEnv.get_keys_to_action
|
def get_keys_to_action(self):
"""Get mapping from keyboard keys to actions.
Required by gym.utils.play in environment or top level wrapper.
Returns:
{
Unicode code point for keyboard key: action (formatted for step()),
...
}
"""
# Based on gym AtariEnv.get_keys_to_action()
keyword_to_key = {
"UP": ord("w"),
"DOWN": ord("s"),
"LEFT": ord("a"),
"RIGHT": ord("d"),
"FIRE": ord(" "),
}
keys_to_action = {}
for action_id, action_meaning in enumerate(self.action_meanings):
keys_tuple = tuple(sorted([
key for keyword, key in keyword_to_key.items()
if keyword in action_meaning]))
assert keys_tuple not in keys_to_action
keys_to_action[keys_tuple] = action_id
# Special actions:
keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION
keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION
keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION
return keys_to_action
|
python
|
def get_keys_to_action(self):
"""Get mapping from keyboard keys to actions.
Required by gym.utils.play in environment or top level wrapper.
Returns:
{
Unicode code point for keyboard key: action (formatted for step()),
...
}
"""
# Based on gym AtariEnv.get_keys_to_action()
keyword_to_key = {
"UP": ord("w"),
"DOWN": ord("s"),
"LEFT": ord("a"),
"RIGHT": ord("d"),
"FIRE": ord(" "),
}
keys_to_action = {}
for action_id, action_meaning in enumerate(self.action_meanings):
keys_tuple = tuple(sorted([
key for keyword, key in keyword_to_key.items()
if keyword in action_meaning]))
assert keys_tuple not in keys_to_action
keys_to_action[keys_tuple] = action_id
# Special actions:
keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION
keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION
keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION
return keys_to_action
|
[
"def",
"get_keys_to_action",
"(",
"self",
")",
":",
"# Based on gym AtariEnv.get_keys_to_action()",
"keyword_to_key",
"=",
"{",
"\"UP\"",
":",
"ord",
"(",
"\"w\"",
")",
",",
"\"DOWN\"",
":",
"ord",
"(",
"\"s\"",
")",
",",
"\"LEFT\"",
":",
"ord",
"(",
"\"a\"",
")",
",",
"\"RIGHT\"",
":",
"ord",
"(",
"\"d\"",
")",
",",
"\"FIRE\"",
":",
"ord",
"(",
"\" \"",
")",
",",
"}",
"keys_to_action",
"=",
"{",
"}",
"for",
"action_id",
",",
"action_meaning",
"in",
"enumerate",
"(",
"self",
".",
"action_meanings",
")",
":",
"keys_tuple",
"=",
"tuple",
"(",
"sorted",
"(",
"[",
"key",
"for",
"keyword",
",",
"key",
"in",
"keyword_to_key",
".",
"items",
"(",
")",
"if",
"keyword",
"in",
"action_meaning",
"]",
")",
")",
"assert",
"keys_tuple",
"not",
"in",
"keys_to_action",
"keys_to_action",
"[",
"keys_tuple",
"]",
"=",
"action_id",
"# Special actions:",
"keys_to_action",
"[",
"(",
"ord",
"(",
"\"r\"",
")",
",",
")",
"]",
"=",
"self",
".",
"RETURN_DONE_ACTION",
"keys_to_action",
"[",
"(",
"ord",
"(",
"\"c\"",
")",
",",
")",
"]",
"=",
"self",
".",
"TOGGLE_WAIT_ACTION",
"keys_to_action",
"[",
"(",
"ord",
"(",
"\"n\"",
")",
",",
")",
"]",
"=",
"self",
".",
"WAIT_MODE_NOOP_ACTION",
"return",
"keys_to_action"
] |
Get mapping from keyboard keys to actions.
Required by gym.utils.play in environment or top level wrapper.
Returns:
{
Unicode code point for keyboard key: action (formatted for step()),
...
}
|
[
"Get",
"mapping",
"from",
"keyboard",
"keys",
"to",
"actions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player.py#L157-L191
|
21,740
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player.py
|
SimAndRealEnvPlayer._player_step_tuple
|
def _player_step_tuple(self, envs_step_tuples):
"""Construct observation, return usual step tuple.
Args:
envs_step_tuples: tuples.
Returns:
Step tuple: ob, reward, done, info
ob: concatenated images [simulated observation, real observation,
difference], with additional informations in header.
reward: real environment reward
done: True iff. envs_step_tuples['real_env'][2] is True
info: real environment info
"""
ob_real, reward_real, _, _ = envs_step_tuples["real_env"]
ob_sim, reward_sim, _, _ = envs_step_tuples["sim_env"]
ob_err = absolute_hinge_difference(ob_sim, ob_real)
ob_real_aug = self._augment_observation(ob_real, reward_real,
self.cumulative_real_reward)
ob_sim_aug = self._augment_observation(ob_sim, reward_sim,
self.cumulative_sim_reward)
ob_err_aug = self._augment_observation(
ob_err, reward_sim - reward_real,
self.cumulative_sim_reward - self.cumulative_real_reward
)
ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1)
_, reward, done, info = envs_step_tuples["real_env"]
return ob, reward, done, info
|
python
|
def _player_step_tuple(self, envs_step_tuples):
"""Construct observation, return usual step tuple.
Args:
envs_step_tuples: tuples.
Returns:
Step tuple: ob, reward, done, info
ob: concatenated images [simulated observation, real observation,
difference], with additional informations in header.
reward: real environment reward
done: True iff. envs_step_tuples['real_env'][2] is True
info: real environment info
"""
ob_real, reward_real, _, _ = envs_step_tuples["real_env"]
ob_sim, reward_sim, _, _ = envs_step_tuples["sim_env"]
ob_err = absolute_hinge_difference(ob_sim, ob_real)
ob_real_aug = self._augment_observation(ob_real, reward_real,
self.cumulative_real_reward)
ob_sim_aug = self._augment_observation(ob_sim, reward_sim,
self.cumulative_sim_reward)
ob_err_aug = self._augment_observation(
ob_err, reward_sim - reward_real,
self.cumulative_sim_reward - self.cumulative_real_reward
)
ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1)
_, reward, done, info = envs_step_tuples["real_env"]
return ob, reward, done, info
|
[
"def",
"_player_step_tuple",
"(",
"self",
",",
"envs_step_tuples",
")",
":",
"ob_real",
",",
"reward_real",
",",
"_",
",",
"_",
"=",
"envs_step_tuples",
"[",
"\"real_env\"",
"]",
"ob_sim",
",",
"reward_sim",
",",
"_",
",",
"_",
"=",
"envs_step_tuples",
"[",
"\"sim_env\"",
"]",
"ob_err",
"=",
"absolute_hinge_difference",
"(",
"ob_sim",
",",
"ob_real",
")",
"ob_real_aug",
"=",
"self",
".",
"_augment_observation",
"(",
"ob_real",
",",
"reward_real",
",",
"self",
".",
"cumulative_real_reward",
")",
"ob_sim_aug",
"=",
"self",
".",
"_augment_observation",
"(",
"ob_sim",
",",
"reward_sim",
",",
"self",
".",
"cumulative_sim_reward",
")",
"ob_err_aug",
"=",
"self",
".",
"_augment_observation",
"(",
"ob_err",
",",
"reward_sim",
"-",
"reward_real",
",",
"self",
".",
"cumulative_sim_reward",
"-",
"self",
".",
"cumulative_real_reward",
")",
"ob",
"=",
"np",
".",
"concatenate",
"(",
"[",
"ob_sim_aug",
",",
"ob_real_aug",
",",
"ob_err_aug",
"]",
",",
"axis",
"=",
"1",
")",
"_",
",",
"reward",
",",
"done",
",",
"info",
"=",
"envs_step_tuples",
"[",
"\"real_env\"",
"]",
"return",
"ob",
",",
"reward",
",",
"done",
",",
"info"
] |
Construct observation, return usual step tuple.
Args:
envs_step_tuples: tuples.
Returns:
Step tuple: ob, reward, done, info
ob: concatenated images [simulated observation, real observation,
difference], with additional informations in header.
reward: real environment reward
done: True iff. envs_step_tuples['real_env'][2] is True
info: real environment info
|
[
"Construct",
"observation",
"return",
"usual",
"step",
"tuple",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player.py#L345-L373
|
21,741
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player.py
|
SimAndRealEnvPlayer.reset
|
def reset(self):
"""Reset simulated and real environments."""
self._frame_counter = 0
ob_real = self.real_env.reset()
# Initialize simulated environment with frames from real one.
self.sim_env.add_to_initial_stack(ob_real)
for _ in range(3):
ob_real, _, _, _ = self.real_env.step(self.name_to_action_num["NOOP"])
self.sim_env.add_to_initial_stack(ob_real)
ob_sim = self.sim_env.reset()
assert np.all(ob_real == ob_sim)
self._last_step_tuples = self._pack_step_tuples((ob_real, 0, False, {}),
(ob_sim, 0, False, {}))
self.set_zero_cumulative_rewards()
ob, _, _, _ = self._player_step_tuple(self._last_step_tuples)
return ob
|
python
|
def reset(self):
"""Reset simulated and real environments."""
self._frame_counter = 0
ob_real = self.real_env.reset()
# Initialize simulated environment with frames from real one.
self.sim_env.add_to_initial_stack(ob_real)
for _ in range(3):
ob_real, _, _, _ = self.real_env.step(self.name_to_action_num["NOOP"])
self.sim_env.add_to_initial_stack(ob_real)
ob_sim = self.sim_env.reset()
assert np.all(ob_real == ob_sim)
self._last_step_tuples = self._pack_step_tuples((ob_real, 0, False, {}),
(ob_sim, 0, False, {}))
self.set_zero_cumulative_rewards()
ob, _, _, _ = self._player_step_tuple(self._last_step_tuples)
return ob
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_frame_counter",
"=",
"0",
"ob_real",
"=",
"self",
".",
"real_env",
".",
"reset",
"(",
")",
"# Initialize simulated environment with frames from real one.",
"self",
".",
"sim_env",
".",
"add_to_initial_stack",
"(",
"ob_real",
")",
"for",
"_",
"in",
"range",
"(",
"3",
")",
":",
"ob_real",
",",
"_",
",",
"_",
",",
"_",
"=",
"self",
".",
"real_env",
".",
"step",
"(",
"self",
".",
"name_to_action_num",
"[",
"\"NOOP\"",
"]",
")",
"self",
".",
"sim_env",
".",
"add_to_initial_stack",
"(",
"ob_real",
")",
"ob_sim",
"=",
"self",
".",
"sim_env",
".",
"reset",
"(",
")",
"assert",
"np",
".",
"all",
"(",
"ob_real",
"==",
"ob_sim",
")",
"self",
".",
"_last_step_tuples",
"=",
"self",
".",
"_pack_step_tuples",
"(",
"(",
"ob_real",
",",
"0",
",",
"False",
",",
"{",
"}",
")",
",",
"(",
"ob_sim",
",",
"0",
",",
"False",
",",
"{",
"}",
")",
")",
"self",
".",
"set_zero_cumulative_rewards",
"(",
")",
"ob",
",",
"_",
",",
"_",
",",
"_",
"=",
"self",
".",
"_player_step_tuple",
"(",
"self",
".",
"_last_step_tuples",
")",
"return",
"ob"
] |
Reset simulated and real environments.
|
[
"Reset",
"simulated",
"and",
"real",
"environments",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player.py#L375-L390
|
21,742
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player.py
|
SingleEnvPlayer._player_step_tuple
|
def _player_step_tuple(self, envs_step_tuples):
"""Augment observation, return usual step tuple."""
ob, reward, done, info = envs_step_tuples["env"]
ob = self._augment_observation(ob, reward, self.cumulative_reward)
return ob, reward, done, info
|
python
|
def _player_step_tuple(self, envs_step_tuples):
"""Augment observation, return usual step tuple."""
ob, reward, done, info = envs_step_tuples["env"]
ob = self._augment_observation(ob, reward, self.cumulative_reward)
return ob, reward, done, info
|
[
"def",
"_player_step_tuple",
"(",
"self",
",",
"envs_step_tuples",
")",
":",
"ob",
",",
"reward",
",",
"done",
",",
"info",
"=",
"envs_step_tuples",
"[",
"\"env\"",
"]",
"ob",
"=",
"self",
".",
"_augment_observation",
"(",
"ob",
",",
"reward",
",",
"self",
".",
"cumulative_reward",
")",
"return",
"ob",
",",
"reward",
",",
"done",
",",
"info"
] |
Augment observation, return usual step tuple.
|
[
"Augment",
"observation",
"return",
"usual",
"step",
"tuple",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player.py#L449-L453
|
21,743
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_audio.py
|
add_delta_deltas
|
def add_delta_deltas(filterbanks, name=None):
"""Compute time first and second-order derivative channels.
Args:
filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
name: scope name
Returns:
float32 tensor with shape [batch_size, len, num_bins, 3]
"""
delta_filter = np.array([2, 1, 0, -1, -2])
delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, "full")
delta_filter_stack = np.array(
[[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,
list(delta_delta_filter)],
dtype=np.float32).T[:, None, None, :]
delta_filter_stack /= np.sqrt(
np.sum(delta_filter_stack**2, axis=0, keepdims=True))
filterbanks = tf.nn.conv2d(
filterbanks, delta_filter_stack, [1, 1, 1, 1], "SAME", data_format="NHWC",
name=name)
return filterbanks
|
python
|
def add_delta_deltas(filterbanks, name=None):
"""Compute time first and second-order derivative channels.
Args:
filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
name: scope name
Returns:
float32 tensor with shape [batch_size, len, num_bins, 3]
"""
delta_filter = np.array([2, 1, 0, -1, -2])
delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, "full")
delta_filter_stack = np.array(
[[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,
list(delta_delta_filter)],
dtype=np.float32).T[:, None, None, :]
delta_filter_stack /= np.sqrt(
np.sum(delta_filter_stack**2, axis=0, keepdims=True))
filterbanks = tf.nn.conv2d(
filterbanks, delta_filter_stack, [1, 1, 1, 1], "SAME", data_format="NHWC",
name=name)
return filterbanks
|
[
"def",
"add_delta_deltas",
"(",
"filterbanks",
",",
"name",
"=",
"None",
")",
":",
"delta_filter",
"=",
"np",
".",
"array",
"(",
"[",
"2",
",",
"1",
",",
"0",
",",
"-",
"1",
",",
"-",
"2",
"]",
")",
"delta_delta_filter",
"=",
"scipy",
".",
"signal",
".",
"convolve",
"(",
"delta_filter",
",",
"delta_filter",
",",
"\"full\"",
")",
"delta_filter_stack",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
"]",
"*",
"4",
"+",
"[",
"1",
"]",
"+",
"[",
"0",
"]",
"*",
"4",
",",
"[",
"0",
"]",
"*",
"2",
"+",
"list",
"(",
"delta_filter",
")",
"+",
"[",
"0",
"]",
"*",
"2",
",",
"list",
"(",
"delta_delta_filter",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
".",
"T",
"[",
":",
",",
"None",
",",
"None",
",",
":",
"]",
"delta_filter_stack",
"/=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"delta_filter_stack",
"**",
"2",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")",
"filterbanks",
"=",
"tf",
".",
"nn",
".",
"conv2d",
"(",
"filterbanks",
",",
"delta_filter_stack",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"\"SAME\"",
",",
"data_format",
"=",
"\"NHWC\"",
",",
"name",
"=",
"name",
")",
"return",
"filterbanks"
] |
Compute time first and second-order derivative channels.
Args:
filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
name: scope name
Returns:
float32 tensor with shape [batch_size, len, num_bins, 3]
|
[
"Compute",
"time",
"first",
"and",
"second",
"-",
"order",
"derivative",
"channels",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_audio.py#L28-L52
|
21,744
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_audio.py
|
compute_mel_filterbank_features
|
def compute_mel_filterbank_features(
waveforms,
sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97,
frame_length=25, frame_step=10, fft_length=None,
window_fn=functools.partial(tf.contrib.signal.hann_window, periodic=True),
lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80,
log_noise_floor=1e-3, apply_mask=True):
"""Implement mel-filterbank extraction using tf ops.
Args:
waveforms: float32 tensor with shape [batch_size, max_len]
sample_rate: sampling rate of the waveform
dither: stddev of Gaussian noise added to waveform to prevent quantization
artefacts
preemphasis: waveform high-pass filtering constant
frame_length: frame length in ms
frame_step: frame_Step in ms
fft_length: number of fft bins
window_fn: windowing function
lower_edge_hertz: lowest frequency of the filterbank
upper_edge_hertz: highest frequency of the filterbank
num_mel_bins: filterbank size
log_noise_floor: clip small values to prevent numeric overflow in log
apply_mask: When working on a batch of samples, set padding frames to zero
Returns:
filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
"""
# `stfts` is a complex64 Tensor representing the short-time Fourier
# Transform of each signal in `signals`. Its shape is
# [batch_size, ?, fft_unique_bins]
# where fft_unique_bins = fft_length // 2 + 1
# Find the wave length: the largest index for which the value is !=0
# note that waveforms samples that are exactly 0.0 are quite common, so
# simply doing sum(waveforms != 0, axis=-1) will not work correctly.
wav_lens = tf.reduce_max(
tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) *
tf.to_int32(tf.not_equal(waveforms, 0.0)),
axis=-1) + 1
if dither > 0:
waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)
if preemphasis > 0:
waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1]
wav_lens -= 1
frame_length = int(frame_length * sample_rate / 1e3)
frame_step = int(frame_step * sample_rate / 1e3)
if fft_length is None:
fft_length = int(2**(np.ceil(np.log2(frame_length))))
stfts = tf.contrib.signal.stft(
waveforms,
frame_length=frame_length,
frame_step=frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=True)
stft_lens = (wav_lens + (frame_step - 1)) // frame_step
masks = tf.to_float(tf.less_equal(
tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0),
tf.expand_dims(stft_lens, 1)))
# An energy spectrogram is the magnitude of the complex-valued STFT.
# A float32 Tensor of shape [batch_size, ?, 257].
magnitude_spectrograms = tf.abs(stfts)
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
linear_to_mel_weight_matrix = (
tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz))
mel_spectrograms = tf.tensordot(
magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for tensordot does not currently handle this case.
mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))
if apply_mask:
log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1)
return tf.expand_dims(log_mel_sgram, -1, name="mel_sgrams")
|
python
|
def compute_mel_filterbank_features(
waveforms,
sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97,
frame_length=25, frame_step=10, fft_length=None,
window_fn=functools.partial(tf.contrib.signal.hann_window, periodic=True),
lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80,
log_noise_floor=1e-3, apply_mask=True):
"""Implement mel-filterbank extraction using tf ops.
Args:
waveforms: float32 tensor with shape [batch_size, max_len]
sample_rate: sampling rate of the waveform
dither: stddev of Gaussian noise added to waveform to prevent quantization
artefacts
preemphasis: waveform high-pass filtering constant
frame_length: frame length in ms
frame_step: frame_Step in ms
fft_length: number of fft bins
window_fn: windowing function
lower_edge_hertz: lowest frequency of the filterbank
upper_edge_hertz: highest frequency of the filterbank
num_mel_bins: filterbank size
log_noise_floor: clip small values to prevent numeric overflow in log
apply_mask: When working on a batch of samples, set padding frames to zero
Returns:
filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
"""
# `stfts` is a complex64 Tensor representing the short-time Fourier
# Transform of each signal in `signals`. Its shape is
# [batch_size, ?, fft_unique_bins]
# where fft_unique_bins = fft_length // 2 + 1
# Find the wave length: the largest index for which the value is !=0
# note that waveforms samples that are exactly 0.0 are quite common, so
# simply doing sum(waveforms != 0, axis=-1) will not work correctly.
wav_lens = tf.reduce_max(
tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) *
tf.to_int32(tf.not_equal(waveforms, 0.0)),
axis=-1) + 1
if dither > 0:
waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)
if preemphasis > 0:
waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1]
wav_lens -= 1
frame_length = int(frame_length * sample_rate / 1e3)
frame_step = int(frame_step * sample_rate / 1e3)
if fft_length is None:
fft_length = int(2**(np.ceil(np.log2(frame_length))))
stfts = tf.contrib.signal.stft(
waveforms,
frame_length=frame_length,
frame_step=frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=True)
stft_lens = (wav_lens + (frame_step - 1)) // frame_step
masks = tf.to_float(tf.less_equal(
tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0),
tf.expand_dims(stft_lens, 1)))
# An energy spectrogram is the magnitude of the complex-valued STFT.
# A float32 Tensor of shape [batch_size, ?, 257].
magnitude_spectrograms = tf.abs(stfts)
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
linear_to_mel_weight_matrix = (
tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz))
mel_spectrograms = tf.tensordot(
magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for tensordot does not currently handle this case.
mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))
if apply_mask:
log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1)
return tf.expand_dims(log_mel_sgram, -1, name="mel_sgrams")
|
[
"def",
"compute_mel_filterbank_features",
"(",
"waveforms",
",",
"sample_rate",
"=",
"16000",
",",
"dither",
"=",
"1.0",
"/",
"np",
".",
"iinfo",
"(",
"np",
".",
"int16",
")",
".",
"max",
",",
"preemphasis",
"=",
"0.97",
",",
"frame_length",
"=",
"25",
",",
"frame_step",
"=",
"10",
",",
"fft_length",
"=",
"None",
",",
"window_fn",
"=",
"functools",
".",
"partial",
"(",
"tf",
".",
"contrib",
".",
"signal",
".",
"hann_window",
",",
"periodic",
"=",
"True",
")",
",",
"lower_edge_hertz",
"=",
"80.0",
",",
"upper_edge_hertz",
"=",
"7600.0",
",",
"num_mel_bins",
"=",
"80",
",",
"log_noise_floor",
"=",
"1e-3",
",",
"apply_mask",
"=",
"True",
")",
":",
"# `stfts` is a complex64 Tensor representing the short-time Fourier",
"# Transform of each signal in `signals`. Its shape is",
"# [batch_size, ?, fft_unique_bins]",
"# where fft_unique_bins = fft_length // 2 + 1",
"# Find the wave length: the largest index for which the value is !=0",
"# note that waveforms samples that are exactly 0.0 are quite common, so",
"# simply doing sum(waveforms != 0, axis=-1) will not work correctly.",
"wav_lens",
"=",
"tf",
".",
"reduce_max",
"(",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"range",
"(",
"tf",
".",
"shape",
"(",
"waveforms",
")",
"[",
"1",
"]",
")",
",",
"0",
")",
"*",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"not_equal",
"(",
"waveforms",
",",
"0.0",
")",
")",
",",
"axis",
"=",
"-",
"1",
")",
"+",
"1",
"if",
"dither",
">",
"0",
":",
"waveforms",
"+=",
"tf",
".",
"random_normal",
"(",
"tf",
".",
"shape",
"(",
"waveforms",
")",
",",
"stddev",
"=",
"dither",
")",
"if",
"preemphasis",
">",
"0",
":",
"waveforms",
"=",
"waveforms",
"[",
":",
",",
"1",
":",
"]",
"-",
"preemphasis",
"*",
"waveforms",
"[",
":",
",",
":",
"-",
"1",
"]",
"wav_lens",
"-=",
"1",
"frame_length",
"=",
"int",
"(",
"frame_length",
"*",
"sample_rate",
"/",
"1e3",
")",
"frame_step",
"=",
"int",
"(",
"frame_step",
"*",
"sample_rate",
"/",
"1e3",
")",
"if",
"fft_length",
"is",
"None",
":",
"fft_length",
"=",
"int",
"(",
"2",
"**",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"frame_length",
")",
")",
")",
")",
"stfts",
"=",
"tf",
".",
"contrib",
".",
"signal",
".",
"stft",
"(",
"waveforms",
",",
"frame_length",
"=",
"frame_length",
",",
"frame_step",
"=",
"frame_step",
",",
"fft_length",
"=",
"fft_length",
",",
"window_fn",
"=",
"window_fn",
",",
"pad_end",
"=",
"True",
")",
"stft_lens",
"=",
"(",
"wav_lens",
"+",
"(",
"frame_step",
"-",
"1",
")",
")",
"//",
"frame_step",
"masks",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"less_equal",
"(",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"range",
"(",
"tf",
".",
"shape",
"(",
"stfts",
")",
"[",
"1",
"]",
")",
",",
"0",
")",
",",
"tf",
".",
"expand_dims",
"(",
"stft_lens",
",",
"1",
")",
")",
")",
"# An energy spectrogram is the magnitude of the complex-valued STFT.",
"# A float32 Tensor of shape [batch_size, ?, 257].",
"magnitude_spectrograms",
"=",
"tf",
".",
"abs",
"(",
"stfts",
")",
"# Warp the linear-scale, magnitude spectrograms into the mel-scale.",
"num_spectrogram_bins",
"=",
"magnitude_spectrograms",
".",
"shape",
"[",
"-",
"1",
"]",
".",
"value",
"linear_to_mel_weight_matrix",
"=",
"(",
"tf",
".",
"contrib",
".",
"signal",
".",
"linear_to_mel_weight_matrix",
"(",
"num_mel_bins",
",",
"num_spectrogram_bins",
",",
"sample_rate",
",",
"lower_edge_hertz",
",",
"upper_edge_hertz",
")",
")",
"mel_spectrograms",
"=",
"tf",
".",
"tensordot",
"(",
"magnitude_spectrograms",
",",
"linear_to_mel_weight_matrix",
",",
"1",
")",
"# Note: Shape inference for tensordot does not currently handle this case.",
"mel_spectrograms",
".",
"set_shape",
"(",
"magnitude_spectrograms",
".",
"shape",
"[",
":",
"-",
"1",
"]",
".",
"concatenate",
"(",
"linear_to_mel_weight_matrix",
".",
"shape",
"[",
"-",
"1",
":",
"]",
")",
")",
"log_mel_sgram",
"=",
"tf",
".",
"log",
"(",
"tf",
".",
"maximum",
"(",
"log_noise_floor",
",",
"mel_spectrograms",
")",
")",
"if",
"apply_mask",
":",
"log_mel_sgram",
"*=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"to_float",
"(",
"masks",
")",
",",
"-",
"1",
")",
"return",
"tf",
".",
"expand_dims",
"(",
"log_mel_sgram",
",",
"-",
"1",
",",
"name",
"=",
"\"mel_sgrams\"",
")"
] |
Implement mel-filterbank extraction using tf ops.
Args:
waveforms: float32 tensor with shape [batch_size, max_len]
sample_rate: sampling rate of the waveform
dither: stddev of Gaussian noise added to waveform to prevent quantization
artefacts
preemphasis: waveform high-pass filtering constant
frame_length: frame length in ms
frame_step: frame_Step in ms
fft_length: number of fft bins
window_fn: windowing function
lower_edge_hertz: lowest frequency of the filterbank
upper_edge_hertz: highest frequency of the filterbank
num_mel_bins: filterbank size
log_noise_floor: clip small values to prevent numeric overflow in log
apply_mask: When working on a batch of samples, set padding frames to zero
Returns:
filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
|
[
"Implement",
"mel",
"-",
"filterbank",
"extraction",
"using",
"tf",
"ops",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_audio.py#L55-L138
|
21,745
|
tensorflow/tensor2tensor
|
tensor2tensor/envs/env_problem_utils.py
|
play_env_problem_randomly
|
def play_env_problem_randomly(env_problem,
num_steps):
"""Plays the env problem by randomly sampling actions for `num_steps`."""
# Reset all environments.
env_problem.reset()
# Play all environments, sampling random actions each time.
for _ in range(num_steps):
# Sample batch_size actions from the action space and stack them.
actions = np.stack([env_problem.action_space.sample() for _ in range(
env_problem.batch_size)])
# Execute actions, observations are stored in `env_problem`.
_, _, dones, _ = env_problem.step(actions)
# Get the indices where we are done and reset those.
env_problem.reset(indices=done_indices(dones))
|
python
|
def play_env_problem_randomly(env_problem,
num_steps):
"""Plays the env problem by randomly sampling actions for `num_steps`."""
# Reset all environments.
env_problem.reset()
# Play all environments, sampling random actions each time.
for _ in range(num_steps):
# Sample batch_size actions from the action space and stack them.
actions = np.stack([env_problem.action_space.sample() for _ in range(
env_problem.batch_size)])
# Execute actions, observations are stored in `env_problem`.
_, _, dones, _ = env_problem.step(actions)
# Get the indices where we are done and reset those.
env_problem.reset(indices=done_indices(dones))
|
[
"def",
"play_env_problem_randomly",
"(",
"env_problem",
",",
"num_steps",
")",
":",
"# Reset all environments.",
"env_problem",
".",
"reset",
"(",
")",
"# Play all environments, sampling random actions each time.",
"for",
"_",
"in",
"range",
"(",
"num_steps",
")",
":",
"# Sample batch_size actions from the action space and stack them.",
"actions",
"=",
"np",
".",
"stack",
"(",
"[",
"env_problem",
".",
"action_space",
".",
"sample",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"env_problem",
".",
"batch_size",
")",
"]",
")",
"# Execute actions, observations are stored in `env_problem`.",
"_",
",",
"_",
",",
"dones",
",",
"_",
"=",
"env_problem",
".",
"step",
"(",
"actions",
")",
"# Get the indices where we are done and reset those.",
"env_problem",
".",
"reset",
"(",
"indices",
"=",
"done_indices",
"(",
"dones",
")",
")"
] |
Plays the env problem by randomly sampling actions for `num_steps`.
|
[
"Plays",
"the",
"env",
"problem",
"by",
"randomly",
"sampling",
"actions",
"for",
"num_steps",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem_utils.py#L30-L46
|
21,746
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cipher.py
|
generate_plaintext_random
|
def generate_plaintext_random(plain_vocab, distribution, train_samples,
length):
"""Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
plain_vocab (list of Integers): unique vocabularies.
"""
if distribution is not None:
assert len(distribution) == len(plain_vocab)
train_indices = np.random.choice(
range(len(plain_vocab)), (train_samples, length), p=distribution)
return train_indices
|
python
|
def generate_plaintext_random(plain_vocab, distribution, train_samples,
length):
"""Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
plain_vocab (list of Integers): unique vocabularies.
"""
if distribution is not None:
assert len(distribution) == len(plain_vocab)
train_indices = np.random.choice(
range(len(plain_vocab)), (train_samples, length), p=distribution)
return train_indices
|
[
"def",
"generate_plaintext_random",
"(",
"plain_vocab",
",",
"distribution",
",",
"train_samples",
",",
"length",
")",
":",
"if",
"distribution",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"distribution",
")",
"==",
"len",
"(",
"plain_vocab",
")",
"train_indices",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"range",
"(",
"len",
"(",
"plain_vocab",
")",
")",
",",
"(",
"train_samples",
",",
"length",
")",
",",
"p",
"=",
"distribution",
")",
"return",
"train_indices"
] |
Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
plain_vocab (list of Integers): unique vocabularies.
|
[
"Generates",
"samples",
"of",
"text",
"from",
"the",
"provided",
"vocabulary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cipher.py#L154-L177
|
21,747
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cipher.py
|
encipher_shift
|
def encipher_shift(plaintext, plain_vocab, shift):
"""Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
cipher = ShiftEncryptionLayer(plain_vocab, shift)
for _, sentence in enumerate(plaintext):
cipher_sentence = []
for _, character in enumerate(sentence):
encrypted_char = cipher.encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
|
python
|
def encipher_shift(plaintext, plain_vocab, shift):
"""Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
cipher = ShiftEncryptionLayer(plain_vocab, shift)
for _, sentence in enumerate(plaintext):
cipher_sentence = []
for _, character in enumerate(sentence):
encrypted_char = cipher.encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
|
[
"def",
"encipher_shift",
"(",
"plaintext",
",",
"plain_vocab",
",",
"shift",
")",
":",
"ciphertext",
"=",
"[",
"]",
"cipher",
"=",
"ShiftEncryptionLayer",
"(",
"plain_vocab",
",",
"shift",
")",
"for",
"_",
",",
"sentence",
"in",
"enumerate",
"(",
"plaintext",
")",
":",
"cipher_sentence",
"=",
"[",
"]",
"for",
"_",
",",
"character",
"in",
"enumerate",
"(",
"sentence",
")",
":",
"encrypted_char",
"=",
"cipher",
".",
"encrypt_character",
"(",
"character",
")",
"cipher_sentence",
".",
"append",
"(",
"encrypted_char",
")",
"ciphertext",
".",
"append",
"(",
"cipher_sentence",
")",
"return",
"ciphertext"
] |
Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
|
[
"Encrypt",
"plain",
"text",
"with",
"a",
"single",
"shift",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cipher.py#L180-L200
|
21,748
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cipher.py
|
encipher_vigenere
|
def encipher_vigenere(plaintext, plain_vocab, key):
"""Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
# generate Vigenere table
layers = [
ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))
]
for i, sentence in enumerate(plaintext):
cipher_sentence = []
for j, character in enumerate(sentence):
key_idx = key[j % len(key)]
encrypted_char = layers[key_idx].encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
|
python
|
def encipher_vigenere(plaintext, plain_vocab, key):
"""Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
# generate Vigenere table
layers = [
ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))
]
for i, sentence in enumerate(plaintext):
cipher_sentence = []
for j, character in enumerate(sentence):
key_idx = key[j % len(key)]
encrypted_char = layers[key_idx].encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
|
[
"def",
"encipher_vigenere",
"(",
"plaintext",
",",
"plain_vocab",
",",
"key",
")",
":",
"ciphertext",
"=",
"[",
"]",
"# generate Vigenere table",
"layers",
"=",
"[",
"ShiftEncryptionLayer",
"(",
"plain_vocab",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"plain_vocab",
")",
")",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"plaintext",
")",
":",
"cipher_sentence",
"=",
"[",
"]",
"for",
"j",
",",
"character",
"in",
"enumerate",
"(",
"sentence",
")",
":",
"key_idx",
"=",
"key",
"[",
"j",
"%",
"len",
"(",
"key",
")",
"]",
"encrypted_char",
"=",
"layers",
"[",
"key_idx",
"]",
".",
"encrypt_character",
"(",
"character",
")",
"cipher_sentence",
".",
"append",
"(",
"encrypted_char",
")",
"ciphertext",
".",
"append",
"(",
"cipher_sentence",
")",
"return",
"ciphertext"
] |
Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text.
|
[
"Encrypt",
"plain",
"text",
"with",
"given",
"key",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cipher.py#L203-L228
|
21,749
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/super_lm.py
|
super_lm_moe
|
def super_lm_moe():
"""Add mixture of experts with ~1B params."""
hparams = super_lm_base()
hparams.layers = (
("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d")
hparams.moe_num_experts = 32
hparams.moe_hidden_sizes = "1024"
return hparams
|
python
|
def super_lm_moe():
"""Add mixture of experts with ~1B params."""
hparams = super_lm_base()
hparams.layers = (
("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d")
hparams.moe_num_experts = 32
hparams.moe_hidden_sizes = "1024"
return hparams
|
[
"def",
"super_lm_moe",
"(",
")",
":",
"hparams",
"=",
"super_lm_base",
"(",
")",
"hparams",
".",
"layers",
"=",
"(",
"(",
"\"n,att,m,d,a,\"",
"\"n,moe,m,d,a,\"",
")",
"*",
"4",
"+",
"\"n,ffn,d\"",
")",
"hparams",
".",
"moe_num_experts",
"=",
"32",
"hparams",
".",
"moe_hidden_sizes",
"=",
"\"1024\"",
"return",
"hparams"
] |
Add mixture of experts with ~1B params.
|
[
"Add",
"mixture",
"of",
"experts",
"with",
"~1B",
"params",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/super_lm.py#L334-L341
|
21,750
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe_tr_dense_2k
|
def xmoe_tr_dense_2k():
"""Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
"""
hparams = mtf_transformer2.mtf_bitransformer_base()
hparams.encoder_layers = ["self_att", "drd"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
hparams.batch_size = 64
hparams.shared_embedding_and_softmax_weights = True
hparams.mesh_shape = "batch:8"
return hparams
|
python
|
def xmoe_tr_dense_2k():
"""Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
"""
hparams = mtf_transformer2.mtf_bitransformer_base()
hparams.encoder_layers = ["self_att", "drd"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
hparams.batch_size = 64
hparams.shared_embedding_and_softmax_weights = True
hparams.mesh_shape = "batch:8"
return hparams
|
[
"def",
"xmoe_tr_dense_2k",
"(",
")",
":",
"hparams",
"=",
"mtf_transformer2",
".",
"mtf_bitransformer_base",
"(",
")",
"hparams",
".",
"encoder_layers",
"=",
"[",
"\"self_att\"",
",",
"\"drd\"",
"]",
"*",
"4",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"self_att\"",
",",
"\"enc_att\"",
",",
"\"drd\"",
"]",
"*",
"4",
"hparams",
".",
"batch_size",
"=",
"64",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"True",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:8\"",
"return",
"hparams"
] |
Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
|
[
"Series",
"of",
"architectural",
"experiments",
"on",
"Translation",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L30-L46
|
21,751
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe_dense_4k
|
def xmoe_dense_4k():
"""Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
# The following hparams are constant across all these experiments.
hparams.batch_size = 128
hparams.d_model = 512
hparams.d_kv = 128
hparams.num_heads = 4
hparams.decoder_layers = ["att", "drd"] * 4
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_schedule = "rsqrt_decay"
# We will vary the following parameters related to the ffn/moe layers.
hparams.d_ff = 4096
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:8"
return hparams
|
python
|
def xmoe_dense_4k():
"""Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
# The following hparams are constant across all these experiments.
hparams.batch_size = 128
hparams.d_model = 512
hparams.d_kv = 128
hparams.num_heads = 4
hparams.decoder_layers = ["att", "drd"] * 4
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_schedule = "rsqrt_decay"
# We will vary the following parameters related to the ffn/moe layers.
hparams.d_ff = 4096
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:8"
return hparams
|
[
"def",
"xmoe_dense_4k",
"(",
")",
":",
"hparams",
"=",
"mtf_transformer",
".",
"mtf_transformer_base_lm",
"(",
")",
"hparams",
".",
"attention_dropout",
"=",
"0.0",
"hparams",
".",
"relu_dropout",
"=",
"0.0",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.0",
"# The following hparams are constant across all these experiments.",
"hparams",
".",
"batch_size",
"=",
"128",
"hparams",
".",
"d_model",
"=",
"512",
"hparams",
".",
"d_kv",
"=",
"128",
"hparams",
".",
"num_heads",
"=",
"4",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"att\"",
",",
"\"drd\"",
"]",
"*",
"4",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"False",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"rsqrt_decay\"",
"# We will vary the following parameters related to the ffn/moe layers.",
"hparams",
".",
"d_ff",
"=",
"4096",
"hparams",
".",
"layout",
"=",
"\"batch:batch;vocab:model;d_ff:model;heads:model\"",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:8\"",
"return",
"hparams"
] |
Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams
|
[
"Series",
"of",
"architectural",
"experiments",
"on",
"cheap",
"language",
"models",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L104-L147
|
21,752
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe_2d
|
def xmoe_2d():
"""Two-dimensional hierarchical mixture of 16 experts."""
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams
|
python
|
def xmoe_2d():
"""Two-dimensional hierarchical mixture of 16 experts."""
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams
|
[
"def",
"xmoe_2d",
"(",
")",
":",
"hparams",
"=",
"xmoe_top_2",
"(",
")",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"att\"",
",",
"\"hmoe\"",
"]",
"*",
"4",
"hparams",
".",
"mesh_shape",
"=",
"\"b0:2;b1:4\"",
"hparams",
".",
"outer_batch_size",
"=",
"4",
"hparams",
".",
"layout",
"=",
"\"outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0\"",
"hparams",
".",
"moe_num_experts",
"=",
"[",
"4",
",",
"4",
"]",
"return",
"hparams"
] |
Two-dimensional hierarchical mixture of 16 experts.
|
[
"Two",
"-",
"dimensional",
"hierarchical",
"mixture",
"of",
"16",
"experts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L185-L193
|
21,753
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe2_dense
|
def xmoe2_dense(sz):
"""Series of architectural experiments on language modeling.
Larger models than the ones above.
All models are trained on sequences of 1024 tokens.
We assume infinite training data, so no dropout necessary.
We process 2^36 tokens in training = 524288 steps at batch size 128
TODO(noam): find a large enough dataset for these experiments.
You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
(1 epoch = ~46000 steps) so training will cover about 11 epochs.
Note: configurations and code are likely to change without notice.
Run on TPU 4x4 for 524288 steps unless otherwise indicated.
Args:
sz: an integer
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
hparams.batch_size = 128
hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
hparams.learning_rate_decay_steps = 65536
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
return hparams
|
python
|
def xmoe2_dense(sz):
"""Series of architectural experiments on language modeling.
Larger models than the ones above.
All models are trained on sequences of 1024 tokens.
We assume infinite training data, so no dropout necessary.
We process 2^36 tokens in training = 524288 steps at batch size 128
TODO(noam): find a large enough dataset for these experiments.
You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
(1 epoch = ~46000 steps) so training will cover about 11 epochs.
Note: configurations and code are likely to change without notice.
Run on TPU 4x4 for 524288 steps unless otherwise indicated.
Args:
sz: an integer
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
hparams.batch_size = 128
hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
hparams.learning_rate_decay_steps = 65536
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
return hparams
|
[
"def",
"xmoe2_dense",
"(",
"sz",
")",
":",
"hparams",
"=",
"mtf_transformer",
".",
"mtf_transformer_paper_lm",
"(",
"sz",
")",
"hparams",
".",
"attention_dropout",
"=",
"0.0",
"hparams",
".",
"relu_dropout",
"=",
"0.0",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.0",
"hparams",
".",
"max_length",
"=",
"1024",
"hparams",
".",
"batch_size",
"=",
"128",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"rsqrt_decay*linear_decay\"",
"hparams",
".",
"learning_rate_decay_steps",
"=",
"65536",
"hparams",
".",
"layout",
"=",
"\"batch:batch;vocab:model;d_ff:model;heads:model\"",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:32\"",
"return",
"hparams"
] |
Series of architectural experiments on language modeling.
Larger models than the ones above.
All models are trained on sequences of 1024 tokens.
We assume infinite training data, so no dropout necessary.
We process 2^36 tokens in training = 524288 steps at batch size 128
TODO(noam): find a large enough dataset for these experiments.
You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
(1 epoch = ~46000 steps) so training will cover about 11 epochs.
Note: configurations and code are likely to change without notice.
Run on TPU 4x4 for 524288 steps unless otherwise indicated.
Args:
sz: an integer
Returns:
a hparams
|
[
"Series",
"of",
"architectural",
"experiments",
"on",
"language",
"modeling",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L232-L267
|
21,754
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe2_v1
|
def xmoe2_v1():
"""Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchichal moe layers.
Returns:
a hparams
"""
hparams = xmoe2_dense(0)
moe.set_default_moe_hparams(hparams)
hparams.decoder_layers = (
["local_att", "local_att", "drd",
"att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1]
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.moe_hidden_size = 32768
hparams.mesh_shape = "b0:4;b1:8"
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.outer_batch_size = 4
hparams.moe_num_experts = [8, 4]
hparams.num_heads = 4
return hparams
|
python
|
def xmoe2_v1():
"""Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchichal moe layers.
Returns:
a hparams
"""
hparams = xmoe2_dense(0)
moe.set_default_moe_hparams(hparams)
hparams.decoder_layers = (
["local_att", "local_att", "drd",
"att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1]
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.moe_hidden_size = 32768
hparams.mesh_shape = "b0:4;b1:8"
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.outer_batch_size = 4
hparams.moe_num_experts = [8, 4]
hparams.num_heads = 4
return hparams
|
[
"def",
"xmoe2_v1",
"(",
")",
":",
"hparams",
"=",
"xmoe2_dense",
"(",
"0",
")",
"moe",
".",
"set_default_moe_hparams",
"(",
"hparams",
")",
"hparams",
".",
"decoder_layers",
"=",
"(",
"[",
"\"local_att\"",
",",
"\"local_att\"",
",",
"\"drd\"",
",",
"\"att\"",
",",
"\"drd\"",
",",
"\"local_att\"",
",",
"\"local_att\"",
",",
"\"hmoe\"",
"]",
"*",
"4",
")",
"[",
":",
"-",
"1",
"]",
"hparams",
".",
"d_ff",
"=",
"2048",
"hparams",
".",
"d_kv",
"=",
"128",
"hparams",
".",
"moe_hidden_size",
"=",
"32768",
"hparams",
".",
"mesh_shape",
"=",
"\"b0:4;b1:8\"",
"hparams",
".",
"layout",
"=",
"\"outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0\"",
"hparams",
".",
"outer_batch_size",
"=",
"4",
"hparams",
".",
"moe_num_experts",
"=",
"[",
"8",
",",
"4",
"]",
"hparams",
".",
"num_heads",
"=",
"4",
"return",
"hparams"
] |
Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchichal moe layers.
Returns:
a hparams
|
[
"Model",
"incorporating",
"mixture",
"-",
"of",
"-",
"experts",
"and",
"local",
"-",
"attention",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L291-L314
|
21,755
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe2_v1_x128
|
def xmoe2_v1_x128():
"""128 experts, ~25B params - Train for 131072 steps on 8x8."""
hparams = xmoe2_v1()
hparams.moe_num_experts = [16, 8]
hparams.outer_batch_size = 8
hparams.mesh_shape = "b0:8;b1:16"
hparams.batch_size = 512
hparams.learning_rate_decay_steps = 16384
return hparams
|
python
|
def xmoe2_v1_x128():
"""128 experts, ~25B params - Train for 131072 steps on 8x8."""
hparams = xmoe2_v1()
hparams.moe_num_experts = [16, 8]
hparams.outer_batch_size = 8
hparams.mesh_shape = "b0:8;b1:16"
hparams.batch_size = 512
hparams.learning_rate_decay_steps = 16384
return hparams
|
[
"def",
"xmoe2_v1_x128",
"(",
")",
":",
"hparams",
"=",
"xmoe2_v1",
"(",
")",
"hparams",
".",
"moe_num_experts",
"=",
"[",
"16",
",",
"8",
"]",
"hparams",
".",
"outer_batch_size",
"=",
"8",
"hparams",
".",
"mesh_shape",
"=",
"\"b0:8;b1:16\"",
"hparams",
".",
"batch_size",
"=",
"512",
"hparams",
".",
"learning_rate_decay_steps",
"=",
"16384",
"return",
"hparams"
] |
128 experts, ~25B params - Train for 131072 steps on 8x8.
|
[
"128",
"experts",
"~25B",
"params",
"-",
"Train",
"for",
"131072",
"steps",
"on",
"8x8",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L318-L326
|
21,756
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe2_tiny
|
def xmoe2_tiny():
"""Test on local cpu."""
hparams = xmoe2_v1()
hparams.decoder_layers = [
"local_att", "att", "compressed_att", "drd", "hmoe"]
hparams.d_model = 128
hparams.moe_hidden_size = 512
hparams.outer_batch_size = 0
hparams.batch_size = 2
hparams.mesh_shape = ""
hparams.activation_dtype = "float32"
return hparams
|
python
|
def xmoe2_tiny():
"""Test on local cpu."""
hparams = xmoe2_v1()
hparams.decoder_layers = [
"local_att", "att", "compressed_att", "drd", "hmoe"]
hparams.d_model = 128
hparams.moe_hidden_size = 512
hparams.outer_batch_size = 0
hparams.batch_size = 2
hparams.mesh_shape = ""
hparams.activation_dtype = "float32"
return hparams
|
[
"def",
"xmoe2_tiny",
"(",
")",
":",
"hparams",
"=",
"xmoe2_v1",
"(",
")",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"local_att\"",
",",
"\"att\"",
",",
"\"compressed_att\"",
",",
"\"drd\"",
",",
"\"hmoe\"",
"]",
"hparams",
".",
"d_model",
"=",
"128",
"hparams",
".",
"moe_hidden_size",
"=",
"512",
"hparams",
".",
"outer_batch_size",
"=",
"0",
"hparams",
".",
"batch_size",
"=",
"2",
"hparams",
".",
"mesh_shape",
"=",
"\"\"",
"hparams",
".",
"activation_dtype",
"=",
"\"float32\"",
"return",
"hparams"
] |
Test on local cpu.
|
[
"Test",
"on",
"local",
"cpu",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L330-L341
|
21,757
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
xmoe2_v1_l4k_compressed_c4
|
def xmoe2_v1_l4k_compressed_c4():
"""With compressed attention."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"compressed_att" if l == "att" else l for l in hparams.decoder_layers]
hparams.compression_factor = 4
return hparams
|
python
|
def xmoe2_v1_l4k_compressed_c4():
"""With compressed attention."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"compressed_att" if l == "att" else l for l in hparams.decoder_layers]
hparams.compression_factor = 4
return hparams
|
[
"def",
"xmoe2_v1_l4k_compressed_c4",
"(",
")",
":",
"hparams",
"=",
"xmoe2_v1_l4k",
"(",
")",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"compressed_att\"",
"if",
"l",
"==",
"\"att\"",
"else",
"l",
"for",
"l",
"in",
"hparams",
".",
"decoder_layers",
"]",
"hparams",
".",
"compression_factor",
"=",
"4",
"return",
"hparams"
] |
With compressed attention.
|
[
"With",
"compressed",
"attention",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L374-L380
|
21,758
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
wiki_2x2_base
|
def wiki_2x2_base():
"""Set of architectural experiments - language model on wikipedia on a 2x2.
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.shared_embedding_and_softmax_weights = False
# no dropout - dataset is big enough to avoid overfitting.
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
# 4 sequences per core
hparams.batch_size = 32
# We don't use linear decay in these experiments, since we don't want
# a sharp jump in quality at the end of the training schedule.
# You can insert this once you find the right architecture.
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.mesh_shape = "all:8"
hparams.layout = "batch:all;experts:all"
# parameters for mixture-of-experts
moe.set_default_moe_hparams(hparams)
hparams.moe_num_experts = 16
hparams.moe_hidden_size = 8192
hparams.decoder_layers = ["att", "drd"] * 6
hparams.d_model = 1024
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.num_heads = 4
return hparams
|
python
|
def wiki_2x2_base():
"""Set of architectural experiments - language model on wikipedia on a 2x2.
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.shared_embedding_and_softmax_weights = False
# no dropout - dataset is big enough to avoid overfitting.
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
# 4 sequences per core
hparams.batch_size = 32
# We don't use linear decay in these experiments, since we don't want
# a sharp jump in quality at the end of the training schedule.
# You can insert this once you find the right architecture.
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.mesh_shape = "all:8"
hparams.layout = "batch:all;experts:all"
# parameters for mixture-of-experts
moe.set_default_moe_hparams(hparams)
hparams.moe_num_experts = 16
hparams.moe_hidden_size = 8192
hparams.decoder_layers = ["att", "drd"] * 6
hparams.d_model = 1024
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.num_heads = 4
return hparams
|
[
"def",
"wiki_2x2_base",
"(",
")",
":",
"hparams",
"=",
"mtf_transformer",
".",
"mtf_transformer_base_lm",
"(",
")",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"False",
"# no dropout - dataset is big enough to avoid overfitting.",
"hparams",
".",
"attention_dropout",
"=",
"0.0",
"hparams",
".",
"relu_dropout",
"=",
"0.0",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.0",
"hparams",
".",
"max_length",
"=",
"1024",
"# 4 sequences per core",
"hparams",
".",
"batch_size",
"=",
"32",
"# We don't use linear decay in these experiments, since we don't want",
"# a sharp jump in quality at the end of the training schedule.",
"# You can insert this once you find the right architecture.",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"rsqrt_decay\"",
"hparams",
".",
"mesh_shape",
"=",
"\"all:8\"",
"hparams",
".",
"layout",
"=",
"\"batch:all;experts:all\"",
"# parameters for mixture-of-experts",
"moe",
".",
"set_default_moe_hparams",
"(",
"hparams",
")",
"hparams",
".",
"moe_num_experts",
"=",
"16",
"hparams",
".",
"moe_hidden_size",
"=",
"8192",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"att\"",
",",
"\"drd\"",
"]",
"*",
"6",
"hparams",
".",
"d_model",
"=",
"1024",
"hparams",
".",
"d_ff",
"=",
"2048",
"hparams",
".",
"d_kv",
"=",
"128",
"hparams",
".",
"num_heads",
"=",
"4",
"return",
"hparams"
] |
Set of architectural experiments - language model on wikipedia on a 2x2.
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams
|
[
"Set",
"of",
"architectural",
"experiments",
"-",
"language",
"model",
"on",
"wikipedia",
"on",
"a",
"2x2",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L392-L427
|
21,759
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
denoise_z15
|
def denoise_z15():
"""Replace tokens instead of masking."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
hparams.noising_use_eval_during_train = 0.25
return hparams
|
python
|
def denoise_z15():
"""Replace tokens instead of masking."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
hparams.noising_use_eval_during_train = 0.25
return hparams
|
[
"def",
"denoise_z15",
"(",
")",
":",
"hparams",
"=",
"xmoe2_dense_0",
"(",
")",
"hparams",
".",
"decoder_type",
"=",
"\"denoising\"",
"hparams",
".",
"noising_spec_train",
"=",
"{",
"\"type\"",
":",
"\"random_zipfian\"",
",",
"\"prob\"",
":",
"0.15",
"}",
"hparams",
".",
"noising_use_eval_during_train",
"=",
"0.25",
"return",
"hparams"
] |
Replace tokens instead of masking.
|
[
"Replace",
"tokens",
"instead",
"of",
"masking",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L474-L480
|
21,760
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
denoise_v1_m15
|
def denoise_v1_m15():
"""Denoising experiment."""
hparams = xmoe2_v1()
# no local attention
# TODO(noam): non-masked version of local-attention
hparams.decoder_layers = [
"att" if l == "local_att" else l for l in hparams.decoder_layers]
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
return hparams
|
python
|
def denoise_v1_m15():
"""Denoising experiment."""
hparams = xmoe2_v1()
# no local attention
# TODO(noam): non-masked version of local-attention
hparams.decoder_layers = [
"att" if l == "local_att" else l for l in hparams.decoder_layers]
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
return hparams
|
[
"def",
"denoise_v1_m15",
"(",
")",
":",
"hparams",
"=",
"xmoe2_v1",
"(",
")",
"# no local attention",
"# TODO(noam): non-masked version of local-attention",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"att\"",
"if",
"l",
"==",
"\"local_att\"",
"else",
"l",
"for",
"l",
"in",
"hparams",
".",
"decoder_layers",
"]",
"hparams",
".",
"decoder_type",
"=",
"\"denoising\"",
"hparams",
".",
"noising_spec_train",
"=",
"{",
"\"type\"",
":",
"\"mask\"",
",",
"\"prob\"",
":",
"0.15",
"}",
"return",
"hparams"
] |
Denoising experiment.
|
[
"Denoising",
"experiment",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L503-L512
|
21,761
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/sari_hook.py
|
_get_ngram_counter
|
def _get_ngram_counter(ids, n):
"""Get a Counter with the ngrams of the given ID list.
Args:
ids: np.array or a list corresponding to a single sentence
n: n-gram size
Returns:
collections.Counter with ID tuples as keys and 1s as values.
"""
# Remove zero IDs used to pad the sequence.
ids = [token_id for token_id in ids if token_id != 0]
ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
ngrams = set(ngram_list)
counts = collections.Counter()
for ngram in ngrams:
counts[ngram] = 1
return counts
|
python
|
def _get_ngram_counter(ids, n):
"""Get a Counter with the ngrams of the given ID list.
Args:
ids: np.array or a list corresponding to a single sentence
n: n-gram size
Returns:
collections.Counter with ID tuples as keys and 1s as values.
"""
# Remove zero IDs used to pad the sequence.
ids = [token_id for token_id in ids if token_id != 0]
ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
ngrams = set(ngram_list)
counts = collections.Counter()
for ngram in ngrams:
counts[ngram] = 1
return counts
|
[
"def",
"_get_ngram_counter",
"(",
"ids",
",",
"n",
")",
":",
"# Remove zero IDs used to pad the sequence.",
"ids",
"=",
"[",
"token_id",
"for",
"token_id",
"in",
"ids",
"if",
"token_id",
"!=",
"0",
"]",
"ngram_list",
"=",
"[",
"tuple",
"(",
"ids",
"[",
"i",
":",
"i",
"+",
"n",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ids",
")",
"+",
"1",
"-",
"n",
")",
"]",
"ngrams",
"=",
"set",
"(",
"ngram_list",
")",
"counts",
"=",
"collections",
".",
"Counter",
"(",
")",
"for",
"ngram",
"in",
"ngrams",
":",
"counts",
"[",
"ngram",
"]",
"=",
"1",
"return",
"counts"
] |
Get a Counter with the ngrams of the given ID list.
Args:
ids: np.array or a list corresponding to a single sentence
n: n-gram size
Returns:
collections.Counter with ID tuples as keys and 1s as values.
|
[
"Get",
"a",
"Counter",
"with",
"the",
"ngrams",
"of",
"the",
"given",
"ID",
"list",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/sari_hook.py#L50-L67
|
21,762
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/sari_hook.py
|
_get_fbeta_score
|
def _get_fbeta_score(true_positives, selected, relevant, beta=1):
"""Compute Fbeta score.
Args:
true_positives: Number of true positive ngrams.
selected: Number of selected ngrams.
relevant: Number of relevant ngrams.
beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
Returns:
Fbeta score.
"""
precision = 1
if selected > 0:
precision = true_positives / selected
if beta == 0:
return precision
recall = 1
if relevant > 0:
recall = true_positives / relevant
if precision > 0 and recall > 0:
beta2 = beta * beta
return (1 + beta2) * precision * recall / (beta2 * precision + recall)
else:
return 0
|
python
|
def _get_fbeta_score(true_positives, selected, relevant, beta=1):
"""Compute Fbeta score.
Args:
true_positives: Number of true positive ngrams.
selected: Number of selected ngrams.
relevant: Number of relevant ngrams.
beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
Returns:
Fbeta score.
"""
precision = 1
if selected > 0:
precision = true_positives / selected
if beta == 0:
return precision
recall = 1
if relevant > 0:
recall = true_positives / relevant
if precision > 0 and recall > 0:
beta2 = beta * beta
return (1 + beta2) * precision * recall / (beta2 * precision + recall)
else:
return 0
|
[
"def",
"_get_fbeta_score",
"(",
"true_positives",
",",
"selected",
",",
"relevant",
",",
"beta",
"=",
"1",
")",
":",
"precision",
"=",
"1",
"if",
"selected",
">",
"0",
":",
"precision",
"=",
"true_positives",
"/",
"selected",
"if",
"beta",
"==",
"0",
":",
"return",
"precision",
"recall",
"=",
"1",
"if",
"relevant",
">",
"0",
":",
"recall",
"=",
"true_positives",
"/",
"relevant",
"if",
"precision",
">",
"0",
"and",
"recall",
">",
"0",
":",
"beta2",
"=",
"beta",
"*",
"beta",
"return",
"(",
"1",
"+",
"beta2",
")",
"*",
"precision",
"*",
"recall",
"/",
"(",
"beta2",
"*",
"precision",
"+",
"recall",
")",
"else",
":",
"return",
"0"
] |
Compute Fbeta score.
Args:
true_positives: Number of true positive ngrams.
selected: Number of selected ngrams.
relevant: Number of relevant ngrams.
beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
Returns:
Fbeta score.
|
[
"Compute",
"Fbeta",
"score",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/sari_hook.py#L70-L94
|
21,763
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/sari_hook.py
|
get_sari_score
|
def get_sari_score(source_ids, prediction_ids, list_of_targets,
max_gram_size=4, beta_for_deletion=0):
"""Compute the SARI score for a single prediction and one or more targets.
Args:
source_ids: a list / np.array of SentencePiece IDs
prediction_ids: a list / np.array of SentencePiece IDs
list_of_targets: a list of target ID lists / np.arrays
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
beta_for_deletion: beta for deletion F score.
Returns:
the SARI score and its three components: add, keep, and deletion scores
"""
addition_scores = []
keep_scores = []
deletion_scores = []
for n in range(1, max_gram_size + 1):
source_counts = _get_ngram_counter(source_ids, n)
prediction_counts = _get_ngram_counter(prediction_ids, n)
# All ngrams in the targets with count 1.
target_counts = collections.Counter()
# All ngrams in the targets with count r/num_targets, where r is the number
# of targets where the ngram occurs.
weighted_target_counts = collections.Counter()
num_nonempty_targets = 0
for target_ids_i in list_of_targets:
target_counts_i = _get_ngram_counter(target_ids_i, n)
if target_counts_i:
weighted_target_counts += target_counts_i
num_nonempty_targets += 1
for gram in weighted_target_counts.keys():
weighted_target_counts[gram] /= num_nonempty_targets
target_counts[gram] = 1
keep_scores.append(get_keep_score(source_counts, prediction_counts,
weighted_target_counts))
deletion_scores.append(get_deletion_score(source_counts, prediction_counts,
weighted_target_counts,
beta_for_deletion))
addition_scores.append(get_addition_score(source_counts, prediction_counts,
target_counts))
avg_keep_score = sum(keep_scores) / max_gram_size
avg_addition_score = sum(addition_scores) / max_gram_size
avg_deletion_score = sum(deletion_scores) / max_gram_size
sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0
return sari, avg_keep_score, avg_addition_score, avg_deletion_score
|
python
|
def get_sari_score(source_ids, prediction_ids, list_of_targets,
max_gram_size=4, beta_for_deletion=0):
"""Compute the SARI score for a single prediction and one or more targets.
Args:
source_ids: a list / np.array of SentencePiece IDs
prediction_ids: a list / np.array of SentencePiece IDs
list_of_targets: a list of target ID lists / np.arrays
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
beta_for_deletion: beta for deletion F score.
Returns:
the SARI score and its three components: add, keep, and deletion scores
"""
addition_scores = []
keep_scores = []
deletion_scores = []
for n in range(1, max_gram_size + 1):
source_counts = _get_ngram_counter(source_ids, n)
prediction_counts = _get_ngram_counter(prediction_ids, n)
# All ngrams in the targets with count 1.
target_counts = collections.Counter()
# All ngrams in the targets with count r/num_targets, where r is the number
# of targets where the ngram occurs.
weighted_target_counts = collections.Counter()
num_nonempty_targets = 0
for target_ids_i in list_of_targets:
target_counts_i = _get_ngram_counter(target_ids_i, n)
if target_counts_i:
weighted_target_counts += target_counts_i
num_nonempty_targets += 1
for gram in weighted_target_counts.keys():
weighted_target_counts[gram] /= num_nonempty_targets
target_counts[gram] = 1
keep_scores.append(get_keep_score(source_counts, prediction_counts,
weighted_target_counts))
deletion_scores.append(get_deletion_score(source_counts, prediction_counts,
weighted_target_counts,
beta_for_deletion))
addition_scores.append(get_addition_score(source_counts, prediction_counts,
target_counts))
avg_keep_score = sum(keep_scores) / max_gram_size
avg_addition_score = sum(addition_scores) / max_gram_size
avg_deletion_score = sum(deletion_scores) / max_gram_size
sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0
return sari, avg_keep_score, avg_addition_score, avg_deletion_score
|
[
"def",
"get_sari_score",
"(",
"source_ids",
",",
"prediction_ids",
",",
"list_of_targets",
",",
"max_gram_size",
"=",
"4",
",",
"beta_for_deletion",
"=",
"0",
")",
":",
"addition_scores",
"=",
"[",
"]",
"keep_scores",
"=",
"[",
"]",
"deletion_scores",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"max_gram_size",
"+",
"1",
")",
":",
"source_counts",
"=",
"_get_ngram_counter",
"(",
"source_ids",
",",
"n",
")",
"prediction_counts",
"=",
"_get_ngram_counter",
"(",
"prediction_ids",
",",
"n",
")",
"# All ngrams in the targets with count 1.",
"target_counts",
"=",
"collections",
".",
"Counter",
"(",
")",
"# All ngrams in the targets with count r/num_targets, where r is the number",
"# of targets where the ngram occurs.",
"weighted_target_counts",
"=",
"collections",
".",
"Counter",
"(",
")",
"num_nonempty_targets",
"=",
"0",
"for",
"target_ids_i",
"in",
"list_of_targets",
":",
"target_counts_i",
"=",
"_get_ngram_counter",
"(",
"target_ids_i",
",",
"n",
")",
"if",
"target_counts_i",
":",
"weighted_target_counts",
"+=",
"target_counts_i",
"num_nonempty_targets",
"+=",
"1",
"for",
"gram",
"in",
"weighted_target_counts",
".",
"keys",
"(",
")",
":",
"weighted_target_counts",
"[",
"gram",
"]",
"/=",
"num_nonempty_targets",
"target_counts",
"[",
"gram",
"]",
"=",
"1",
"keep_scores",
".",
"append",
"(",
"get_keep_score",
"(",
"source_counts",
",",
"prediction_counts",
",",
"weighted_target_counts",
")",
")",
"deletion_scores",
".",
"append",
"(",
"get_deletion_score",
"(",
"source_counts",
",",
"prediction_counts",
",",
"weighted_target_counts",
",",
"beta_for_deletion",
")",
")",
"addition_scores",
".",
"append",
"(",
"get_addition_score",
"(",
"source_counts",
",",
"prediction_counts",
",",
"target_counts",
")",
")",
"avg_keep_score",
"=",
"sum",
"(",
"keep_scores",
")",
"/",
"max_gram_size",
"avg_addition_score",
"=",
"sum",
"(",
"addition_scores",
")",
"/",
"max_gram_size",
"avg_deletion_score",
"=",
"sum",
"(",
"deletion_scores",
")",
"/",
"max_gram_size",
"sari",
"=",
"(",
"avg_keep_score",
"+",
"avg_addition_score",
"+",
"avg_deletion_score",
")",
"/",
"3.0",
"return",
"sari",
",",
"avg_keep_score",
",",
"avg_addition_score",
",",
"avg_deletion_score"
] |
Compute the SARI score for a single prediction and one or more targets.
Args:
source_ids: a list / np.array of SentencePiece IDs
prediction_ids: a list / np.array of SentencePiece IDs
list_of_targets: a list of target ID lists / np.arrays
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
beta_for_deletion: beta for deletion F score.
Returns:
the SARI score and its three components: add, keep, and deletion scores
|
[
"Compute",
"the",
"SARI",
"score",
"for",
"a",
"single",
"prediction",
"and",
"one",
"or",
"more",
"targets",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/sari_hook.py#L132-L179
|
21,764
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mnist.py
|
_get_mnist
|
def _get_mnist(directory):
"""Download all MNIST files to directory unless they are there."""
for filename in [
_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
_MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
]:
generator_utils.maybe_download(directory, filename, _MNIST_URL + filename)
|
python
|
def _get_mnist(directory):
"""Download all MNIST files to directory unless they are there."""
for filename in [
_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
_MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
]:
generator_utils.maybe_download(directory, filename, _MNIST_URL + filename)
|
[
"def",
"_get_mnist",
"(",
"directory",
")",
":",
"for",
"filename",
"in",
"[",
"_MNIST_TRAIN_DATA_FILENAME",
",",
"_MNIST_TRAIN_LABELS_FILENAME",
",",
"_MNIST_TEST_DATA_FILENAME",
",",
"_MNIST_TEST_LABELS_FILENAME",
"]",
":",
"generator_utils",
".",
"maybe_download",
"(",
"directory",
",",
"filename",
",",
"_MNIST_URL",
"+",
"filename",
")"
] |
Download all MNIST files to directory unless they are there.
|
[
"Download",
"all",
"MNIST",
"files",
"to",
"directory",
"unless",
"they",
"are",
"there",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L42-L48
|
21,765
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mnist.py
|
_extract_mnist_images
|
def _extract_mnist_images(filename, num_images):
"""Extract images from an MNIST file into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1)
return data
|
python
|
def _extract_mnist_images(filename, num_images):
"""Extract images from an MNIST file into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1)
return data
|
[
"def",
"_extract_mnist_images",
"(",
"filename",
",",
"num_images",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
")",
"as",
"bytestream",
":",
"bytestream",
".",
"read",
"(",
"16",
")",
"buf",
"=",
"bytestream",
".",
"read",
"(",
"_MNIST_IMAGE_SIZE",
"*",
"_MNIST_IMAGE_SIZE",
"*",
"num_images",
")",
"data",
"=",
"np",
".",
"frombuffer",
"(",
"buf",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"data",
"=",
"data",
".",
"reshape",
"(",
"num_images",
",",
"_MNIST_IMAGE_SIZE",
",",
"_MNIST_IMAGE_SIZE",
",",
"1",
")",
"return",
"data"
] |
Extract images from an MNIST file into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
|
[
"Extract",
"images",
"from",
"an",
"MNIST",
"file",
"into",
"a",
"numpy",
"array",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L51-L66
|
21,766
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mnist.py
|
_extract_mnist_labels
|
def _extract_mnist_labels(filename, num_labels):
"""Extract labels from an MNIST file into integers.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A int64 numpy array of shape [num_labels]
"""
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
|
python
|
def _extract_mnist_labels(filename, num_labels):
"""Extract labels from an MNIST file into integers.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A int64 numpy array of shape [num_labels]
"""
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
|
[
"def",
"_extract_mnist_labels",
"(",
"filename",
",",
"num_labels",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
")",
"as",
"bytestream",
":",
"bytestream",
".",
"read",
"(",
"8",
")",
"buf",
"=",
"bytestream",
".",
"read",
"(",
"num_labels",
")",
"labels",
"=",
"np",
".",
"frombuffer",
"(",
"buf",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"return",
"labels"
] |
Extract labels from an MNIST file into integers.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A int64 numpy array of shape [num_labels]
|
[
"Extract",
"labels",
"from",
"an",
"MNIST",
"file",
"into",
"integers",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L69-L83
|
21,767
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mnist.py
|
_get_fashion_mnist
|
def _get_fashion_mnist(directory):
"""Download all FashionMNIST files to directory unless they are there."""
# Fashion mnist files have the same names as MNIST.
# We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir.
for filename in [
_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
_MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
]:
generator_utils.maybe_download(directory,
_FASHION_MNIST_LOCAL_FILE_PREFIX + filename,
_FASHION_MNIST_URL + filename)
|
python
|
def _get_fashion_mnist(directory):
"""Download all FashionMNIST files to directory unless they are there."""
# Fashion mnist files have the same names as MNIST.
# We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir.
for filename in [
_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
_MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
]:
generator_utils.maybe_download(directory,
_FASHION_MNIST_LOCAL_FILE_PREFIX + filename,
_FASHION_MNIST_URL + filename)
|
[
"def",
"_get_fashion_mnist",
"(",
"directory",
")",
":",
"# Fashion mnist files have the same names as MNIST.",
"# We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir.",
"for",
"filename",
"in",
"[",
"_MNIST_TRAIN_DATA_FILENAME",
",",
"_MNIST_TRAIN_LABELS_FILENAME",
",",
"_MNIST_TEST_DATA_FILENAME",
",",
"_MNIST_TEST_LABELS_FILENAME",
"]",
":",
"generator_utils",
".",
"maybe_download",
"(",
"directory",
",",
"_FASHION_MNIST_LOCAL_FILE_PREFIX",
"+",
"filename",
",",
"_FASHION_MNIST_URL",
"+",
"filename",
")"
] |
Download all FashionMNIST files to directory unless they are there.
|
[
"Download",
"all",
"FashionMNIST",
"files",
"to",
"directory",
"unless",
"they",
"are",
"there",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L191-L201
|
21,768
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mnist.py
|
fashion_mnist_generator
|
def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
  """Image generator for FashionMNIST.

  Args:
    tmp_dir: path to temporary storage directory.
    training: a Boolean; if true, we use the train set, otherwise the test set.
    how_many: how many images and labels to generate.
    start_from: from which image to start.

  Returns:
    An instance of image_generator that produces MNIST images.
  """
  _get_fashion_mnist(tmp_dir)
  if training:
    data_name = _MNIST_TRAIN_DATA_FILENAME
    labels_name = _MNIST_TRAIN_LABELS_FILENAME
  else:
    data_name = _MNIST_TEST_DATA_FILENAME
    labels_name = _MNIST_TEST_LABELS_FILENAME
  # Local copies were written with the 'fashion-' prefix by _get_fashion_mnist.
  data_file = _FASHION_MNIST_LOCAL_FILE_PREFIX + data_name
  labels_file = _FASHION_MNIST_LOCAL_FILE_PREFIX + labels_name
  return mnist_common_generator(tmp_dir, training, how_many, data_file,
                                labels_file, start_from)
|
python
|
def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
"""Image generator for FashionMNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
"""
_get_fashion_mnist(tmp_dir)
d = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
_MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME)
l = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
_MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME)
return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
|
[
"def",
"fashion_mnist_generator",
"(",
"tmp_dir",
",",
"training",
",",
"how_many",
",",
"start_from",
"=",
"0",
")",
":",
"_get_fashion_mnist",
"(",
"tmp_dir",
")",
"d",
"=",
"_FASHION_MNIST_LOCAL_FILE_PREFIX",
"+",
"(",
"_MNIST_TRAIN_DATA_FILENAME",
"if",
"training",
"else",
"_MNIST_TEST_DATA_FILENAME",
")",
"l",
"=",
"_FASHION_MNIST_LOCAL_FILE_PREFIX",
"+",
"(",
"_MNIST_TRAIN_LABELS_FILENAME",
"if",
"training",
"else",
"_MNIST_TEST_LABELS_FILENAME",
")",
"return",
"mnist_common_generator",
"(",
"tmp_dir",
",",
"training",
",",
"how_many",
",",
"d",
",",
"l",
",",
"start_from",
")"
] |
Image generator for FashionMNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
|
[
"Image",
"generator",
"for",
"FashionMNIST",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L204-L221
|
21,769
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/timeseries_data_generator.py
|
generate_data
|
def generate_data(timeseries_length, timeseries_params):
  """Generates synthetic timeseries using input parameters.

  Each generated timeseries has timeseries_length data points.
  Parameters for each timeseries are specified by timeseries_params.

  Args:
    timeseries_length: Number of data points to generate for each timeseries.
    timeseries_params: Parameters used to generate the timeseries. The following
      parameters need to be specified for each timeseries:
      m = Slope of the timeseries used to compute the timeseries trend.
      b = y-intercept of the timeseries used to compute the timeseries trend.
      A = Timeseries amplitude used to compute timeseries period.
      freqcoeff = Frequency coefficient used to compute timeseries period.
      rndA = Random amplitude used to inject noise into the timeseries.
      fn = Base timeseries function (np.cos or np.sin).
      Example params for two timeseries.
      [{"m": 0.006, "b": 300.0, "A":50.0, "freqcoeff":1500.0, "rndA":15.0,
        "fn": np.sin},
       {"m": 0.000, "b": 500.0, "A":35.0, "freqcoeff":3500.0, "rndA":25.0,
        "fn": np.cos}]

  Returns:
    Multi-timeseries (list of list).
  """
  steps = range(timeseries_length)
  multi_timeseries = []
  for params in timeseries_params:
    # Linear trend component.
    trend = [params["m"] * t + params["b"] for t in steps]
    # Periodic component.
    period = [params["A"] * params["fn"](t / params["freqcoeff"]) for t in steps]
    # Gaussian noise component.
    noise = np.random.normal(0, params["rndA"], timeseries_length).tolist()
    # Combine the components, clamping negative values to zero.
    series = [max(t + p + e, 0) for t, p, e in zip(trend, period, noise)]
    multi_timeseries.append(series)
  return multi_timeseries
|
python
|
def generate_data(timeseries_length, timeseries_params):
"""Generates synthetic timeseries using input parameters.
Each generated timeseries has timeseries_length data points.
Parameters for each timeseries are specified by timeseries_params.
Args:
timeseries_length: Number of data points to generate for each timeseries.
timeseries_params: Parameters used to generate the timeseries. The following
parameters need to be specified for each timeseries:
m = Slope of the timeseries used to compute the timeseries trend.
b = y-intercept of the timeseries used to compute the timeseries trend.
A = Timeseries amplitude used to compute timeseries period.
freqcoeff = Frequency coefficient used to compute timeseries period.
rndA = Random amplitude used to inject noise into the timeseries.
fn = Base timeseries function (np.cos or np.sin).
Example params for two timeseries.
[{"m": 0.006, "b": 300.0, "A":50.0, "freqcoeff":1500.0, "rndA":15.0,
"fn": np.sin},
{"m": 0.000, "b": 500.0, "A":35.0, "freqcoeff":3500.0, "rndA":25.0,
"fn": np.cos}]
Returns:
Multi-timeseries (list of list).
"""
x = range(timeseries_length)
multi_timeseries = []
for p in timeseries_params:
# Trend
y1 = [p["m"] * i + p["b"] for i in x]
# Period
y2 = [p["A"] * p["fn"](i / p["freqcoeff"]) for i in x]
# Noise
y3 = np.random.normal(0, p["rndA"], timeseries_length).tolist()
# Sum of Trend, Period and Noise. Replace negative values with zero.
y = [max(a + b + c, 0) for a, b, c in zip(y1, y2, y3)]
multi_timeseries.append(y)
return multi_timeseries
|
[
"def",
"generate_data",
"(",
"timeseries_length",
",",
"timeseries_params",
")",
":",
"x",
"=",
"range",
"(",
"timeseries_length",
")",
"multi_timeseries",
"=",
"[",
"]",
"for",
"p",
"in",
"timeseries_params",
":",
"# Trend",
"y1",
"=",
"[",
"p",
"[",
"\"m\"",
"]",
"*",
"i",
"+",
"p",
"[",
"\"b\"",
"]",
"for",
"i",
"in",
"x",
"]",
"# Period",
"y2",
"=",
"[",
"p",
"[",
"\"A\"",
"]",
"*",
"p",
"[",
"\"fn\"",
"]",
"(",
"i",
"/",
"p",
"[",
"\"freqcoeff\"",
"]",
")",
"for",
"i",
"in",
"x",
"]",
"# Noise",
"y3",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"p",
"[",
"\"rndA\"",
"]",
",",
"timeseries_length",
")",
".",
"tolist",
"(",
")",
"# Sum of Trend, Period and Noise. Replace negative values with zero.",
"y",
"=",
"[",
"max",
"(",
"a",
"+",
"b",
"+",
"c",
",",
"0",
")",
"for",
"a",
",",
"b",
",",
"c",
"in",
"zip",
"(",
"y1",
",",
"y2",
",",
"y3",
")",
"]",
"multi_timeseries",
".",
"append",
"(",
"y",
")",
"return",
"multi_timeseries"
] |
Generates synthetic timeseries using input parameters.
Each generated timeseries has timeseries_length data points.
Parameters for each timeseries are specified by timeseries_params.
Args:
timeseries_length: Number of data points to generate for each timeseries.
timeseries_params: Parameters used to generate the timeseries. The following
parameters need to be specified for each timeseries:
m = Slope of the timeseries used to compute the timeseries trend.
b = y-intercept of the timeseries used to compute the timeseries trend.
A = Timeseries amplitude used to compute timeseries period.
freqcoeff = Frequency coefficient used to compute timeseries period.
rndA = Random amplitude used to inject noise into the timeseries.
fn = Base timeseries function (np.cos or np.sin).
Example params for two timeseries.
[{"m": 0.006, "b": 300.0, "A":50.0, "freqcoeff":1500.0, "rndA":15.0,
"fn": np.sin},
{"m": 0.000, "b": 500.0, "A":35.0, "freqcoeff":3500.0, "rndA":25.0,
"fn": np.cos}]
Returns:
Multi-timeseries (list of list).
|
[
"Generates",
"synthetic",
"timeseries",
"using",
"input",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/timeseries_data_generator.py#L24-L63
|
21,770
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/basic_stochastic.py
|
next_frame_basic_stochastic_discrete
|
def next_frame_basic_stochastic_discrete():
  """Basic 2-frame conv model with stochastic discrete latent."""
  hparams = basic_deterministic_params.next_frame_sampling()
  # Overrides of existing hyper-parameters.
  hparams.batch_size = 4
  hparams.video_num_target_frames = 6
  hparams.scheduled_sampling_mode = "prob_inverse_lin"
  hparams.scheduled_sampling_decay_steps = 40000
  hparams.scheduled_sampling_max_prob = 1.0
  hparams.dropout = 0.15
  hparams.filter_double_steps = 3
  hparams.hidden_size = 96
  hparams.learning_rate_constant = 0.002
  hparams.learning_rate_warmup_steps = 2000
  hparams.learning_rate_schedule = "linear_warmup * constant"
  hparams.concat_internal_states = True
  hparams.video_modality_loss_cutoff = 0.03
  # Latent-specific knobs are new hyper-parameters, registered in one pass.
  for name, value in [
      ("bottleneck_bits", 128),
      ("bottleneck_noise", 0.1),
      ("discretize_warmup_steps", 40000),
      ("latent_rnn_warmup_steps", 40000),
      ("latent_rnn_max_sampling", 0.5),
      ("latent_use_max_probability", 0.8),
      ("full_latent_tower", False),
      ("latent_predictor_state_size", 128),
      ("latent_predictor_temperature", 1.0),
      ("complex_addn", True),
      ("recurrent_state_size", 64),
  ]:
    hparams.add_hparam(name, value)
  return hparams
|
python
|
def next_frame_basic_stochastic_discrete():
"""Basic 2-frame conv model with stochastic discrete latent."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.batch_size = 4
hparams.video_num_target_frames = 6
hparams.scheduled_sampling_mode = "prob_inverse_lin"
hparams.scheduled_sampling_decay_steps = 40000
hparams.scheduled_sampling_max_prob = 1.0
hparams.dropout = 0.15
hparams.filter_double_steps = 3
hparams.hidden_size = 96
hparams.learning_rate_constant = 0.002
hparams.learning_rate_warmup_steps = 2000
hparams.learning_rate_schedule = "linear_warmup * constant"
hparams.concat_internal_states = True
hparams.video_modality_loss_cutoff = 0.03
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.1)
hparams.add_hparam("discretize_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_max_sampling", 0.5)
hparams.add_hparam("latent_use_max_probability", 0.8)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 1.0)
hparams.add_hparam("complex_addn", True)
hparams.add_hparam("recurrent_state_size", 64)
return hparams
|
[
"def",
"next_frame_basic_stochastic_discrete",
"(",
")",
":",
"hparams",
"=",
"basic_deterministic_params",
".",
"next_frame_sampling",
"(",
")",
"hparams",
".",
"batch_size",
"=",
"4",
"hparams",
".",
"video_num_target_frames",
"=",
"6",
"hparams",
".",
"scheduled_sampling_mode",
"=",
"\"prob_inverse_lin\"",
"hparams",
".",
"scheduled_sampling_decay_steps",
"=",
"40000",
"hparams",
".",
"scheduled_sampling_max_prob",
"=",
"1.0",
"hparams",
".",
"dropout",
"=",
"0.15",
"hparams",
".",
"filter_double_steps",
"=",
"3",
"hparams",
".",
"hidden_size",
"=",
"96",
"hparams",
".",
"learning_rate_constant",
"=",
"0.002",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"2000",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"linear_warmup * constant\"",
"hparams",
".",
"concat_internal_states",
"=",
"True",
"hparams",
".",
"video_modality_loss_cutoff",
"=",
"0.03",
"hparams",
".",
"add_hparam",
"(",
"\"bottleneck_bits\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"bottleneck_noise\"",
",",
"0.1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"discretize_warmup_steps\"",
",",
"40000",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_rnn_warmup_steps\"",
",",
"40000",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_rnn_max_sampling\"",
",",
"0.5",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_use_max_probability\"",
",",
"0.8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"full_latent_tower\"",
",",
"False",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_predictor_state_size\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_predictor_temperature\"",
",",
"1.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"complex_addn\"",
",",
"True",
")",
"hparams",
".",
"add_hparam",
"(",
"\"recurrent_state_size\"",
",",
"64",
")",
"return",
"hparams"
] |
Basic 2-frame conv model with stochastic discrete latent.
|
[
"Basic",
"2",
"-",
"frame",
"conv",
"model",
"with",
"stochastic",
"discrete",
"latent",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_stochastic.py#L255-L282
|
21,771
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/basic_stochastic.py
|
next_frame_stochastic_discrete_range
|
def next_frame_stochastic_discrete_range(rhp):
  """Next frame stochastic discrete tuning grid."""
  # Registration order matches the original grid definition.
  for name, low, high in [("learning_rate_constant", 0.001, 0.01),
                          ("dropout", 0.2, 0.6)]:
    rhp.set_float(name, low, high)
  rhp.set_int("filter_double_steps", 3, 5)
  for name, choices in [("hidden_size", [64, 96, 128]),
                        ("bottleneck_bits", [32, 64, 128, 256]),
                        ("video_num_target_frames", [4])]:
    rhp.set_discrete(name, choices)
  rhp.set_float("bottleneck_noise", 0.0, 0.2)
|
python
|
def next_frame_stochastic_discrete_range(rhp):
"""Next frame stochastic discrete tuning grid."""
rhp.set_float("learning_rate_constant", 0.001, 0.01)
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_int("filter_double_steps", 3, 5)
rhp.set_discrete("hidden_size", [64, 96, 128])
rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256])
rhp.set_discrete("video_num_target_frames", [4])
rhp.set_float("bottleneck_noise", 0.0, 0.2)
|
[
"def",
"next_frame_stochastic_discrete_range",
"(",
"rhp",
")",
":",
"rhp",
".",
"set_float",
"(",
"\"learning_rate_constant\"",
",",
"0.001",
",",
"0.01",
")",
"rhp",
".",
"set_float",
"(",
"\"dropout\"",
",",
"0.2",
",",
"0.6",
")",
"rhp",
".",
"set_int",
"(",
"\"filter_double_steps\"",
",",
"3",
",",
"5",
")",
"rhp",
".",
"set_discrete",
"(",
"\"hidden_size\"",
",",
"[",
"64",
",",
"96",
",",
"128",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"bottleneck_bits\"",
",",
"[",
"32",
",",
"64",
",",
"128",
",",
"256",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"video_num_target_frames\"",
",",
"[",
"4",
"]",
")",
"rhp",
".",
"set_float",
"(",
"\"bottleneck_noise\"",
",",
"0.0",
",",
"0.2",
")"
] |
Next frame stochastic discrete tuning grid.
|
[
"Next",
"frame",
"stochastic",
"discrete",
"tuning",
"grid",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_stochastic.py#L295-L303
|
21,772
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
shapes
|
def shapes(x):
  """Get a structure of shapes for a structure of nested arrays."""
  def _shape_or_empty(obj):
    # Leaves without a usable .shape (plain Python values) report [].
    try:
      return obj.shape
    except Exception:  # pylint: disable=broad-except
      return []
  return nested_map(x, _shape_or_empty)
|
python
|
def shapes(x):
"""Get a structure of shapes for a structure of nested arrays."""
def shape(x):
try:
return x.shape
except Exception: # pylint: disable=broad-except
return []
return nested_map(x, shape)
|
[
"def",
"shapes",
"(",
"x",
")",
":",
"def",
"shape",
"(",
"x",
")",
":",
"try",
":",
"return",
"x",
".",
"shape",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"return",
"[",
"]",
"return",
"nested_map",
"(",
"x",
",",
"shape",
")"
] |
Get a structure of shapes for a structure of nested arrays.
|
[
"Get",
"a",
"structure",
"of",
"shapes",
"for",
"a",
"structure",
"of",
"nested",
"arrays",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L169-L176
|
21,773
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
sizes
|
def sizes(x):
  """Get a structure of sizes for a structure of nested arrays."""
  def _size_or_zero(obj):
    # Leaves without a usable .size (plain Python values) report 0.
    try:
      return obj.size
    except Exception:  # pylint: disable=broad-except
      return 0
  return nested_map(x, _size_or_zero)
|
python
|
def sizes(x):
"""Get a structure of sizes for a structure of nested arrays."""
def size(x):
try:
return x.size
except Exception: # pylint: disable=broad-except
return 0
return nested_map(x, size)
|
[
"def",
"sizes",
"(",
"x",
")",
":",
"def",
"size",
"(",
"x",
")",
":",
"try",
":",
"return",
"x",
".",
"size",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"return",
"0",
"return",
"nested_map",
"(",
"x",
",",
"size",
")"
] |
Get a structure of sizes for a structure of nested arrays.
|
[
"Get",
"a",
"structure",
"of",
"sizes",
"for",
"a",
"structure",
"of",
"nested",
"arrays",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L179-L186
|
21,774
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
_find_frame
|
def _find_frame(stack, start=0):
"""Find the frame with the caller on the stack."""
# We want to find the first place where the layer was called
# that is *not* an __init__ function of an inheriting layer.
frame = inspect.getframeinfo(stack[start][0])
# If we are in an init, move on.
if frame.function == '__init__':
return _find_frame(stack, start + 1)
return frame
|
python
|
def _find_frame(stack, start=0):
"""Find the frame with the caller on the stack."""
# We want to find the first place where the layer was called
# that is *not* an __init__ function of an inheriting layer.
frame = inspect.getframeinfo(stack[start][0])
# If we are in an init, move on.
if frame.function == '__init__':
return _find_frame(stack, start + 1)
return frame
|
[
"def",
"_find_frame",
"(",
"stack",
",",
"start",
"=",
"0",
")",
":",
"# We want to find the first place where the layer was called",
"# that is *not* an __init__ function of an inheriting layer.",
"frame",
"=",
"inspect",
".",
"getframeinfo",
"(",
"stack",
"[",
"start",
"]",
"[",
"0",
"]",
")",
"# If we are in an init, move on.",
"if",
"frame",
".",
"function",
"==",
"'__init__'",
":",
"return",
"_find_frame",
"(",
"stack",
",",
"start",
"+",
"1",
")",
"return",
"frame"
] |
Find the frame with the caller on the stack.
|
[
"Find",
"the",
"frame",
"with",
"the",
"caller",
"on",
"the",
"stack",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L189-L197
|
21,775
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
_shorten_file_path
|
def _shorten_file_path(line):
"""Shorten file path in error lines for more readable tracebacks."""
start = line.lower().find('file')
if start < 0:
return line
first_quote = line.find('"', start)
if first_quote < 0:
return line
second_quote = line.find('"', first_quote + 1)
if second_quote < 0:
return line
path = line[first_quote + 1:second_quote]
new_path = '/'.join(path.split('/')[-3:])
return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
|
python
|
def _shorten_file_path(line):
"""Shorten file path in error lines for more readable tracebacks."""
start = line.lower().find('file')
if start < 0:
return line
first_quote = line.find('"', start)
if first_quote < 0:
return line
second_quote = line.find('"', first_quote + 1)
if second_quote < 0:
return line
path = line[first_quote + 1:second_quote]
new_path = '/'.join(path.split('/')[-3:])
return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
|
[
"def",
"_shorten_file_path",
"(",
"line",
")",
":",
"start",
"=",
"line",
".",
"lower",
"(",
")",
".",
"find",
"(",
"'file'",
")",
"if",
"start",
"<",
"0",
":",
"return",
"line",
"first_quote",
"=",
"line",
".",
"find",
"(",
"'\"'",
",",
"start",
")",
"if",
"first_quote",
"<",
"0",
":",
"return",
"line",
"second_quote",
"=",
"line",
".",
"find",
"(",
"'\"'",
",",
"first_quote",
"+",
"1",
")",
"if",
"second_quote",
"<",
"0",
":",
"return",
"line",
"path",
"=",
"line",
"[",
"first_quote",
"+",
"1",
":",
"second_quote",
"]",
"new_path",
"=",
"'/'",
".",
"join",
"(",
"path",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"3",
":",
"]",
")",
"return",
"line",
"[",
":",
"first_quote",
"]",
"+",
"'[...]/'",
"+",
"new_path",
"+",
"line",
"[",
"second_quote",
"+",
"1",
":",
"]"
] |
Shorten file path in error lines for more readable tracebacks.
|
[
"Shorten",
"file",
"path",
"in",
"error",
"lines",
"for",
"more",
"readable",
"tracebacks",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L200-L213
|
21,776
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
_short_traceback
|
def _short_traceback(skip=3):
  """Cleaned-up form of traceback.

  Formats the current exception's traceback for readability: path prefixes
  are elided, a blank line is inserted after every second entry, and nested
  LayerError tracebacks (already formatted once) are spliced in verbatim.
  """
  counter, res = 0, []
  # Skipping 3 lines by default: the top (useless) and self-call.
  lines = traceback.format_exc().splitlines()[skip:]
  for l in lines:
    res.append(_shorten_file_path(l))
    # Traceback entries come in pairs (location line + source line); add a
    # blank separator after each pair.
    if counter % 2 == 1:
      res.append('')
    counter += 1
    # If we see a LayerError, the traceback has already been processed.
    if l.startswith('LayerError'):
      # Skip 4 back except last as these are internal base-layer calls.
      res = res[:-4] + [res[-1]]
      # Keep the remaining (pre-formatted) lines untouched and stop.
      res += lines[counter:]
      break
  return '\n'.join(res)
|
python
|
def _short_traceback(skip=3):
"""Cleaned-up form of traceback."""
counter, res = 0, []
# Skipping 3 lines by default: the top (useless) and self-call.
lines = traceback.format_exc().splitlines()[skip:]
for l in lines:
res.append(_shorten_file_path(l))
if counter % 2 == 1:
res.append('')
counter += 1
# If we see a LayerError, the traceback has already been processed.
if l.startswith('LayerError'):
# Skip 4 back except last as these are internal base-layer calls.
res = res[:-4] + [res[-1]]
res += lines[counter:]
break
return '\n'.join(res)
|
[
"def",
"_short_traceback",
"(",
"skip",
"=",
"3",
")",
":",
"counter",
",",
"res",
"=",
"0",
",",
"[",
"]",
"# Skipping 3 lines by default: the top (useless) and self-call.",
"lines",
"=",
"traceback",
".",
"format_exc",
"(",
")",
".",
"splitlines",
"(",
")",
"[",
"skip",
":",
"]",
"for",
"l",
"in",
"lines",
":",
"res",
".",
"append",
"(",
"_shorten_file_path",
"(",
"l",
")",
")",
"if",
"counter",
"%",
"2",
"==",
"1",
":",
"res",
".",
"append",
"(",
"''",
")",
"counter",
"+=",
"1",
"# If we see a LayerError, the traceback has already been processed.",
"if",
"l",
".",
"startswith",
"(",
"'LayerError'",
")",
":",
"# Skip 4 back except last as these are internal base-layer calls.",
"res",
"=",
"res",
"[",
":",
"-",
"4",
"]",
"+",
"[",
"res",
"[",
"-",
"1",
"]",
"]",
"res",
"+=",
"lines",
"[",
"counter",
":",
"]",
"break",
"return",
"'\\n'",
".",
"join",
"(",
"res",
")"
] |
Cleaned-up form of traceback.
|
[
"Cleaned",
"-",
"up",
"form",
"of",
"traceback",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L216-L232
|
21,777
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
layer
|
def layer(output_shape=None, new_parameters=None):
  """Create a layer class from a function.

  Args:
    output_shape: optional function (input_shape, **init_kwargs) -> shape;
      when omitted the layer is shape-preserving.
    new_parameters: optional function (input_shape, rng, **init_kwargs) ->
      params; when omitted the layer is parameter-free.

  Returns:
    A decorator that turns a call function into a Layer subclass.
  """
  def layer_decorator(call):
    """Decorating the call function."""
    def output_shape_fun(self, input_shape):
      if output_shape is None:
        return input_shape
      kwargs = self._init_kwargs  # pylint: disable=protected-access
      return output_shape(input_shape, **kwargs)

    def new_parameters_fun(self, input_shape, rng):
      if new_parameters is None:
        return ()
      kwargs = self._init_kwargs  # pylint: disable=protected-access
      return new_parameters(input_shape, rng, **kwargs)

    def call_fun(self, x, params=(), **kwargs):
      """The call function of the created class, derived from call."""
      # Merge on-call kwargs with class-kwargs.
      call_kwargs = kwargs.copy()
      call_kwargs.update(self._init_kwargs)  # pylint: disable=protected-access
      # Call with the merged kwargs.
      return call(x, params=params, **call_kwargs)

    # Set doc for python help.
    call_fun.__doc__ = call.__doc__
    # Bug fix: the conditions were inverted (`is None`), which copied
    # NoneType's docstring when no function was supplied and failed to copy
    # the docstring of a supplied function.
    if output_shape is not None:
      output_shape_fun.__doc__ = output_shape.__doc__
    if new_parameters is not None:
      new_parameters_fun.__doc__ = new_parameters.__doc__

    # Create the class.
    cls = type(call.__name__, (Layer,),
               {'call': call_fun,
                'output_shape': output_shape_fun,
                'new_parameters': new_parameters_fun})
    return cls
  return layer_decorator
|
python
|
def layer(output_shape=None, new_parameters=None):
"""Create a layer class from a function."""
def layer_decorator(call):
"""Decorating the call function."""
def output_shape_fun(self, input_shape):
if output_shape is None:
return input_shape
kwargs = self._init_kwargs # pylint: disable=protected-access
return output_shape(input_shape, **kwargs)
def new_parameters_fun(self, input_shape, rng):
if new_parameters is None:
return ()
kwargs = self._init_kwargs # pylint: disable=protected-access
return new_parameters(input_shape, rng, **kwargs)
def call_fun(self, x, params=(), **kwargs):
"""The call function of the created class, derived from call."""
# Merge on-call kwargs with class-kwargs.
call_kwargs = kwargs.copy()
call_kwargs.update(self._init_kwargs) # pylint: disable=protected-access
# Call with the merged kwargs.
return call(x, params=params, **call_kwargs)
# Set doc for python help.
call_fun.__doc__ = call.__doc__
if output_shape is None:
output_shape_fun.__doc__ = output_shape.__doc__
if new_parameters is None:
new_parameters_fun.__doc__ = new_parameters.__doc__
# Create the class.
cls = type(call.__name__, (Layer,),
{'call': call_fun,
'output_shape': output_shape_fun,
'new_parameters': new_parameters_fun})
return cls
return layer_decorator
|
[
"def",
"layer",
"(",
"output_shape",
"=",
"None",
",",
"new_parameters",
"=",
"None",
")",
":",
"def",
"layer_decorator",
"(",
"call",
")",
":",
"\"\"\"Decorating the call function.\"\"\"",
"def",
"output_shape_fun",
"(",
"self",
",",
"input_shape",
")",
":",
"if",
"output_shape",
"is",
"None",
":",
"return",
"input_shape",
"kwargs",
"=",
"self",
".",
"_init_kwargs",
"# pylint: disable=protected-access",
"return",
"output_shape",
"(",
"input_shape",
",",
"*",
"*",
"kwargs",
")",
"def",
"new_parameters_fun",
"(",
"self",
",",
"input_shape",
",",
"rng",
")",
":",
"if",
"new_parameters",
"is",
"None",
":",
"return",
"(",
")",
"kwargs",
"=",
"self",
".",
"_init_kwargs",
"# pylint: disable=protected-access",
"return",
"new_parameters",
"(",
"input_shape",
",",
"rng",
",",
"*",
"*",
"kwargs",
")",
"def",
"call_fun",
"(",
"self",
",",
"x",
",",
"params",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"The call function of the created class, derived from call.\"\"\"",
"# Merge on-call kwargs with class-kwargs.",
"call_kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"call_kwargs",
".",
"update",
"(",
"self",
".",
"_init_kwargs",
")",
"# pylint: disable=protected-access",
"# Call with the merged kwargs.",
"return",
"call",
"(",
"x",
",",
"params",
"=",
"params",
",",
"*",
"*",
"call_kwargs",
")",
"# Set doc for python help.",
"call_fun",
".",
"__doc__",
"=",
"call",
".",
"__doc__",
"if",
"output_shape",
"is",
"None",
":",
"output_shape_fun",
".",
"__doc__",
"=",
"output_shape",
".",
"__doc__",
"if",
"new_parameters",
"is",
"None",
":",
"new_parameters_fun",
".",
"__doc__",
"=",
"new_parameters",
".",
"__doc__",
"# Create the class.",
"cls",
"=",
"type",
"(",
"call",
".",
"__name__",
",",
"(",
"Layer",
",",
")",
",",
"{",
"'call'",
":",
"call_fun",
",",
"'output_shape'",
":",
"output_shape_fun",
",",
"'new_parameters'",
":",
"new_parameters_fun",
"}",
")",
"return",
"cls",
"return",
"layer_decorator"
] |
Create a layer class from a function.
|
[
"Create",
"a",
"layer",
"class",
"from",
"a",
"function",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L238-L276
|
21,778
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/base.py
|
Layer.initialize
|
def initialize(self, input_shape, rng):
  """Initialize the layer given an input shape and rng.

  Returns new_parameters(input_shape, rng) on the first call and () on any
  subsequent call, as the layer is already initialized. This is used for
  networks that share parameters, so the layer only produces them once.

  Note that all arguments and return values can be tuples or dictionaries
  or arbitrary nested structures composed of tuples and dictionaries.

  Args:
    input_shape: a tuple representing the shape of the input.
    rng: random number generator.

  Returns:
    Newly created parameters on the first call and () on all subsequent calls.
  """
  try:
    if self._first_init:
      # First call of this layer: create and remember the parameters.
      self._first_init = False
      self._params = self.new_parameters(input_shape, rng)
      return self._params
    # Layer re-use: parameters already exist, nothing new to create.
    return ()
  except Exception:
    name, trace = self.__class__.__name__, _short_traceback()
    raise LayerError(name, 'initialize', self._caller, input_shape, trace)
|
python
|
def initialize(self, input_shape, rng):
"""Initialize the layer given an input shape and rng.
Returns new_parameters(input_shape, rng) on the first call and () on any
subsequent call, as the layer is already initialized. This is used for
networks that share parameters, so the layer only produces them once.
Note that all arguments and return values can be tuples or dictionaries
or arbitraty nested structures composed of tuples and dictionaries.
Args:
input_shape: a tuple representing the shape of the input.
rng: random number generator.
Returns:
Newly created parameters on the first call and () on all subsequent calls.
"""
try:
# Re-using this layer, no new parameters.
if not self._first_init:
return ()
# First call of this layer, create parameters.
self._first_init = False
self._params = self.new_parameters(input_shape, rng)
return self._params
except Exception:
name, trace = self.__class__.__name__, _short_traceback()
raise LayerError(name, 'initialize', self._caller, input_shape, trace)
|
[
"def",
"initialize",
"(",
"self",
",",
"input_shape",
",",
"rng",
")",
":",
"try",
":",
"# Re-using this layer, no new parameters.",
"if",
"not",
"self",
".",
"_first_init",
":",
"return",
"(",
")",
"# First call of this layer, create parameters.",
"self",
".",
"_first_init",
"=",
"False",
"self",
".",
"_params",
"=",
"self",
".",
"new_parameters",
"(",
"input_shape",
",",
"rng",
")",
"return",
"self",
".",
"_params",
"except",
"Exception",
":",
"name",
",",
"trace",
"=",
"self",
".",
"__class__",
".",
"__name__",
",",
"_short_traceback",
"(",
")",
"raise",
"LayerError",
"(",
"name",
",",
"'initialize'",
",",
"self",
".",
"_caller",
",",
"input_shape",
",",
"trace",
")"
] |
Initialize the layer given an input shape and rng.
Returns new_parameters(input_shape, rng) on the first call and () on any
subsequent call, as the layer is already initialized. This is used for
networks that share parameters, so the layer only produces them once.
Note that all arguments and return values can be tuples or dictionaries
or arbitrary nested structures composed of tuples and dictionaries.
Args:
input_shape: a tuple representing the shape of the input.
rng: random number generator.
Returns:
Newly created parameters on the first call and () on all subsequent calls.
|
[
"Initialize",
"the",
"layer",
"given",
"an",
"input",
"shape",
"and",
"rng",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/base.py#L74-L102
|
21,779
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/wikisum.py
|
_wiki_articles
|
def _wiki_articles(shard_id, wikis_dir=None):
"""Generates WikipediaArticles from GCS that are part of shard shard_id."""
if not wikis_dir:
wikis_dir = WIKI_CONTENT_DIR
with tf.Graph().as_default():
dataset = tf.data.TFRecordDataset(
cc_utils.readahead(
os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)),
buffer_size=16 * 1000 * 1000)
def _parse_example(ex_ser):
"""Parse serialized Example containing Wikipedia article content."""
features = {
"url": tf.VarLenFeature(tf.string),
"title": tf.VarLenFeature(tf.string),
"section_titles": tf.VarLenFeature(tf.string),
"section_texts": tf.VarLenFeature(tf.string),
}
ex = tf.parse_single_example(ex_ser, features)
for k in ex.keys():
ex[k] = ex[k].values
ex["url"] = ex["url"][0]
ex["title"] = ex["title"][0]
return ex
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
except tf.errors.OutOfRangeError:
break
sections = [
WikipediaSection(title=text_encoder.to_unicode(title),
text=text_encoder.to_unicode(text))
for title, text in zip(ex["section_titles"], ex["section_texts"])
]
yield WikipediaArticle(
url=text_encoder.to_unicode(ex["url"]),
title=text_encoder.to_unicode(ex["title"]),
sections=sections)
|
python
|
def _wiki_articles(shard_id, wikis_dir=None):
"""Generates WikipediaArticles from GCS that are part of shard shard_id."""
if not wikis_dir:
wikis_dir = WIKI_CONTENT_DIR
with tf.Graph().as_default():
dataset = tf.data.TFRecordDataset(
cc_utils.readahead(
os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)),
buffer_size=16 * 1000 * 1000)
def _parse_example(ex_ser):
"""Parse serialized Example containing Wikipedia article content."""
features = {
"url": tf.VarLenFeature(tf.string),
"title": tf.VarLenFeature(tf.string),
"section_titles": tf.VarLenFeature(tf.string),
"section_texts": tf.VarLenFeature(tf.string),
}
ex = tf.parse_single_example(ex_ser, features)
for k in ex.keys():
ex[k] = ex[k].values
ex["url"] = ex["url"][0]
ex["title"] = ex["title"][0]
return ex
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
except tf.errors.OutOfRangeError:
break
sections = [
WikipediaSection(title=text_encoder.to_unicode(title),
text=text_encoder.to_unicode(text))
for title, text in zip(ex["section_titles"], ex["section_texts"])
]
yield WikipediaArticle(
url=text_encoder.to_unicode(ex["url"]),
title=text_encoder.to_unicode(ex["title"]),
sections=sections)
|
[
"def",
"_wiki_articles",
"(",
"shard_id",
",",
"wikis_dir",
"=",
"None",
")",
":",
"if",
"not",
"wikis_dir",
":",
"wikis_dir",
"=",
"WIKI_CONTENT_DIR",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"dataset",
"=",
"tf",
".",
"data",
".",
"TFRecordDataset",
"(",
"cc_utils",
".",
"readahead",
"(",
"os",
".",
"path",
".",
"join",
"(",
"wikis_dir",
",",
"WIKI_CONTENT_FILE",
"%",
"shard_id",
")",
")",
",",
"buffer_size",
"=",
"16",
"*",
"1000",
"*",
"1000",
")",
"def",
"_parse_example",
"(",
"ex_ser",
")",
":",
"\"\"\"Parse serialized Example containing Wikipedia article content.\"\"\"",
"features",
"=",
"{",
"\"url\"",
":",
"tf",
".",
"VarLenFeature",
"(",
"tf",
".",
"string",
")",
",",
"\"title\"",
":",
"tf",
".",
"VarLenFeature",
"(",
"tf",
".",
"string",
")",
",",
"\"section_titles\"",
":",
"tf",
".",
"VarLenFeature",
"(",
"tf",
".",
"string",
")",
",",
"\"section_texts\"",
":",
"tf",
".",
"VarLenFeature",
"(",
"tf",
".",
"string",
")",
",",
"}",
"ex",
"=",
"tf",
".",
"parse_single_example",
"(",
"ex_ser",
",",
"features",
")",
"for",
"k",
"in",
"ex",
".",
"keys",
"(",
")",
":",
"ex",
"[",
"k",
"]",
"=",
"ex",
"[",
"k",
"]",
".",
"values",
"ex",
"[",
"\"url\"",
"]",
"=",
"ex",
"[",
"\"url\"",
"]",
"[",
"0",
"]",
"ex",
"[",
"\"title\"",
"]",
"=",
"ex",
"[",
"\"title\"",
"]",
"[",
"0",
"]",
"return",
"ex",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"_parse_example",
",",
"num_parallel_calls",
"=",
"32",
")",
"dataset",
"=",
"dataset",
".",
"prefetch",
"(",
"100",
")",
"record_it",
"=",
"dataset",
".",
"make_one_shot_iterator",
"(",
")",
".",
"get_next",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"while",
"True",
":",
"try",
":",
"ex",
"=",
"sess",
".",
"run",
"(",
"record_it",
")",
"except",
"tf",
".",
"errors",
".",
"OutOfRangeError",
":",
"break",
"sections",
"=",
"[",
"WikipediaSection",
"(",
"title",
"=",
"text_encoder",
".",
"to_unicode",
"(",
"title",
")",
",",
"text",
"=",
"text_encoder",
".",
"to_unicode",
"(",
"text",
")",
")",
"for",
"title",
",",
"text",
"in",
"zip",
"(",
"ex",
"[",
"\"section_titles\"",
"]",
",",
"ex",
"[",
"\"section_texts\"",
"]",
")",
"]",
"yield",
"WikipediaArticle",
"(",
"url",
"=",
"text_encoder",
".",
"to_unicode",
"(",
"ex",
"[",
"\"url\"",
"]",
")",
",",
"title",
"=",
"text_encoder",
".",
"to_unicode",
"(",
"ex",
"[",
"\"title\"",
"]",
")",
",",
"sections",
"=",
"sections",
")"
] |
Generates WikipediaArticles from GCS that are part of shard shard_id.
|
[
"Generates",
"WikipediaArticles",
"from",
"GCS",
"that",
"are",
"part",
"of",
"shard",
"shard_id",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/wikisum.py#L279-L323
|
21,780
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/wikisum.py
|
rank_reference_paragraphs
|
def rank_reference_paragraphs(wiki_title, references_content, normalize=True):
"""Rank and return reference paragraphs by tf-idf score on title tokens."""
normalized_title = _normalize_text(wiki_title)
title_tokens = _tokens_to_score(
set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title))))
ref_paragraph_info = []
doc_counts = collections.defaultdict(int)
for ref in references_content:
for paragraph in ref.split("\n"):
normalized_paragraph = _normalize_text(paragraph)
if cc_utils.filter_paragraph(normalized_paragraph):
# Skip paragraph
continue
counts = _token_counts(normalized_paragraph, title_tokens)
for token in title_tokens:
if counts[token]:
doc_counts[token] += 1
content = normalized_paragraph if normalize else paragraph
info = {"content": content, "counts": counts}
ref_paragraph_info.append(info)
for info in ref_paragraph_info:
score = 0.
for token in title_tokens:
term_frequency = info["counts"][token]
inv_doc_frequency = (
float(len(ref_paragraph_info)) / max(doc_counts[token], 1))
score += term_frequency * math.log(inv_doc_frequency)
info["score"] = score
ref_paragraph_info.sort(key=lambda el: el["score"], reverse=True)
return [info["content"] for info in ref_paragraph_info]
|
python
|
def rank_reference_paragraphs(wiki_title, references_content, normalize=True):
"""Rank and return reference paragraphs by tf-idf score on title tokens."""
normalized_title = _normalize_text(wiki_title)
title_tokens = _tokens_to_score(
set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title))))
ref_paragraph_info = []
doc_counts = collections.defaultdict(int)
for ref in references_content:
for paragraph in ref.split("\n"):
normalized_paragraph = _normalize_text(paragraph)
if cc_utils.filter_paragraph(normalized_paragraph):
# Skip paragraph
continue
counts = _token_counts(normalized_paragraph, title_tokens)
for token in title_tokens:
if counts[token]:
doc_counts[token] += 1
content = normalized_paragraph if normalize else paragraph
info = {"content": content, "counts": counts}
ref_paragraph_info.append(info)
for info in ref_paragraph_info:
score = 0.
for token in title_tokens:
term_frequency = info["counts"][token]
inv_doc_frequency = (
float(len(ref_paragraph_info)) / max(doc_counts[token], 1))
score += term_frequency * math.log(inv_doc_frequency)
info["score"] = score
ref_paragraph_info.sort(key=lambda el: el["score"], reverse=True)
return [info["content"] for info in ref_paragraph_info]
|
[
"def",
"rank_reference_paragraphs",
"(",
"wiki_title",
",",
"references_content",
",",
"normalize",
"=",
"True",
")",
":",
"normalized_title",
"=",
"_normalize_text",
"(",
"wiki_title",
")",
"title_tokens",
"=",
"_tokens_to_score",
"(",
"set",
"(",
"tokenizer",
".",
"encode",
"(",
"text_encoder",
".",
"native_to_unicode",
"(",
"normalized_title",
")",
")",
")",
")",
"ref_paragraph_info",
"=",
"[",
"]",
"doc_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"ref",
"in",
"references_content",
":",
"for",
"paragraph",
"in",
"ref",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"normalized_paragraph",
"=",
"_normalize_text",
"(",
"paragraph",
")",
"if",
"cc_utils",
".",
"filter_paragraph",
"(",
"normalized_paragraph",
")",
":",
"# Skip paragraph",
"continue",
"counts",
"=",
"_token_counts",
"(",
"normalized_paragraph",
",",
"title_tokens",
")",
"for",
"token",
"in",
"title_tokens",
":",
"if",
"counts",
"[",
"token",
"]",
":",
"doc_counts",
"[",
"token",
"]",
"+=",
"1",
"content",
"=",
"normalized_paragraph",
"if",
"normalize",
"else",
"paragraph",
"info",
"=",
"{",
"\"content\"",
":",
"content",
",",
"\"counts\"",
":",
"counts",
"}",
"ref_paragraph_info",
".",
"append",
"(",
"info",
")",
"for",
"info",
"in",
"ref_paragraph_info",
":",
"score",
"=",
"0.",
"for",
"token",
"in",
"title_tokens",
":",
"term_frequency",
"=",
"info",
"[",
"\"counts\"",
"]",
"[",
"token",
"]",
"inv_doc_frequency",
"=",
"(",
"float",
"(",
"len",
"(",
"ref_paragraph_info",
")",
")",
"/",
"max",
"(",
"doc_counts",
"[",
"token",
"]",
",",
"1",
")",
")",
"score",
"+=",
"term_frequency",
"*",
"math",
".",
"log",
"(",
"inv_doc_frequency",
")",
"info",
"[",
"\"score\"",
"]",
"=",
"score",
"ref_paragraph_info",
".",
"sort",
"(",
"key",
"=",
"lambda",
"el",
":",
"el",
"[",
"\"score\"",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"[",
"info",
"[",
"\"content\"",
"]",
"for",
"info",
"in",
"ref_paragraph_info",
"]"
] |
Rank and return reference paragraphs by tf-idf score on title tokens.
|
[
"Rank",
"and",
"return",
"reference",
"paragraphs",
"by",
"tf",
"-",
"idf",
"score",
"on",
"title",
"tokens",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/wikisum.py#L348-L379
|
21,781
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/wikisum.py
|
_encode_wiki_sections
|
def _encode_wiki_sections(sections, vocab):
"""Encodes sections with vocab. Returns ids and section boundaries."""
ids = []
section_boundaries = []
for i, section in enumerate(sections):
if i > 0:
# Skip including article title
ids.extend(vocab.encode(_format_title(_normalize_text(section.title))))
ids.extend(vocab.encode(_normalize_text(section.text)))
section_boundaries.append(len(ids))
return ids, section_boundaries
|
python
|
def _encode_wiki_sections(sections, vocab):
"""Encodes sections with vocab. Returns ids and section boundaries."""
ids = []
section_boundaries = []
for i, section in enumerate(sections):
if i > 0:
# Skip including article title
ids.extend(vocab.encode(_format_title(_normalize_text(section.title))))
ids.extend(vocab.encode(_normalize_text(section.text)))
section_boundaries.append(len(ids))
return ids, section_boundaries
|
[
"def",
"_encode_wiki_sections",
"(",
"sections",
",",
"vocab",
")",
":",
"ids",
"=",
"[",
"]",
"section_boundaries",
"=",
"[",
"]",
"for",
"i",
",",
"section",
"in",
"enumerate",
"(",
"sections",
")",
":",
"if",
"i",
">",
"0",
":",
"# Skip including article title",
"ids",
".",
"extend",
"(",
"vocab",
".",
"encode",
"(",
"_format_title",
"(",
"_normalize_text",
"(",
"section",
".",
"title",
")",
")",
")",
")",
"ids",
".",
"extend",
"(",
"vocab",
".",
"encode",
"(",
"_normalize_text",
"(",
"section",
".",
"text",
")",
")",
")",
"section_boundaries",
".",
"append",
"(",
"len",
"(",
"ids",
")",
")",
"return",
"ids",
",",
"section_boundaries"
] |
Encodes sections with vocab. Returns ids and section boundaries.
|
[
"Encodes",
"sections",
"with",
"vocab",
".",
"Returns",
"ids",
"and",
"section",
"boundaries",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/wikisum.py#L488-L499
|
21,782
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/wikisum.py
|
extract_references_from_wets
|
def extract_references_from_wets(wet_files, metadata_dir, out_dir,
tmp_dir=None):
"""Extract references from WET files into sharded output files."""
# Setup output files
shard_files = make_ref_shard_files(out_dir)
num_refs = 0
for i, wet_file in enumerate(wet_files):
num_refs_in_wet = 0
tf.logging.info("Processing file %d", i)
# Read metadata file
metadata_fname = os.path.join(
metadata_dir, os.path.basename(wet_file)) + cc_utils.METADTA_SUFFIX
with tf.gfile.Open(cc_utils.readahead(metadata_fname)) as f:
wet_metadata = json.loads(f.read())
if not wet_metadata:
# No references in this WET file
continue
if wet_file.startswith("http"):
# download
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
record_gen = cc_utils.wet_records_from_url(wet_file, tmp_dir)
else:
# local
record_gen = cc_utils.wet_records_from_file_obj(
cc_utils.gzip_memfile(wet_file), take_ownership=True)
for wet_record in record_gen:
shard_ids = wet_metadata.get(wet_record.url)
if not shard_ids:
# URL not in dataset
continue
# Serialize and write out
ex = _make_example_from_record(wet_record)
ex_str = ex.SerializeToString()
for shard_id in shard_ids:
shard_files[shard_id].write(ex_str)
num_refs += 1
num_refs_in_wet += 1
tf.logging.info("Wrote out %d references for this WET", num_refs_in_wet)
tf.logging.info("Wrote out %d references total", num_refs)
# Cleanup
for shard_file in shard_files:
shard_file.close()
|
python
|
def extract_references_from_wets(wet_files, metadata_dir, out_dir,
tmp_dir=None):
"""Extract references from WET files into sharded output files."""
# Setup output files
shard_files = make_ref_shard_files(out_dir)
num_refs = 0
for i, wet_file in enumerate(wet_files):
num_refs_in_wet = 0
tf.logging.info("Processing file %d", i)
# Read metadata file
metadata_fname = os.path.join(
metadata_dir, os.path.basename(wet_file)) + cc_utils.METADTA_SUFFIX
with tf.gfile.Open(cc_utils.readahead(metadata_fname)) as f:
wet_metadata = json.loads(f.read())
if not wet_metadata:
# No references in this WET file
continue
if wet_file.startswith("http"):
# download
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
record_gen = cc_utils.wet_records_from_url(wet_file, tmp_dir)
else:
# local
record_gen = cc_utils.wet_records_from_file_obj(
cc_utils.gzip_memfile(wet_file), take_ownership=True)
for wet_record in record_gen:
shard_ids = wet_metadata.get(wet_record.url)
if not shard_ids:
# URL not in dataset
continue
# Serialize and write out
ex = _make_example_from_record(wet_record)
ex_str = ex.SerializeToString()
for shard_id in shard_ids:
shard_files[shard_id].write(ex_str)
num_refs += 1
num_refs_in_wet += 1
tf.logging.info("Wrote out %d references for this WET", num_refs_in_wet)
tf.logging.info("Wrote out %d references total", num_refs)
# Cleanup
for shard_file in shard_files:
shard_file.close()
|
[
"def",
"extract_references_from_wets",
"(",
"wet_files",
",",
"metadata_dir",
",",
"out_dir",
",",
"tmp_dir",
"=",
"None",
")",
":",
"# Setup output files",
"shard_files",
"=",
"make_ref_shard_files",
"(",
"out_dir",
")",
"num_refs",
"=",
"0",
"for",
"i",
",",
"wet_file",
"in",
"enumerate",
"(",
"wet_files",
")",
":",
"num_refs_in_wet",
"=",
"0",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Processing file %d\"",
",",
"i",
")",
"# Read metadata file",
"metadata_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"metadata_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"wet_file",
")",
")",
"+",
"cc_utils",
".",
"METADTA_SUFFIX",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"cc_utils",
".",
"readahead",
"(",
"metadata_fname",
")",
")",
"as",
"f",
":",
"wet_metadata",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"if",
"not",
"wet_metadata",
":",
"# No references in this WET file",
"continue",
"if",
"wet_file",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"# download",
"if",
"not",
"tmp_dir",
":",
"tmp_dir",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
"record_gen",
"=",
"cc_utils",
".",
"wet_records_from_url",
"(",
"wet_file",
",",
"tmp_dir",
")",
"else",
":",
"# local",
"record_gen",
"=",
"cc_utils",
".",
"wet_records_from_file_obj",
"(",
"cc_utils",
".",
"gzip_memfile",
"(",
"wet_file",
")",
",",
"take_ownership",
"=",
"True",
")",
"for",
"wet_record",
"in",
"record_gen",
":",
"shard_ids",
"=",
"wet_metadata",
".",
"get",
"(",
"wet_record",
".",
"url",
")",
"if",
"not",
"shard_ids",
":",
"# URL not in dataset",
"continue",
"# Serialize and write out",
"ex",
"=",
"_make_example_from_record",
"(",
"wet_record",
")",
"ex_str",
"=",
"ex",
".",
"SerializeToString",
"(",
")",
"for",
"shard_id",
"in",
"shard_ids",
":",
"shard_files",
"[",
"shard_id",
"]",
".",
"write",
"(",
"ex_str",
")",
"num_refs",
"+=",
"1",
"num_refs_in_wet",
"+=",
"1",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Wrote out %d references for this WET\"",
",",
"num_refs_in_wet",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Wrote out %d references total\"",
",",
"num_refs",
")",
"# Cleanup",
"for",
"shard_file",
"in",
"shard_files",
":",
"shard_file",
".",
"close",
"(",
")"
] |
Extract references from WET files into sharded output files.
|
[
"Extract",
"references",
"from",
"WET",
"files",
"into",
"sharded",
"output",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/wikisum.py#L506-L557
|
21,783
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wiki.py
|
_dump_to_pages
|
def _dump_to_pages(dump):
"""Extract pages from an xml dump.
Args:
dump: a unicode string
Returns:
a list of unicode strings
"""
pos = 0
ret = []
start_tag = u"<page>\n"
end_tag = u"</page>\n"
while True:
start_pos = dump.find(start_tag, pos)
if start_pos == -1:
break
start_pos += len(start_tag)
end_pos = dump.find(end_tag, start_pos)
if end_pos == -1:
break
ret.append(dump[start_pos:end_pos])
pos = end_pos + len(end_tag)
return ret
|
python
|
def _dump_to_pages(dump):
"""Extract pages from an xml dump.
Args:
dump: a unicode string
Returns:
a list of unicode strings
"""
pos = 0
ret = []
start_tag = u"<page>\n"
end_tag = u"</page>\n"
while True:
start_pos = dump.find(start_tag, pos)
if start_pos == -1:
break
start_pos += len(start_tag)
end_pos = dump.find(end_tag, start_pos)
if end_pos == -1:
break
ret.append(dump[start_pos:end_pos])
pos = end_pos + len(end_tag)
return ret
|
[
"def",
"_dump_to_pages",
"(",
"dump",
")",
":",
"pos",
"=",
"0",
"ret",
"=",
"[",
"]",
"start_tag",
"=",
"u\"<page>\\n\"",
"end_tag",
"=",
"u\"</page>\\n\"",
"while",
"True",
":",
"start_pos",
"=",
"dump",
".",
"find",
"(",
"start_tag",
",",
"pos",
")",
"if",
"start_pos",
"==",
"-",
"1",
":",
"break",
"start_pos",
"+=",
"len",
"(",
"start_tag",
")",
"end_pos",
"=",
"dump",
".",
"find",
"(",
"end_tag",
",",
"start_pos",
")",
"if",
"end_pos",
"==",
"-",
"1",
":",
"break",
"ret",
".",
"append",
"(",
"dump",
"[",
"start_pos",
":",
"end_pos",
"]",
")",
"pos",
"=",
"end_pos",
"+",
"len",
"(",
"end_tag",
")",
"return",
"ret"
] |
Extract pages from an xml dump.
Args:
dump: a unicode string
Returns:
a list of unicode strings
|
[
"Extract",
"pages",
"from",
"an",
"xml",
"dump",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki.py#L245-L267
|
21,784
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wiki.py
|
_page_to_text
|
def _page_to_text(page):
"""Extract the text from a page.
Args:
page: a unicode string
Returns:
a unicode string
"""
# text start tag looks like "<text ..otherstuff>"
start_pos = page.find(u"<text")
assert start_pos != -1
end_tag_pos = page.find(u">", start_pos)
assert end_tag_pos != -1
end_tag_pos += len(u">")
end_pos = page.find(u"</text>")
if end_pos == -1:
return u""
return page[end_tag_pos:end_pos]
|
python
|
def _page_to_text(page):
"""Extract the text from a page.
Args:
page: a unicode string
Returns:
a unicode string
"""
# text start tag looks like "<text ..otherstuff>"
start_pos = page.find(u"<text")
assert start_pos != -1
end_tag_pos = page.find(u">", start_pos)
assert end_tag_pos != -1
end_tag_pos += len(u">")
end_pos = page.find(u"</text>")
if end_pos == -1:
return u""
return page[end_tag_pos:end_pos]
|
[
"def",
"_page_to_text",
"(",
"page",
")",
":",
"# text start tag looks like \"<text ..otherstuff>\"",
"start_pos",
"=",
"page",
".",
"find",
"(",
"u\"<text\"",
")",
"assert",
"start_pos",
"!=",
"-",
"1",
"end_tag_pos",
"=",
"page",
".",
"find",
"(",
"u\">\"",
",",
"start_pos",
")",
"assert",
"end_tag_pos",
"!=",
"-",
"1",
"end_tag_pos",
"+=",
"len",
"(",
"u\">\"",
")",
"end_pos",
"=",
"page",
".",
"find",
"(",
"u\"</text>\"",
")",
"if",
"end_pos",
"==",
"-",
"1",
":",
"return",
"u\"\"",
"return",
"page",
"[",
"end_tag_pos",
":",
"end_pos",
"]"
] |
Extract the text from a page.
Args:
page: a unicode string
Returns:
a unicode string
|
[
"Extract",
"the",
"text",
"from",
"a",
"page",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki.py#L289-L306
|
21,785
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wiki.py
|
_find_and_replace
|
def _find_and_replace(text, start_string, end_string, replace_fn):
"""Remove everything found between instances of start_string and end_string.
Replace each such instance with replace_fn(removed_text)
e.g. _find_and_replace(u"the [[fat]] cat [[sat]]", u"[[", u"]]", lambda x: x)
= u"the fat cat sat"
Args:
text: a unicode string
start_string: a unicode string
end_string: a unicode string
replace_fn: a unary function from unicode string to unicode string
Returns:
a string
"""
ret = u""
current_pos = 0
while True:
start_pos = text.find(start_string, current_pos)
if start_pos == -1:
ret += text[current_pos:]
break
ret += text[current_pos:start_pos]
end_pos = text.find(end_string, start_pos + len(start_string))
if end_pos == -1:
break
ret += replace_fn(text[start_pos + len(start_string):end_pos])
current_pos = end_pos + len(end_string)
return ret
|
python
|
def _find_and_replace(text, start_string, end_string, replace_fn):
"""Remove everything found between instances of start_string and end_string.
Replace each such instance with replace_fn(removed_text)
e.g. _find_and_replace(u"the [[fat]] cat [[sat]]", u"[[", u"]]", lambda x: x)
= u"the fat cat sat"
Args:
text: a unicode string
start_string: a unicode string
end_string: a unicode string
replace_fn: a unary function from unicode string to unicode string
Returns:
a string
"""
ret = u""
current_pos = 0
while True:
start_pos = text.find(start_string, current_pos)
if start_pos == -1:
ret += text[current_pos:]
break
ret += text[current_pos:start_pos]
end_pos = text.find(end_string, start_pos + len(start_string))
if end_pos == -1:
break
ret += replace_fn(text[start_pos + len(start_string):end_pos])
current_pos = end_pos + len(end_string)
return ret
|
[
"def",
"_find_and_replace",
"(",
"text",
",",
"start_string",
",",
"end_string",
",",
"replace_fn",
")",
":",
"ret",
"=",
"u\"\"",
"current_pos",
"=",
"0",
"while",
"True",
":",
"start_pos",
"=",
"text",
".",
"find",
"(",
"start_string",
",",
"current_pos",
")",
"if",
"start_pos",
"==",
"-",
"1",
":",
"ret",
"+=",
"text",
"[",
"current_pos",
":",
"]",
"break",
"ret",
"+=",
"text",
"[",
"current_pos",
":",
"start_pos",
"]",
"end_pos",
"=",
"text",
".",
"find",
"(",
"end_string",
",",
"start_pos",
"+",
"len",
"(",
"start_string",
")",
")",
"if",
"end_pos",
"==",
"-",
"1",
":",
"break",
"ret",
"+=",
"replace_fn",
"(",
"text",
"[",
"start_pos",
"+",
"len",
"(",
"start_string",
")",
":",
"end_pos",
"]",
")",
"current_pos",
"=",
"end_pos",
"+",
"len",
"(",
"end_string",
")",
"return",
"ret"
] |
Remove everything found between instances of start_string and end_string.
Replace each such instance with replace_fn(removed_text)
e.g. _find_and_replace(u"the [[fat]] cat [[sat]]", u"[[", u"]]", lambda x: x)
= u"the fat cat sat"
Args:
text: a unicode string
start_string: a unicode string
end_string: a unicode string
replace_fn: a unary function from unicode string to unicode string
Returns:
a string
|
[
"Remove",
"everything",
"found",
"between",
"instances",
"of",
"start_string",
"and",
"end_string",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki.py#L309-L339
|
21,786
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/vqa_self_attention.py
|
prepare_question_encoder
|
def prepare_question_encoder(inputs, hparams):
"""Prepare question encoder.
Args:
inputs: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
"""
encoder_input = inputs
# Usual case - not a packed dataset.
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
if hparams.pos == "timing":
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
None)
return (encoder_input, encoder_self_attention_bias)
|
python
|
def prepare_question_encoder(inputs, hparams):
"""Prepare question encoder.
Args:
inputs: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
"""
encoder_input = inputs
# Usual case - not a packed dataset.
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
if hparams.pos == "timing":
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
None)
return (encoder_input, encoder_self_attention_bias)
|
[
"def",
"prepare_question_encoder",
"(",
"inputs",
",",
"hparams",
")",
":",
"encoder_input",
"=",
"inputs",
"# Usual case - not a packed dataset.",
"encoder_padding",
"=",
"common_attention",
".",
"embedding_to_padding",
"(",
"encoder_input",
")",
"ignore_padding",
"=",
"common_attention",
".",
"attention_bias_ignore_padding",
"(",
"encoder_padding",
")",
"encoder_self_attention_bias",
"=",
"ignore_padding",
"if",
"hparams",
".",
"pos",
"==",
"\"timing\"",
":",
"encoder_input",
"=",
"common_attention",
".",
"add_timing_signal_1d",
"(",
"encoder_input",
")",
"elif",
"hparams",
".",
"pos",
"==",
"\"emb\"",
":",
"encoder_input",
"=",
"common_attention",
".",
"add_positional_embedding",
"(",
"encoder_input",
",",
"hparams",
".",
"max_length",
",",
"\"inputs_positional_embedding\"",
",",
"None",
")",
"return",
"(",
"encoder_input",
",",
"encoder_self_attention_bias",
")"
] |
Prepare question encoder.
Args:
inputs: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
|
[
"Prepare",
"question",
"encoder",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_self_attention.py#L316-L339
|
21,787
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/vqa_self_attention.py
|
prepare_image_question_encoder
|
def prepare_image_question_encoder(image_feat, question, hparams):
"""Prepare encoder.
Args:
image_feat: a Tensor.
question: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
"""
encoder_input = tf.concat([image_feat, question], axis=1)
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
encoder_decoder_attention_bias = ignore_padding
# Usual case - not a packed dataset.
if hparams.pos == "timing":
question = common_attention.add_timing_signal_1d(question)
elif hparams.pos == "emb":
question = common_attention.add_positional_embedding(
question, hparams.max_length, "inputs_positional_embedding",
None)
encoder_input = tf.concat([image_feat, question], axis=1)
return (encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias)
|
python
|
def prepare_image_question_encoder(image_feat, question, hparams):
"""Prepare encoder.
Args:
image_feat: a Tensor.
question: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
"""
encoder_input = tf.concat([image_feat, question], axis=1)
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
encoder_decoder_attention_bias = ignore_padding
# Usual case - not a packed dataset.
if hparams.pos == "timing":
question = common_attention.add_timing_signal_1d(question)
elif hparams.pos == "emb":
question = common_attention.add_positional_embedding(
question, hparams.max_length, "inputs_positional_embedding",
None)
encoder_input = tf.concat([image_feat, question], axis=1)
return (encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias)
|
[
"def",
"prepare_image_question_encoder",
"(",
"image_feat",
",",
"question",
",",
"hparams",
")",
":",
"encoder_input",
"=",
"tf",
".",
"concat",
"(",
"[",
"image_feat",
",",
"question",
"]",
",",
"axis",
"=",
"1",
")",
"encoder_padding",
"=",
"common_attention",
".",
"embedding_to_padding",
"(",
"encoder_input",
")",
"ignore_padding",
"=",
"common_attention",
".",
"attention_bias_ignore_padding",
"(",
"encoder_padding",
")",
"encoder_self_attention_bias",
"=",
"ignore_padding",
"encoder_decoder_attention_bias",
"=",
"ignore_padding",
"# Usual case - not a packed dataset.",
"if",
"hparams",
".",
"pos",
"==",
"\"timing\"",
":",
"question",
"=",
"common_attention",
".",
"add_timing_signal_1d",
"(",
"question",
")",
"elif",
"hparams",
".",
"pos",
"==",
"\"emb\"",
":",
"question",
"=",
"common_attention",
".",
"add_positional_embedding",
"(",
"question",
",",
"hparams",
".",
"max_length",
",",
"\"inputs_positional_embedding\"",
",",
"None",
")",
"encoder_input",
"=",
"tf",
".",
"concat",
"(",
"[",
"image_feat",
",",
"question",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"(",
"encoder_input",
",",
"encoder_self_attention_bias",
",",
"encoder_decoder_attention_bias",
")"
] |
Prepare encoder.
Args:
image_feat: a Tensor.
question: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
|
[
"Prepare",
"encoder",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_self_attention.py#L448-L477
|
21,788
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/vqa_self_attention.py
|
iterative_encoder_decoder
|
def iterative_encoder_decoder(encoder_input,
encoder_self_attention_bias,
encoder_decoder_attention_bias,
query,
hparams):
"""Iterative encoder decoder."""
for _ in range(hparams.num_rec_steps):
with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
encoder_output = image_question_encoder(
encoder_input,
encoder_self_attention_bias,
hparams,
query)
decoder_output = decoder(
query,
encoder_output,
None,
encoder_decoder_attention_bias,
hparams)
encoder_input = encoder_output
query = decoder_output
return decoder_output
|
python
|
def iterative_encoder_decoder(encoder_input,
encoder_self_attention_bias,
encoder_decoder_attention_bias,
query,
hparams):
"""Iterative encoder decoder."""
for _ in range(hparams.num_rec_steps):
with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
encoder_output = image_question_encoder(
encoder_input,
encoder_self_attention_bias,
hparams,
query)
decoder_output = decoder(
query,
encoder_output,
None,
encoder_decoder_attention_bias,
hparams)
encoder_input = encoder_output
query = decoder_output
return decoder_output
|
[
"def",
"iterative_encoder_decoder",
"(",
"encoder_input",
",",
"encoder_self_attention_bias",
",",
"encoder_decoder_attention_bias",
",",
"query",
",",
"hparams",
")",
":",
"for",
"_",
"in",
"range",
"(",
"hparams",
".",
"num_rec_steps",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"step\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"encoder_output",
"=",
"image_question_encoder",
"(",
"encoder_input",
",",
"encoder_self_attention_bias",
",",
"hparams",
",",
"query",
")",
"decoder_output",
"=",
"decoder",
"(",
"query",
",",
"encoder_output",
",",
"None",
",",
"encoder_decoder_attention_bias",
",",
"hparams",
")",
"encoder_input",
"=",
"encoder_output",
"query",
"=",
"decoder_output",
"return",
"decoder_output"
] |
Iterative encoder decoder.
|
[
"Iterative",
"encoder",
"decoder",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_self_attention.py#L654-L678
|
21,789
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/vqa_self_attention.py
|
vqa_self_attention_feature_batch1024_big
|
def vqa_self_attention_feature_batch1024_big():
"""Big model."""
hparams = vqa_self_attention_feature_batch1024()
hparams.learning_rate_constant = 7e-4
hparams.batch_size = 256
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
|
python
|
def vqa_self_attention_feature_batch1024_big():
"""Big model."""
hparams = vqa_self_attention_feature_batch1024()
hparams.learning_rate_constant = 7e-4
hparams.batch_size = 256
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
|
[
"def",
"vqa_self_attention_feature_batch1024_big",
"(",
")",
":",
"hparams",
"=",
"vqa_self_attention_feature_batch1024",
"(",
")",
"hparams",
".",
"learning_rate_constant",
"=",
"7e-4",
"hparams",
".",
"batch_size",
"=",
"256",
"hparams",
".",
"hidden_size",
"=",
"1024",
"hparams",
".",
"filter_size",
"=",
"4096",
"hparams",
".",
"num_heads",
"=",
"16",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.3",
"hparams",
".",
"attention_dropout",
"=",
"0.3",
"hparams",
".",
"relu_dropout",
"=",
"0.3",
"return",
"hparams"
] |
Big model.
|
[
"Big",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_self_attention.py#L774-L785
|
21,790
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
_bucket_boundaries
|
def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):
"""A default set of length-bucket boundaries."""
assert length_bucket_step > 1.0
x = min_length
boundaries = []
while x < max_length:
boundaries.append(x)
x = max(x + 1, int(x * length_bucket_step))
return boundaries
|
python
|
def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):
"""A default set of length-bucket boundaries."""
assert length_bucket_step > 1.0
x = min_length
boundaries = []
while x < max_length:
boundaries.append(x)
x = max(x + 1, int(x * length_bucket_step))
return boundaries
|
[
"def",
"_bucket_boundaries",
"(",
"max_length",
",",
"min_length",
"=",
"8",
",",
"length_bucket_step",
"=",
"1.1",
")",
":",
"assert",
"length_bucket_step",
">",
"1.0",
"x",
"=",
"min_length",
"boundaries",
"=",
"[",
"]",
"while",
"x",
"<",
"max_length",
":",
"boundaries",
".",
"append",
"(",
"x",
")",
"x",
"=",
"max",
"(",
"x",
"+",
"1",
",",
"int",
"(",
"x",
"*",
"length_bucket_step",
")",
")",
"return",
"boundaries"
] |
A default set of length-bucket boundaries.
|
[
"A",
"default",
"set",
"of",
"length",
"-",
"bucket",
"boundaries",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L69-L77
|
21,791
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
batching_scheme
|
def batching_scheme(batch_size,
max_length,
min_length_bucket,
length_bucket_step,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1,
min_length=0):
"""A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length
"""
max_length = max_length or batch_size
if max_length < min_length:
raise ValueError("max_length must be greater or equal to min_length")
boundaries = _bucket_boundaries(max_length, min_length_bucket,
length_bucket_step)
boundaries = [boundary * length_multiplier for boundary in boundaries]
max_length *= length_multiplier
batch_sizes = [
max(1, batch_size // length) for length in boundaries + [max_length]
]
max_batch_size = max(batch_sizes)
# Since the Datasets API only allows a single constant for window_size,
# and it needs divide all bucket_batch_sizes, we pick a highly-composite
# window size and then round down all batch sizes to divisors of that window
# size, so that a window can always be divided evenly into batches.
# TODO(noam): remove this when Dataset API improves.
highly_composite_numbers = [
1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680,
2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440,
83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280,
720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480,
7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400,
36756720, 43243200, 61261200, 73513440, 110270160
]
window_size = max(
[i for i in highly_composite_numbers if i <= 3 * max_batch_size])
divisors = [i for i in range(1, window_size + 1) if window_size % i == 0]
batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes]
window_size *= shard_multiplier
batch_sizes = [bs * shard_multiplier for bs in batch_sizes]
# The Datasets API splits one window into multiple batches, which
# produces runs of many consecutive batches of the same size. This
# is bad for training. To solve this, we will shuffle the batches
# using a queue which must be several times as large as the maximum
# number of batches per window.
max_batches_per_window = window_size // min(batch_sizes)
shuffle_queue_size = max_batches_per_window * 3
ret = {
"boundaries": boundaries,
"batch_sizes": batch_sizes,
"min_length": min_length,
"max_length": (max_length if drop_long_sequences else 10**9),
"shuffle_queue_size": shuffle_queue_size,
}
return ret
|
python
|
def batching_scheme(batch_size,
max_length,
min_length_bucket,
length_bucket_step,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1,
min_length=0):
"""A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length
"""
max_length = max_length or batch_size
if max_length < min_length:
raise ValueError("max_length must be greater or equal to min_length")
boundaries = _bucket_boundaries(max_length, min_length_bucket,
length_bucket_step)
boundaries = [boundary * length_multiplier for boundary in boundaries]
max_length *= length_multiplier
batch_sizes = [
max(1, batch_size // length) for length in boundaries + [max_length]
]
max_batch_size = max(batch_sizes)
# Since the Datasets API only allows a single constant for window_size,
# and it needs divide all bucket_batch_sizes, we pick a highly-composite
# window size and then round down all batch sizes to divisors of that window
# size, so that a window can always be divided evenly into batches.
# TODO(noam): remove this when Dataset API improves.
highly_composite_numbers = [
1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680,
2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440,
83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280,
720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480,
7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400,
36756720, 43243200, 61261200, 73513440, 110270160
]
window_size = max(
[i for i in highly_composite_numbers if i <= 3 * max_batch_size])
divisors = [i for i in range(1, window_size + 1) if window_size % i == 0]
batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes]
window_size *= shard_multiplier
batch_sizes = [bs * shard_multiplier for bs in batch_sizes]
# The Datasets API splits one window into multiple batches, which
# produces runs of many consecutive batches of the same size. This
# is bad for training. To solve this, we will shuffle the batches
# using a queue which must be several times as large as the maximum
# number of batches per window.
max_batches_per_window = window_size // min(batch_sizes)
shuffle_queue_size = max_batches_per_window * 3
ret = {
"boundaries": boundaries,
"batch_sizes": batch_sizes,
"min_length": min_length,
"max_length": (max_length if drop_long_sequences else 10**9),
"shuffle_queue_size": shuffle_queue_size,
}
return ret
|
[
"def",
"batching_scheme",
"(",
"batch_size",
",",
"max_length",
",",
"min_length_bucket",
",",
"length_bucket_step",
",",
"drop_long_sequences",
"=",
"False",
",",
"shard_multiplier",
"=",
"1",
",",
"length_multiplier",
"=",
"1",
",",
"min_length",
"=",
"0",
")",
":",
"max_length",
"=",
"max_length",
"or",
"batch_size",
"if",
"max_length",
"<",
"min_length",
":",
"raise",
"ValueError",
"(",
"\"max_length must be greater or equal to min_length\"",
")",
"boundaries",
"=",
"_bucket_boundaries",
"(",
"max_length",
",",
"min_length_bucket",
",",
"length_bucket_step",
")",
"boundaries",
"=",
"[",
"boundary",
"*",
"length_multiplier",
"for",
"boundary",
"in",
"boundaries",
"]",
"max_length",
"*=",
"length_multiplier",
"batch_sizes",
"=",
"[",
"max",
"(",
"1",
",",
"batch_size",
"//",
"length",
")",
"for",
"length",
"in",
"boundaries",
"+",
"[",
"max_length",
"]",
"]",
"max_batch_size",
"=",
"max",
"(",
"batch_sizes",
")",
"# Since the Datasets API only allows a single constant for window_size,",
"# and it needs divide all bucket_batch_sizes, we pick a highly-composite",
"# window size and then round down all batch sizes to divisors of that window",
"# size, so that a window can always be divided evenly into batches.",
"# TODO(noam): remove this when Dataset API improves.",
"highly_composite_numbers",
"=",
"[",
"1",
",",
"2",
",",
"4",
",",
"6",
",",
"12",
",",
"24",
",",
"36",
",",
"48",
",",
"60",
",",
"120",
",",
"180",
",",
"240",
",",
"360",
",",
"720",
",",
"840",
",",
"1260",
",",
"1680",
",",
"2520",
",",
"5040",
",",
"7560",
",",
"10080",
",",
"15120",
",",
"20160",
",",
"25200",
",",
"27720",
",",
"45360",
",",
"50400",
",",
"55440",
",",
"83160",
",",
"110880",
",",
"166320",
",",
"221760",
",",
"277200",
",",
"332640",
",",
"498960",
",",
"554400",
",",
"665280",
",",
"720720",
",",
"1081080",
",",
"1441440",
",",
"2162160",
",",
"2882880",
",",
"3603600",
",",
"4324320",
",",
"6486480",
",",
"7207200",
",",
"8648640",
",",
"10810800",
",",
"14414400",
",",
"17297280",
",",
"21621600",
",",
"32432400",
",",
"36756720",
",",
"43243200",
",",
"61261200",
",",
"73513440",
",",
"110270160",
"]",
"window_size",
"=",
"max",
"(",
"[",
"i",
"for",
"i",
"in",
"highly_composite_numbers",
"if",
"i",
"<=",
"3",
"*",
"max_batch_size",
"]",
")",
"divisors",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"window_size",
"+",
"1",
")",
"if",
"window_size",
"%",
"i",
"==",
"0",
"]",
"batch_sizes",
"=",
"[",
"max",
"(",
"[",
"d",
"for",
"d",
"in",
"divisors",
"if",
"d",
"<=",
"bs",
"]",
")",
"for",
"bs",
"in",
"batch_sizes",
"]",
"window_size",
"*=",
"shard_multiplier",
"batch_sizes",
"=",
"[",
"bs",
"*",
"shard_multiplier",
"for",
"bs",
"in",
"batch_sizes",
"]",
"# The Datasets API splits one window into multiple batches, which",
"# produces runs of many consecutive batches of the same size. This",
"# is bad for training. To solve this, we will shuffle the batches",
"# using a queue which must be several times as large as the maximum",
"# number of batches per window.",
"max_batches_per_window",
"=",
"window_size",
"//",
"min",
"(",
"batch_sizes",
")",
"shuffle_queue_size",
"=",
"max_batches_per_window",
"*",
"3",
"ret",
"=",
"{",
"\"boundaries\"",
":",
"boundaries",
",",
"\"batch_sizes\"",
":",
"batch_sizes",
",",
"\"min_length\"",
":",
"min_length",
",",
"\"max_length\"",
":",
"(",
"max_length",
"if",
"drop_long_sequences",
"else",
"10",
"**",
"9",
")",
",",
"\"shuffle_queue_size\"",
":",
"shuffle_queue_size",
",",
"}",
"return",
"ret"
] |
A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length
|
[
"A",
"batching",
"scheme",
"based",
"on",
"model",
"hyperparameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L80-L164
|
21,792
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
hparams_to_batching_scheme
|
def hparams_to_batching_scheme(hparams,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1):
"""Wrapper around _batching_scheme with hparams."""
return batching_scheme(
batch_size=hparams.batch_size,
min_length=hparams.min_length,
max_length=hparams.max_length,
min_length_bucket=hparams.min_length_bucket,
length_bucket_step=hparams.length_bucket_step,
drop_long_sequences=drop_long_sequences,
shard_multiplier=shard_multiplier,
length_multiplier=length_multiplier)
|
python
|
def hparams_to_batching_scheme(hparams,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1):
"""Wrapper around _batching_scheme with hparams."""
return batching_scheme(
batch_size=hparams.batch_size,
min_length=hparams.min_length,
max_length=hparams.max_length,
min_length_bucket=hparams.min_length_bucket,
length_bucket_step=hparams.length_bucket_step,
drop_long_sequences=drop_long_sequences,
shard_multiplier=shard_multiplier,
length_multiplier=length_multiplier)
|
[
"def",
"hparams_to_batching_scheme",
"(",
"hparams",
",",
"drop_long_sequences",
"=",
"False",
",",
"shard_multiplier",
"=",
"1",
",",
"length_multiplier",
"=",
"1",
")",
":",
"return",
"batching_scheme",
"(",
"batch_size",
"=",
"hparams",
".",
"batch_size",
",",
"min_length",
"=",
"hparams",
".",
"min_length",
",",
"max_length",
"=",
"hparams",
".",
"max_length",
",",
"min_length_bucket",
"=",
"hparams",
".",
"min_length_bucket",
",",
"length_bucket_step",
"=",
"hparams",
".",
"length_bucket_step",
",",
"drop_long_sequences",
"=",
"drop_long_sequences",
",",
"shard_multiplier",
"=",
"shard_multiplier",
",",
"length_multiplier",
"=",
"length_multiplier",
")"
] |
Wrapper around _batching_scheme with hparams.
|
[
"Wrapper",
"around",
"_batching_scheme",
"with",
"hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L167-L180
|
21,793
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
pad_for_tpu
|
def pad_for_tpu(shapes_dict, hparams, max_length):
"""Pads unknown features' dimensions for TPU."""
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_seq_length)
targets_none_filler = get_filler(hparams.max_target_seq_length)
def pad_one_shape(shape, none_filler):
return [
(dim if dim is not None else none_filler) for dim in shape.as_list()
]
for key, shape in six.iteritems(shapes_dict):
if key == "inputs":
padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
elif key == "targets":
padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
else:
padded_shapes[key] = pad_one_shape(shape, max_length)
return padded_shapes
|
python
|
def pad_for_tpu(shapes_dict, hparams, max_length):
"""Pads unknown features' dimensions for TPU."""
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_seq_length)
targets_none_filler = get_filler(hparams.max_target_seq_length)
def pad_one_shape(shape, none_filler):
return [
(dim if dim is not None else none_filler) for dim in shape.as_list()
]
for key, shape in six.iteritems(shapes_dict):
if key == "inputs":
padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
elif key == "targets":
padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
else:
padded_shapes[key] = pad_one_shape(shape, max_length)
return padded_shapes
|
[
"def",
"pad_for_tpu",
"(",
"shapes_dict",
",",
"hparams",
",",
"max_length",
")",
":",
"padded_shapes",
"=",
"{",
"}",
"def",
"get_filler",
"(",
"specified_max_length",
")",
":",
"if",
"not",
"specified_max_length",
":",
"return",
"max_length",
"return",
"min",
"(",
"specified_max_length",
",",
"max_length",
")",
"inputs_none_filler",
"=",
"get_filler",
"(",
"hparams",
".",
"max_input_seq_length",
")",
"targets_none_filler",
"=",
"get_filler",
"(",
"hparams",
".",
"max_target_seq_length",
")",
"def",
"pad_one_shape",
"(",
"shape",
",",
"none_filler",
")",
":",
"return",
"[",
"(",
"dim",
"if",
"dim",
"is",
"not",
"None",
"else",
"none_filler",
")",
"for",
"dim",
"in",
"shape",
".",
"as_list",
"(",
")",
"]",
"for",
"key",
",",
"shape",
"in",
"six",
".",
"iteritems",
"(",
"shapes_dict",
")",
":",
"if",
"key",
"==",
"\"inputs\"",
":",
"padded_shapes",
"[",
"key",
"]",
"=",
"pad_one_shape",
"(",
"shape",
",",
"inputs_none_filler",
")",
"elif",
"key",
"==",
"\"targets\"",
":",
"padded_shapes",
"[",
"key",
"]",
"=",
"pad_one_shape",
"(",
"shape",
",",
"targets_none_filler",
")",
"else",
":",
"padded_shapes",
"[",
"key",
"]",
"=",
"pad_one_shape",
"(",
"shape",
",",
"max_length",
")",
"return",
"padded_shapes"
] |
Pads unknown features' dimensions for TPU.
|
[
"Pads",
"unknown",
"features",
"dimensions",
"for",
"TPU",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L194-L218
|
21,794
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
standardize_shapes
|
def standardize_shapes(features, batch_size=None):
"""Set the right shapes for the features."""
for fname in ["inputs", "targets"]:
if fname not in features:
continue
f = features[fname]
while len(f.get_shape()) < 4:
f = tf.expand_dims(f, axis=-1)
features[fname] = f
if batch_size:
# Ensure batch size is set on all features
for _, t in six.iteritems(features):
shape = t.get_shape().as_list()
shape[0] = batch_size
t.set_shape(t.get_shape().merge_with(shape))
# Assert shapes are fully known
t.get_shape().assert_is_fully_defined()
return features
|
python
|
def standardize_shapes(features, batch_size=None):
"""Set the right shapes for the features."""
for fname in ["inputs", "targets"]:
if fname not in features:
continue
f = features[fname]
while len(f.get_shape()) < 4:
f = tf.expand_dims(f, axis=-1)
features[fname] = f
if batch_size:
# Ensure batch size is set on all features
for _, t in six.iteritems(features):
shape = t.get_shape().as_list()
shape[0] = batch_size
t.set_shape(t.get_shape().merge_with(shape))
# Assert shapes are fully known
t.get_shape().assert_is_fully_defined()
return features
|
[
"def",
"standardize_shapes",
"(",
"features",
",",
"batch_size",
"=",
"None",
")",
":",
"for",
"fname",
"in",
"[",
"\"inputs\"",
",",
"\"targets\"",
"]",
":",
"if",
"fname",
"not",
"in",
"features",
":",
"continue",
"f",
"=",
"features",
"[",
"fname",
"]",
"while",
"len",
"(",
"f",
".",
"get_shape",
"(",
")",
")",
"<",
"4",
":",
"f",
"=",
"tf",
".",
"expand_dims",
"(",
"f",
",",
"axis",
"=",
"-",
"1",
")",
"features",
"[",
"fname",
"]",
"=",
"f",
"if",
"batch_size",
":",
"# Ensure batch size is set on all features",
"for",
"_",
",",
"t",
"in",
"six",
".",
"iteritems",
"(",
"features",
")",
":",
"shape",
"=",
"t",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"shape",
"[",
"0",
"]",
"=",
"batch_size",
"t",
".",
"set_shape",
"(",
"t",
".",
"get_shape",
"(",
")",
".",
"merge_with",
"(",
"shape",
")",
")",
"# Assert shapes are fully known",
"t",
".",
"get_shape",
"(",
")",
".",
"assert_is_fully_defined",
"(",
")",
"return",
"features"
] |
Set the right shapes for the features.
|
[
"Set",
"the",
"right",
"shapes",
"for",
"the",
"features",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L240-L259
|
21,795
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
_file_num_records_cached
|
def _file_num_records_cached(filename):
"""Return the number of TFRecords in a file."""
# Cache the result, as this is expensive to compute
if filename in _file_num_records_cache:
return _file_num_records_cache[filename]
ret = 0
for _ in tf.python_io.tf_record_iterator(filename):
ret += 1
_file_num_records_cache[filename] = ret
return ret
|
python
|
def _file_num_records_cached(filename):
"""Return the number of TFRecords in a file."""
# Cache the result, as this is expensive to compute
if filename in _file_num_records_cache:
return _file_num_records_cache[filename]
ret = 0
for _ in tf.python_io.tf_record_iterator(filename):
ret += 1
_file_num_records_cache[filename] = ret
return ret
|
[
"def",
"_file_num_records_cached",
"(",
"filename",
")",
":",
"# Cache the result, as this is expensive to compute",
"if",
"filename",
"in",
"_file_num_records_cache",
":",
"return",
"_file_num_records_cache",
"[",
"filename",
"]",
"ret",
"=",
"0",
"for",
"_",
"in",
"tf",
".",
"python_io",
".",
"tf_record_iterator",
"(",
"filename",
")",
":",
"ret",
"+=",
"1",
"_file_num_records_cache",
"[",
"filename",
"]",
"=",
"ret",
"return",
"ret"
] |
Return the number of TFRecords in a file.
|
[
"Return",
"the",
"number",
"of",
"TFRecords",
"in",
"a",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L269-L278
|
21,796
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/data_reader.py
|
pad_batch
|
def pad_batch(features, batch_multiple):
"""Pad batch dim of features to nearest multiple of batch_multiple."""
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_features = {}
for k, feature in features.items():
rank = len(feature.shape)
paddings = [[0, 0] for _ in range(rank)]
paddings[0][1] = batch_padding
padded_feature = tf.pad(feature, paddings)
padded_features[k] = padded_feature
return padded_features
|
python
|
def pad_batch(features, batch_multiple):
"""Pad batch dim of features to nearest multiple of batch_multiple."""
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_features = {}
for k, feature in features.items():
rank = len(feature.shape)
paddings = [[0, 0] for _ in range(rank)]
paddings[0][1] = batch_padding
padded_feature = tf.pad(feature, paddings)
padded_features[k] = padded_feature
return padded_features
|
[
"def",
"pad_batch",
"(",
"features",
",",
"batch_multiple",
")",
":",
"feature",
"=",
"list",
"(",
"features",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"feature",
")",
"[",
"0",
"]",
"mod",
"=",
"batch_size",
"%",
"batch_multiple",
"has_mod",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"cast",
"(",
"mod",
",",
"tf",
".",
"bool",
")",
",",
"tf",
".",
"int32",
")",
"batch_padding",
"=",
"batch_multiple",
"*",
"has_mod",
"-",
"mod",
"padded_features",
"=",
"{",
"}",
"for",
"k",
",",
"feature",
"in",
"features",
".",
"items",
"(",
")",
":",
"rank",
"=",
"len",
"(",
"feature",
".",
"shape",
")",
"paddings",
"=",
"[",
"[",
"0",
",",
"0",
"]",
"for",
"_",
"in",
"range",
"(",
"rank",
")",
"]",
"paddings",
"[",
"0",
"]",
"[",
"1",
"]",
"=",
"batch_padding",
"padded_feature",
"=",
"tf",
".",
"pad",
"(",
"feature",
",",
"paddings",
")",
"padded_features",
"[",
"k",
"]",
"=",
"padded_feature",
"return",
"padded_features"
] |
Pad batch dim of features to nearest multiple of batch_multiple.
|
[
"Pad",
"batch",
"dim",
"of",
"features",
"to",
"nearest",
"multiple",
"of",
"batch_multiple",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L292-L307
|
21,797
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/gene_expression.py
|
generate_shard_args
|
def generate_shard_args(outfiles, num_examples):
"""Generate start and end indices per outfile."""
num_shards = len(outfiles)
num_examples_per_shard = num_examples // num_shards
start_idxs = [i * num_examples_per_shard for i in range(num_shards)]
end_idxs = list(start_idxs)
end_idxs.pop(0)
end_idxs.append(num_examples)
return zip(start_idxs, end_idxs, outfiles)
|
python
|
def generate_shard_args(outfiles, num_examples):
"""Generate start and end indices per outfile."""
num_shards = len(outfiles)
num_examples_per_shard = num_examples // num_shards
start_idxs = [i * num_examples_per_shard for i in range(num_shards)]
end_idxs = list(start_idxs)
end_idxs.pop(0)
end_idxs.append(num_examples)
return zip(start_idxs, end_idxs, outfiles)
|
[
"def",
"generate_shard_args",
"(",
"outfiles",
",",
"num_examples",
")",
":",
"num_shards",
"=",
"len",
"(",
"outfiles",
")",
"num_examples_per_shard",
"=",
"num_examples",
"//",
"num_shards",
"start_idxs",
"=",
"[",
"i",
"*",
"num_examples_per_shard",
"for",
"i",
"in",
"range",
"(",
"num_shards",
")",
"]",
"end_idxs",
"=",
"list",
"(",
"start_idxs",
")",
"end_idxs",
".",
"pop",
"(",
"0",
")",
"end_idxs",
".",
"append",
"(",
"num_examples",
")",
"return",
"zip",
"(",
"start_idxs",
",",
"end_idxs",
",",
"outfiles",
")"
] |
Generate start and end indices per outfile.
|
[
"Generate",
"start",
"and",
"end",
"indices",
"per",
"outfile",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gene_expression.py#L208-L216
|
21,798
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/gene_expression.py
|
to_example_dict
|
def to_example_dict(encoder, inputs, mask, outputs):
"""Convert single h5 record to an example dict."""
# Inputs
bases = []
input_ids = []
last_idx = -1
for row in np.argwhere(inputs):
idx, base_id = row
idx, base_id = int(idx), int(base_id)
assert idx > last_idx # if not, means 2 True values in 1 row
# Some rows are all False. Those rows are mapped to UNK_ID.
while idx != last_idx + 1:
bases.append(encoder.UNK)
last_idx += 1
bases.append(encoder.BASES[base_id])
last_idx = idx
assert len(inputs) == len(bases)
input_ids = encoder.encode(bases)
input_ids.append(text_encoder.EOS_ID)
# Targets: mask and output
targets_mask = [float(v) for v in mask]
# The output is (n, m); store targets_shape so that it can be reshaped
# properly on the other end.
targets = [float(v) for v in outputs.flatten()]
targets_shape = [int(dim) for dim in outputs.shape]
assert mask.shape[0] == outputs.shape[0]
example_keys = ["inputs", "targets_mask", "targets", "targets_shape"]
ex_dict = dict(
zip(example_keys, [input_ids, targets_mask, targets, targets_shape]))
return ex_dict
|
python
|
def to_example_dict(encoder, inputs, mask, outputs):
"""Convert single h5 record to an example dict."""
# Inputs
bases = []
input_ids = []
last_idx = -1
for row in np.argwhere(inputs):
idx, base_id = row
idx, base_id = int(idx), int(base_id)
assert idx > last_idx # if not, means 2 True values in 1 row
# Some rows are all False. Those rows are mapped to UNK_ID.
while idx != last_idx + 1:
bases.append(encoder.UNK)
last_idx += 1
bases.append(encoder.BASES[base_id])
last_idx = idx
assert len(inputs) == len(bases)
input_ids = encoder.encode(bases)
input_ids.append(text_encoder.EOS_ID)
# Targets: mask and output
targets_mask = [float(v) for v in mask]
# The output is (n, m); store targets_shape so that it can be reshaped
# properly on the other end.
targets = [float(v) for v in outputs.flatten()]
targets_shape = [int(dim) for dim in outputs.shape]
assert mask.shape[0] == outputs.shape[0]
example_keys = ["inputs", "targets_mask", "targets", "targets_shape"]
ex_dict = dict(
zip(example_keys, [input_ids, targets_mask, targets, targets_shape]))
return ex_dict
|
[
"def",
"to_example_dict",
"(",
"encoder",
",",
"inputs",
",",
"mask",
",",
"outputs",
")",
":",
"# Inputs",
"bases",
"=",
"[",
"]",
"input_ids",
"=",
"[",
"]",
"last_idx",
"=",
"-",
"1",
"for",
"row",
"in",
"np",
".",
"argwhere",
"(",
"inputs",
")",
":",
"idx",
",",
"base_id",
"=",
"row",
"idx",
",",
"base_id",
"=",
"int",
"(",
"idx",
")",
",",
"int",
"(",
"base_id",
")",
"assert",
"idx",
">",
"last_idx",
"# if not, means 2 True values in 1 row",
"# Some rows are all False. Those rows are mapped to UNK_ID.",
"while",
"idx",
"!=",
"last_idx",
"+",
"1",
":",
"bases",
".",
"append",
"(",
"encoder",
".",
"UNK",
")",
"last_idx",
"+=",
"1",
"bases",
".",
"append",
"(",
"encoder",
".",
"BASES",
"[",
"base_id",
"]",
")",
"last_idx",
"=",
"idx",
"assert",
"len",
"(",
"inputs",
")",
"==",
"len",
"(",
"bases",
")",
"input_ids",
"=",
"encoder",
".",
"encode",
"(",
"bases",
")",
"input_ids",
".",
"append",
"(",
"text_encoder",
".",
"EOS_ID",
")",
"# Targets: mask and output",
"targets_mask",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"mask",
"]",
"# The output is (n, m); store targets_shape so that it can be reshaped",
"# properly on the other end.",
"targets",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"outputs",
".",
"flatten",
"(",
")",
"]",
"targets_shape",
"=",
"[",
"int",
"(",
"dim",
")",
"for",
"dim",
"in",
"outputs",
".",
"shape",
"]",
"assert",
"mask",
".",
"shape",
"[",
"0",
"]",
"==",
"outputs",
".",
"shape",
"[",
"0",
"]",
"example_keys",
"=",
"[",
"\"inputs\"",
",",
"\"targets_mask\"",
",",
"\"targets\"",
",",
"\"targets_shape\"",
"]",
"ex_dict",
"=",
"dict",
"(",
"zip",
"(",
"example_keys",
",",
"[",
"input_ids",
",",
"targets_mask",
",",
"targets",
",",
"targets_shape",
"]",
")",
")",
"return",
"ex_dict"
] |
Convert single h5 record to an example dict.
|
[
"Convert",
"single",
"h5",
"record",
"to",
"an",
"example",
"dict",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gene_expression.py#L263-L295
|
21,799
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
linear_interpolate
|
def linear_interpolate(tensor1, tensor2, coeffs):
"""Linearly interpolate between two tensors at coeff.
Args:
tensor1: 4-D Tensor, shape=(NHWC)
tensor2: 4-D Tensor, shape=(NHWC)
coeffs: list of floats.
Returns:
interp_latents: 5-D Tensor, with interp_latents[i] representing
interpolations at coeffs[i].
shape=(len(coeffs), NHWC)
"""
interp_tensors = []
for coeff in coeffs:
interp_tensor = tensor1 + coeff * (tensor2 - tensor1)
interp_tensors.append(interp_tensor)
return tf.concat(interp_tensors, axis=0)
|
python
|
def linear_interpolate(tensor1, tensor2, coeffs):
"""Linearly interpolate between two tensors at coeff.
Args:
tensor1: 4-D Tensor, shape=(NHWC)
tensor2: 4-D Tensor, shape=(NHWC)
coeffs: list of floats.
Returns:
interp_latents: 5-D Tensor, with interp_latents[i] representing
interpolations at coeffs[i].
shape=(len(coeffs), NHWC)
"""
interp_tensors = []
for coeff in coeffs:
interp_tensor = tensor1 + coeff * (tensor2 - tensor1)
interp_tensors.append(interp_tensor)
return tf.concat(interp_tensors, axis=0)
|
[
"def",
"linear_interpolate",
"(",
"tensor1",
",",
"tensor2",
",",
"coeffs",
")",
":",
"interp_tensors",
"=",
"[",
"]",
"for",
"coeff",
"in",
"coeffs",
":",
"interp_tensor",
"=",
"tensor1",
"+",
"coeff",
"*",
"(",
"tensor2",
"-",
"tensor1",
")",
"interp_tensors",
".",
"append",
"(",
"interp_tensor",
")",
"return",
"tf",
".",
"concat",
"(",
"interp_tensors",
",",
"axis",
"=",
"0",
")"
] |
Linearly interpolate between two tensors at coeff.
Args:
tensor1: 4-D Tensor, shape=(NHWC)
tensor2: 4-D Tensor, shape=(NHWC)
coeffs: list of floats.
Returns:
interp_latents: 5-D Tensor, with interp_latents[i] representing
interpolations at coeffs[i].
shape=(len(coeffs), NHWC)
|
[
"Linearly",
"interpolate",
"between",
"two",
"tensors",
"at",
"coeff",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L34-L50
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.